seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64, nullable) | dataset (string) | pt (string)
---|---|---|---|---|---|---|---|---|---|---|---|---|
38348533917
|
#!/usr/bin/python3
import sys, getopt, os
import pandas as pd
import numpy as np
from analytical_settings import HAP1_control_1, HAP1_control_2, HAP1_control_3, HAP1_control_4
from analytical_settings import grouping
from analytical_settings import cauchy_compatibility
from global_functions import read_csv, write_csv
from create_phenosaurus_reference import build_phenosaurus_reference
##################################
# Function 'normalize_to_median'
##################################
# Arguments: raw <pandas DataFrame with sense and antisense counts for the
# sample that requires normalization>
# reference <pandas DataFrame with accumulated sense and antisense counts>
# Returns: normalized_data <pandas DataFrame with the same structure as the sample,
# but with normalized counts>
##################################
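# Illustrative usage sketch (hypothetical toy data, not part of the pipeline):
# the reference carries accumulated 'sense_reference'/'antisense_reference'
# counts per gene and the raw sample carries 'sense'/'antisense' counts.
#   reference = pd.DataFrame({'gene': ['A', 'B', 'C'],
#                             'sense_reference': [10, 4, 0],
#                             'antisense_reference': [12, 6, 0]})
#   raw = pd.DataFrame({'gene': ['A', 'B', 'C'],
#                       'sense': [3, 8, 2],
#                       'antisense': [5, 2, 1]})
#   normalized = normalize_to_median(raw, reference)  # columns: gene, sense, antisense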
def normalize_to_median(raw, reference):
# Calculate sense/total ratio for reference dataset and store in 'ref_ratio' column in DataFrame
reference = reference.assign(ref_ratio=(reference['sense_reference'] + 1)
/ (reference['sense_reference'] + reference['antisense_reference'] + 2))
normalized_data = pd.DataFrame(columns=['gene'])
if cauchy_compatibility:
print("original Cauchy.R algorithm...", end='')
else:
print("latest algorithm...", end='')
subset = ['gene', 'sense', 'antisense']
s = pd.merge(reference, raw, on='gene', how='outer')
# Split sample(s) into the part that can be normalized (sn) and the part that cannot be normalized (scn)
sn = s[((s['sense'] + s['antisense']) > 0) & (
(s.sense_reference + s.antisense_reference) > 0)] # meaning counts in reference and sample
scn = s[((s['sense'] + s['antisense']) > 0) & (
(s.sense_reference + s.antisense_reference) == 0)] # meaning counts in sample but not in reference
# Calculate sense-ratio for current sample and sort DataFrame based on sense-ratio of reference set
sn = sn.assign(ratio=(s[subset[1]] + 1) / (s[subset[1]] + s[subset[2]] + 2))
sn = sn.sort_values(by='ref_ratio')
# Create empty dataframe for the normalized sample (ns)
ns = pd.DataFrame(columns=subset)
if not cauchy_compatibility:
# Calculate optimal grouping value based on requested grouping (to create equal groups and hence identical normalization for every gene)
divider = np.round(sn.shape[0] / grouping)
groupsize = (sn.shape[0] / divider)
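# Illustrative arithmetic (hypothetical numbers): with 18,100 genes and grouping = 500,
# divider = round(18100 / 500) = 36 and groupsize ~ 502.8, so every group gets the same
# (possibly fractional) size instead of leaving one small remainder group.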
# Group sorted DataFrame in equally sized groups and loop over these groups
for g, df in sn.groupby(np.arange(len(sn)) // groupsize):
ref_median = df['ref_ratio'].median()
sample_median = df['ratio'].median()
df = df.assign(norm_sense=np.where(df['ratio'] <= sample_median
, np.where(
np.round((df['ratio'] / (sample_median) * ref_median * (df[subset[1]] + df[subset[2]]))) > (
df[subset[1]] + df[subset[2]]) # i.e. the normalized sense count is higher than the total counts
, (df[subset[1]] + df[subset[2]])
, np.round((df['ratio'] / (sample_median) * ref_median * (df[subset[1]] + df[subset[2]]))))
, np.where(np.round(((1 - (1 - df['ratio']) / (1 - sample_median) * (
1 - ref_median)) * (df[subset[1]] + df[subset[2]]))) > (df[subset[1]] + df[subset[2]])
, (df[subset[1]] + df[subset[2]])
, np.round(((1 - (1 - df['ratio']) / (1 - sample_median) * (
1 - ref_median)) * (df[subset[1]] + df[subset[2]]))))))
df = df.assign(norm_antisense=(df[subset[1]] + df[subset[2]] - df['norm_sense']))
df[['norm_sense', 'norm_antisense']] = df[['norm_sense', 'norm_antisense']].astype(np.int32)
df = df[['gene', 'norm_sense', 'norm_antisense']]
df.columns = subset
ns = ns.append(df) # Append to DataFrame with normalized counts of previous group
else:
# If cauchy_compatibility==True, grouping of genes will be performed according to the previous version of
# the pipeline as in 'cauchy.R'
# Calculate the number of groups
divider = int(np.ceil(sn.shape[0] / grouping)) # The total number of groups
genes = sn.shape[0]
i = 1 # Current index of input dataframe
for g in range(0, divider): # Loop over the groups of genes
df = pd.DataFrame() # Create temporary dataframe for current group
for gi in range(0, grouping): # Append the genes to the current group
i = gi + g * grouping # DataFrame Index
if i <= genes: # For the last group
df = df.append(sn[i:i + 1])
ref_median = df['ref_ratio'].median()
sample_median = df['ratio'].median()
df = df.assign(norm_sense=np.where(df['ratio'] <= sample_median
, np.where(
np.round((df['ratio'] / (sample_median) * ref_median * (df[subset[1]] + df[subset[2]]))) > (
df[subset[1]] + df[subset[2]]) # i.e. the normalized sense count is higher than the total counts
, (df[subset[1]] + df[subset[2]])
, np.round((df['ratio'] / (sample_median) * ref_median * (df[subset[1]] + df[subset[2]]))))
, np.where(np.round(((1 - (1 - df['ratio']) / (1 - sample_median) * (
1 - ref_median)) * (df[subset[1]] + df[subset[2]]))) > (df[subset[1]] + df[subset[2]])
, (df[subset[1]] + df[subset[2]])
, np.round(((1 - (1 - df['ratio']) / (1 - sample_median) * (
1 - ref_median)) * (df[subset[1]] + df[subset[2]]))))))
df = df.assign(norm_antisense=(df[subset[1]] + df[subset[2]] - df['norm_sense']))
df = df[['gene', 'norm_sense', 'norm_antisense']]
df.columns = subset
ns = ns.append(df) # Append to DataFrame with normalized counts of previous group
# Merge the normalized sample with the data points of the sample that could not be normalized because of the
# absence of these genes in the reference
ns = ns.append(scn[subset])
normalized_data = pd.merge(ns, normalized_data, on='gene', how='outer')
normalized_data[['sense', 'antisense']] = normalized_data[['sense', 'antisense']].astype(np.int32)
return normalized_data
def process_controls(controls):
controls = controls.copy()
reference_cols = ['gene', 'sense_reference', 'antisense_reference']
reference = pd.DataFrame(columns=reference_cols)
for c, f in controls.items(): # c is control, f = file
controls[c] = pd.read_csv(f, sep='\t')[['gene', 'sense',
'antisense']] # Read the file and replace the control's file path with the actual data
reference = pd.merge(controls[c], reference, on='gene', how='outer').fillna(0)
reference = reference.assign(
sense_reference=(reference[['sense', 'sense_reference']].sum(axis=1)).astype(np.int32))
reference = reference.assign(
antisense_reference=(reference[['antisense', 'antisense_reference']].sum(axis=1)).astype(np.int32))
reference.drop(['sense', 'antisense'], axis=1, inplace=True)
return controls, reference
def main(argv):
syntax = 'normalize.py -n <controls or directory of replicate> -o <directory where normalized control will be saved>'
try:
opts, args = getopt.getopt(argv, "hn:o:s:r:", ["normalize=", "outdir=", "screenname=", "refname="]) # short options that take a value need a trailing ':'
except getopt.GetoptError:
print(syntax)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(syntax)
sys.exit()
elif opt in ("-n", "--normalize"):
data = arg
elif opt in ("-o", "--outdir"):
outdir = arg
elif opt in ("-s", "--screenname"):
screenname = arg
elif opt in ("-r", "--refname"):
refname = arg
else:
assert False, "unhandled option"
# Create dictionary to store the names of the controls (keys) and their associated records (values)
control_files = {'HAP1_control_1': HAP1_control_1, 'HAP1_control_2': HAP1_control_2, 'HAP1_control_3': HAP1_control_3,
'HAP1_control_4': HAP1_control_4}
# Read in controls and calculate aggregate reference counts
controls, reference = process_controls(control_files)
if data == 'controls':
for c, d in controls.items(): # c = control, d = data
print("Normalizing control using ", end='')
norm = normalize_to_median(d, reference).sort_values(by='gene')
write_csv(''.join([screenname, "_replicate_", str(int(c[-1:])), "_sense_vs_antisense_counts_normalized.csv"]), os.path.join(outdir, ''), norm) # Create file for normalization of samples
print(control_files[c])
build_phenosaurus_reference(norm, read_csv(control_files[c]), screenname, refname, outdir, int(c[-1:]))
print("done")
else:
data = os.path.join(data, '')
raw = read_csv(''.join([data, 'sense_vs_antisense_counts.csv']))[['gene', 'sense', 'antisense']]
print("Normalizing sample using ", end='')
norm = normalize_to_median(raw, reference).sort_values(by='gene')
write_csv('sense_vs_antisense_counts_normalized.csv', data, norm)
print("done")
if __name__ == "__main__":
main(sys.argv[1:])
|
BrummelkampResearch/HAP1_Synthetic_Lethality_pipeline
|
sub/normalize.py
|
normalize.py
|
py
| 9,869 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34905834189
|
import shutil
import tempfile
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from ..forms import PostForm
from ..models import Post
User = get_user_model()
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class PostFormTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username='username')
cls.uploaded = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
cls.form = PostForm()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
def setUp(self):
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
def test_create_post(self):
"""Валидная форма создает запись в Post."""
# Подсчитаем количество записей в Post
posts_count = Post.objects.count()
form_data = {
'text': 'Тестовый пост',
'image': PostFormTests.uploaded,
}
self.uploaded.seek(0)
# Send a POST request
response = self.authorized_client.post(
reverse('posts:post_create'),
data=form_data,
follow=True
)
# Check that the redirect worked
self.assertRedirects(response, reverse(
'posts:profile',
kwargs={'username': 'username'})
)
# Check that the number of posts increased
self.assertEqual(Post.objects.count(), posts_count + 1)
# Check that a record with the given id was created
self.assertTrue(
Post.objects.filter(
text='Тестовый пост',
pk=1,
image='posts/small.gif',
).exists()
)
def test_edit_post(self):
Post.objects.create(
text='Тестовый пост',
author=self.user,
pk=1,
image=self.uploaded,
)
form_data = {
'text': 'Тестовый пост изменился',
}
# Send a POST request
response = self.authorized_client.post(
reverse('posts:post_edit', kwargs={'post_id': 1}),
data=form_data,
follow=True
)
post_changed = Post.objects.get(pk=1)
# Check that the redirect to the same id worked
self.assertRedirects(response, reverse(
'posts:post_detail',
kwargs={'post_id': 1})
)
self.assertEqual(post_changed.text, 'Тестовый пост изменился')
class CommentFormTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username='username')
cls.guest_client = Client()
cls.authorized_client = Client()
cls.authorized_client.force_login(cls.user)
Post.objects.create(
text='Тестовый пост',
author=cls.user,
pk=1,
)
def test_add_comment(self):
"""Комментировать посты может только авторизованный пользователь."""
form_data = {
'text': 'Тестовый комментарий',
}
response1 = self.authorized_client.post(
reverse('posts:add_comment', kwargs={'post_id': 1}),
data=form_data,
follow=True
)
response2 = self.guest_client.post(
reverse('posts:add_comment', kwargs={'post_id': 1}),
data=form_data,
follow=True
)
# Check that the redirect worked
self.assertRedirects(response1, reverse(
'posts:post_detail',
kwargs={'post_id': 1})
)
self.assertRedirects(response2,
'/auth/login/?next=/posts/1/comment/'
)
|
DianaKab/hw05_final_new
|
yatube/posts/tests/test_forms.py
|
test_forms.py
|
py
| 4,715 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
11428296984
|
"""Tool pack for email.
emails.py in: 2021-12-11.
This module exports the following functions:
-> is_valid - Check if the string is a valid email.
-> is_same - Check if the two emails are the same.
"""
import doctest
from re import match
# File with the configuration of the loguru logger
from logs.loguru_conf import logger
# public symbols
__all__ = [
'is_valid',
'is_same',
]
def is_valid(email: str) -> bool:
"""-> Checks to see if the email address is valid.
The email address is valid if it has the following format:
<username>@<domain>.<tld>
Test:
>>> is_valid('[email protected]')
True
>>> is_valid('[email protected]')
True
>>> is_valid('[email protected]')
True
>>> is_valid('[email protected]')
True
>>> is_valid('[email protected]')
True
>>> is_valid('[email protected]')
True
>>> is_valid('mail')
False
>>> is_valid('john.doe@domain')
False
>>> is_valid('john.doe@ domain.com')
False
>>> is_valid('jane_doe @domain.com')
False
>>> is_valid('jane [email protected]')
False
>>> is_valid('[email protected] ')
False
>>> is_valid('[email protected].')
False
:param email: email to validate
:return: True if valid, False if not
:rtype: bool
"""
logger.debug(f'email: {email}')
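# Descriptive note on the pattern below: one or more word characters, dots or pluses
# for the local part, '@', word characters/dots/hyphens for the domain, a literal dot,
# a 2-3 letter TLD, and an optional two-letter suffix. The '.' inside the optional
# group is unescaped, so it matches any single character there.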
return bool(
match(r'^[\w.+]+@[\w.-]+\.[a-zA-Z]{2,3}(?:.[a-zA-Z]{2})?$', email))
def is_same(email_1: str, email_2: str) -> bool:
"""-> Checks to see if the two emails are the same.
The two emails are the same if they have the same username, domain name
and the same tld.
Test:
>>> is_same('jane.doe@domain', 'jane.doe@domain')
False
>>> is_same('[email protected]', ' [email protected]')
False
>>> is_same('[email protected]', '[email protected]')
False
>>> is_same('[email protected]', '[email protected]')
True
:param email_1: First email address to compare
:param email_2: Second email address to compare
:return: True if the two email addresses are the same, False if not
:rtype: bool
"""
logger.debug(f'email_1: {email_1}')
logger.debug(f'email_2: {email_2}')
regular_expression = r'^[\w.+]+@[\w.-]+\.[a-zA-Z]{2,3}(?:.[a-zA-Z]{2})?$'
return bool(
match(regular_expression, email_1)
and match(regular_expression, email_2)
and email_1.split('@')[0] == email_2.split('@')[0]
and email_1.split('@')[1] == email_2.split('@')[1]
)
def main():
"""Main function."""
doctest.testmod()
list_mails = [
'jane.doe@domain',
'jane.doe@domain.',
'[email protected]',
'[email protected]',
'[email protected]',
]
for email in list_mails:
print(
f'Is {email} valid? {is_valid(email)}; '
f'Is same? {is_same(email, email)}',
end='\n\n',
)
if __name__ == '__main__':
logger.info('Running')
main()
logger.info('Ending')
|
cicerohr/python_template
|
tools/emails.py
|
emails.py
|
py
| 3,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1907238463
|
# Write a program that reads a number and returns the sum of its digits, treating the number as a natural number.
# Example:
# If we read 123, the sum of its digits is 6.
# If we read 5, the sum of its digits is 5.
numero = int(input("Ingrese un número: "))
suma_cifras = 0
while numero > 0:
cifra = numero % 10
suma_cifras += cifra
numero //= 10
print("La suma de las cifras es:", suma_cifras)
|
ANDRESTOBAJAS/Carpeta-de-Python
|
sumador de cifras.py
|
sumador de cifras.py
|
py
| 406 |
python
|
es
|
code
| 0 |
github-code
|
6
|
23525022654
|
from matplotlib import pyplot as plt
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
# class_names = ['airplane','automobile','bird','cat','deer',
# 'dog','frog','horse','ship','truck']
# not needed AFTER getting mean and standard deviation
# cifar10_train = datasets.CIFAR10(
# root='data', train=True, download=True,
# transform=transforms.ToTensor())
#
# cifar10_val = datasets.CIFAR10(
# root='data', train=False, download=True,
# transform=transforms.ToTensor())
# imgs_train = torch.stack([img_t for img_t, _ in cifar10_train], dim=3)
# imgs_val = torch.stack([img_t for img_t, _ in cifar10_val], dim=3)
# train_mean = imgs_train.view(3,-1).mean(dim=1)
# train_std = imgs_train.view(3,-1).std(dim=1)
#
# val_mean = imgs_val.view(3,-1).mean(dim=1)
# val_std = imgs_val.view(3,-1).std(dim=1)
# Load the data using the precomputed normalization statistics
cifar10_train = datasets.CIFAR10(
root='data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4915, 0.4823, 0.4468),
(0.2470, 0.2435, 0.2616))]))
train_length = len(cifar10_train)
train_size = int(0.8 *train_length)
val_size = train_length - train_size
# Make the train and validation sets
cifar10_train, cifar10_val = torch.utils.data.random_split(cifar10_train, [train_size, val_size])
cifar10_test = datasets.CIFAR10(
root='data', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4915, 0.4823, 0.4468),
(0.2470, 0.2435, 0.2616))]))
# Comment out this section and change the output neurons (and the dataloaders far below) if you want all 10 classes instead of only distinguishing planes from birds
# get only birds and planes
label_map = {0: 0, 2: 1}
class_names = ['airplane', 'bird']
cifar10_train_ = [(img, label_map[label])
for img, label in cifar10_train
if label in [0, 2]]
cifar10_val_ = [(img, label_map[label])
for img, label in cifar10_val
if label in [0, 2]]
cifar10_test_ = [(img, label_map[label])
for img, label in cifar10_test
if label in [0, 2]]
# store train and val loss
train_loss_list = []
val_loss_list = []
epoch_list = []
# make network architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1) # convolution layer (in_chl, out_chl,...)
self.conv1_batchnorm = nn.BatchNorm2d(16)
self.act1 = nn.Tanh() # activation function
self.pool1 = nn.MaxPool2d(2) # pooling (kernel size 2x2)
self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.conv2_batchnorm = nn.BatchNorm2d(8)
self.act2 = nn.Tanh()
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(8 * 8 * 8, 32) # first 8 from conv2, next 8's from pooling (32->16->8)
self.act3 = nn.Tanh()
self.fc2 = nn.Linear(32, 2)
# self.act4 = nn.Softmax(dim=1)
def forward(self, x):
out = self.conv1_batchnorm(self.conv1(x))
out = self.pool1(((self.act1(out))))
out = self.conv2_batchnorm(self.conv2(out))
out = self.pool2(((self.act2(out))))
out = out.view(-1, 8 * 8 * 8) # flatten the 8-channel 8x8 feature maps into a vector for the fully connected layer
out = self.act3(self.fc1(out))
out = self.fc2(out)
return out
import datetime # to measure time
def training_loop(n_epochs, optimizer, model, loss_fn, train_loader, val_loader, epoch_num_of_no_improve):
epoch_no_improve = 0
for epoch in range(1, n_epochs+1):
loss_train = 0.0
for imgs, labels in train_loader:
# move tensors to gpu if available
imgs = imgs.to(device=device)
labels = labels.to(device=device)
outputs = model(imgs)
loss = loss_fn(outputs, labels)
l2_lambda = 0.001
l2_norm = sum(p.pow(2.0).sum()
for p in model.parameters())
loss = loss + l2_lambda * l2_norm
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_train += loss.item()
epoch_list.append(epoch)
train_loss_list.append(loss_train / len(train_loader)) # to track loss
# get loss of validation data
with torch.no_grad():
loss_val = 0.0
for imgs, labels in val_loader:
# move tensors to gpu if available
imgs = imgs.to(device=device)
labels = labels.to(device=device)
outputs = model(imgs)
loss_v = loss_fn(outputs, labels)
loss_val += loss_v.item()
val_loss_list.append(loss_val / len(val_loader))
# set when to print info about training progress
if epoch == 1 or epoch % 1 == 0:
print('Epoch {}, Training loss {}, Validation loss {}'.format(epoch,
loss_train / len(train_loader),
loss_val / len(val_loader)))
# early stopping
if epoch > 1:
if val_loss_list[-1] >= val_loss_list[-2]:
epoch_no_improve += 1
else:
epoch_no_improve = 0
if epoch_no_improve == epoch_num_of_no_improve:
print('Early stopping:')
print('Epoch {}, Training loss {}, Validation loss {}'.format(epoch,
loss_train / len(train_loader),
loss_val / len(val_loader)))
break
def validate_on_test(model, train_loader, val_loader, test_loader):
for name, loader in [("train", train_loader), ("val", val_loader), ('test', test_loader)]:
correct = 0
total = 0
with torch.no_grad(): # <1>
for imgs, labels in loader:
# move to gpu
imgs = imgs.to(device=device)
labels = labels.to(device=device)
outputs = model(imgs)
_, predicted = torch.max(outputs, dim=1) # Gives us the index of the highest value
total += labels.shape[0] # Counts the number of examples, so total is increased by the batch size
correct += int((predicted == labels).sum())
print("Accuracy {}: {:.2f} %".format(name , 100 * (correct / total)))
n_epochs = 100
model = Net().to(device=device)
optimizer = optim.ASGD(model.parameters(), lr=1e-2)
loss_fn = nn.CrossEntropyLoss()
train_loader = torch.utils.data.DataLoader(cifar10_train_, batch_size=64, shuffle=False)
val_loader = torch.utils.data.DataLoader(cifar10_val_, batch_size=64, shuffle=False)
test_loader = torch.utils.data.DataLoader(cifar10_test_, batch_size=64, shuffle=False)
epoch_num_of_no_improve = 5
training_loop(
n_epochs = n_epochs,
optimizer = optimizer,
model = model,
loss_fn = loss_fn,
train_loader = train_loader,
val_loader = val_loader,
epoch_num_of_no_improve=epoch_num_of_no_improve)
validate_on_test(model, train_loader, val_loader, test_loader)
plt.plot(epoch_list, train_loss_list, color='blue', label='train_loss')
plt.plot(epoch_list, val_loss_list, color='green', label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
|
lewiis252/machine_learning
|
cifar10_nn.py
|
cifar10_nn.py
|
py
| 7,791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8909155137
|
"""
Command line tool to search image sources.
"""
import sys
sys.path.insert(0, '../..')
from searcher import database, image
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Invalid number of arguments.")
exit()
db = database.Database(sys.argv[1])
reference_url = sys.argv[2]
distance = int(sys.argv[3])
print(f"Loading {reference_url}")
results = db.search(str(image.Image(reference_url).dhash()), distance)
if results:
print("Results:")
for result in results:
print(f" - {result.url} ({result.distance})")
else:
print("No Results.")
|
SebastianBach/searcher
|
apps/search/search.py
|
search.py
|
py
| 640 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73952557948
|
import os
today = '02-06-19_'
import numpy as np
import treecorr
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Produce Tau correlations, i.e correlation among galaxies and reserved stars')
parser.add_argument('--metacal_cat',
#default='/home2/dfa/sobreira/alsina/catalogs/y3_master/Y3_mastercat_v2_6_20_18_subsampled.h5',
#default='/home2/dfa/sobreira/alsina/catalogs/y3_master/Y3fullmaster/Y3_mastercat_v2_6_20_18.h5',
default='/home/dfa/sobreira/alsina/catalogs/Y3_mastercat_7_24/Y3_mastercat_7_24_19.h5',
help='Full Path to the Metacalibration catalog')
parser.add_argument('--piff_cat',
default='/home/dfa/sobreira/alsina/catalogs/y3a1-v29',
help='Full Path to the Only stars Piff catalog')
parser.add_argument('--exps_file',
default='/home/dfa/sobreira/alsina/Y3_shearcat_tests/alpha-beta-eta-test/code/ally3.grizY',
#default='/home/dfa/sobreira/alsina/DESWL/psf/testexp',
help='list of exposures (in lieu of separate exps)')
parser.add_argument('--bands', default='riz', type=str,
help='Limit to the given bands')
parser.add_argument('--use_reserved', default=True,
action='store_const', const=True,
help='just use the objects with the RESERVED flag')
parser.add_argument('--frac', default=1.,
type=float,
help='Choose a random fraction of the input stars')
parser.add_argument('--mod', default=True,
action='store_const', const=True,
help='If true it substracts the mean to each field before calculate correlations')
parser.add_argument('--obs', default=False,
action='store_const', const=True,
help='If true it uses psf_e stars for tau0')
parser.add_argument('--weights', default=False,
action='store_const', const=True,
help='Use weights in the reading of Metacal')
parser.add_argument('--bin_config', default=None,
help='bin_config file for running taus')
parser.add_argument('--outpath', default='/home/dfa/sobreira/alsina/Y3_shearcat_tests/alpha-beta-eta-test/measured_correlations/',
help='location of the output of the files')
parser.add_argument('--filename', default='TAUS_zbin_n.fits', type=str,
help='filename of the tau output file')
parser.add_argument('--zbin', default=None,type=int,
help='Run particular tomobin')
parser.add_argument('--nz_source',
#default='/home/dfa/sobreira/alsina/catalogs/y3_master/nz_source_zbin.h5',
default='/home/dfa/sobreira/alsina/catalogs/Y3_mastercat_7_24/nz_source_zbin.h5',
help='Indexes catalog to select galaxies in a particular redshift bin in Metacal')
args = parser.parse_args()
return args
def main():
import sys; sys.path.append(".")
from src.read_cats import read_data_stars, toList, read_metacal
from src.runcorr import measure_tau
from astropy.io import fits
import treecorr
args = parse_args()
#Make the directory where the output data will be saved
outpath = os.path.expanduser(args.outpath)
try:
if not os.path.exists(outpath):
os.makedirs(outpath)
except OSError:
if not os.path.exists(outpath): raise
#Reading Mike stars catalog
keys = ['ra', 'dec','obs_e1', 'obs_e2', 'obs_T',
'piff_e1', 'piff_e2', 'piff_T', 'mag']
galkeys = ['ra','dec','e_1','e_2','R11','R22']
data_stars = read_data_stars(toList(args.exps_file),args.piff_cat, keys,limit_bands=args.bands,use_reserved=args.use_reserved)
if args.bin_config is not None:
print("Using external bin config")
bin_config = treecorr.read_config(args.bin_config)
print(bin_config)
else:
#bin_config = dict( sep_units = 'arcmin', min_sep = 0.1, max_sep = 250, nbins = 20, bin_slop=0.03 )
bin_config = dict( sep_units = 'arcmin', min_sep = 0.1, max_sep = 250, nbins = 20, )
#bin_config = dict( sep_units = 'arcmin', min_sep = 1.0, max_sep = 250, nbins = 20,)
#bin_config = dict(sep_units = 'arcmin' , bin_slop = 0.1, min_sep = 0.1, max_sep = 300, bin_size = 0.2)
if args.zbin is not None:
print('STARTING TOMOGRAPHIC TAUS! Measuring tau for zbin=', args.zbin)
data_galaxies = read_metacal(args.metacal_cat, galkeys, zbin=args.zbin,nz_source_file=args.nz_source, weights=args.weights)
else:
print("STARTING NON TOMOGRAPHIC TAUS")
data_galaxies = read_metacal(args.metacal_cat, galkeys, weights=args.weights )
tau0, tau2, tau5= measure_tau( data_stars , data_galaxies, bin_config,
mod=args.mod)
tau0marr = tau0.xim; tau2marr = tau2.xim; tau5marr = tau5.xim;
tau0parr = tau0.xip; tau2parr = tau2.xip; tau5parr = tau5.xip;
taus = [tau0parr, tau0marr, tau2parr, tau2marr, tau5parr, tau5marr]
taus_names = ['TAU0P', 'TAU0M','TAU2P','TAU2M', 'TAU5P', 'TAU5M']
##Format of the FITS file output
names=['BIN1', 'BIN2','ANGBIN', 'VALUE', 'ANG']
forms = ['i4', 'i4', 'i4', 'f8', 'f8']
dtype = dict(names = names, formats=forms)
nrows = len(tau0marr)
outdata = np.recarray((nrows, ), dtype=dtype)
covmat = np.diag(np.concatenate( (tau0.varxip, tau0.varxim, tau2.varxip, tau2.varxim, tau5.varxip, tau5.varxim ) ))
hdu = fits.PrimaryHDU()
hdul = fits.HDUList([hdu])
covmathdu = fits.ImageHDU(covmat, name='COVMAT')
hdul.insert(1, covmathdu)
bin1array = np.array([ -999]*nrows)
bin2array = np.array([ -999]*nrows)
angbinarray = np.arange(nrows)
angarray = np.exp(tau0.meanlogr)
for j, nam in enumerate(taus_names):
array_list = [bin1array, bin2array, angbinarray,np.array(taus[j]), angarray ]
for array, name in zip(array_list, names): outdata[name] = array
corrhdu = fits.BinTableHDU(outdata, name=nam)
hdul.insert(j+2, corrhdu)
hdul[1].header['COVDATA'] = True
hdul[1].header['EXTNAME'] = 'COVMAT'
hdul[1].header['NAME_0'] = 'TAU0P'
hdul[1].header['STRT_0'] = 0
hdul[1].header['LEN_0'] = nrows
hdul[1].header['NAME_1'] = 'TAU0M'
hdul[1].header['STRT_1'] = nrows
hdul[1].header['LEN_1'] = nrows
hdul[1].header['NAME_2'] = 'TAU2P'
hdul[1].header['STRT_2'] = 2*nrows
hdul[1].header['LEN_2'] = nrows
hdul[1].header['NAME_3'] = 'TAU2M'
hdul[1].header['STRT_3'] = 3*nrows
hdul[1].header['LEN_3'] = nrows
hdul[1].header['NAME_4'] = 'TAU5P'
hdul[1].header['STRT_4'] = 4*nrows
hdul[1].header['LEN_4'] = nrows
hdul[1].header['NAME_5'] = 'TAU5M'
hdul[1].header['STRT_5'] = 5*nrows
hdul[1].header['LEN_5'] = nrows
hdul[2].header['QUANT1'] = 'GeR'; hdul[3].header['QUANT1'] = 'GeR'
hdul[2].header['QUANT2'] = 'PeR'; hdul[3].header['QUANT2'] = 'PeR'
hdul[4].header['QUANT1'] = 'GeR'; hdul[5].header['QUANT1'] = 'GeR'
hdul[4].header['QUANT2'] = 'PqR'; hdul[5].header['QUANT2'] = 'PqR'
hdul[6].header['QUANT1'] = 'GeR'; hdul[7].header['QUANT1'] = 'GeR'
hdul[6].header['QUANT2'] = 'PwR'; hdul[7].header['QUANT2'] = 'PwR'
filename = os.path.join(outpath, args.filename)
print("Printing file:", filename)
hdul.writeto(filename, overwrite=True)
if __name__ == "__main__":
main()
|
des-science/Y3_shearcat_tests
|
alpha-beta-eta-test/code/essentials/taus.py
|
taus.py
|
py
| 7,792 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15362206849
|
from generator import Generator
from discriminator import Discriminator
from speaker_encoder import SPEncoder
import torch
import torch.nn.functional as F
import os
from os.path import join, basename, exists
import time
import datetime
import numpy as np
from tqdm import tqdm
import numpy as np
import copy
class Solver(object):
def __init__(self, train_loader, config):
"""Initialize configurations."""
self.train_loader = train_loader
self.sampling_rate = config.sampling_rate
self.D_name = config.discriminator
self.SPE_name = config.spenc
self.G_name = config.generator
self.g_hidden_size = config.g_hidden_size
self.num_speakers = config.num_speakers
self.spk_emb_dim = config.spk_emb_dim
self.lambda_rec = config.lambda_rec
self.lambda_id = config.lambda_id
self.lambda_adv = config.lambda_adv
self.batch_size = config.batch_size
self.num_iters = config.num_iters
self.g_lr = config.g_lr
self.d_lr = config.d_lr
self.beta1 = config.beta1
self.beta2 = config.beta2
self.resume_iters = config.resume_iters
self.use_ema = config.use_ema
self.auto_resume = config.auto_resume
self.kernel = config.kernel
self.num_heads = config.num_heads
self.num_res_blocks = config.num_res_blocks
self.use_tensorboard = config.use_tensorboard
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.log_dir = config.log_dir
self.model_save_dir = config.model_save_dir
self.log_step = config.log_step
self.sample_step = config.sample_step
self.model_save_step = config.model_save_step
self.build_model()
if self.use_tensorboard:
self.build_tensorboard()
def build_model(self):
"""Create a generator and a discriminator."""
self.generator = eval(self.G_name)(num_speakers=self.num_speakers,
kernel = self.kernel,
num_heads = self.num_heads,
num_res_blocks = self.num_res_blocks,
spk_emb_dim = self.spk_emb_dim,
)
self.discriminator = eval(self.D_name)(num_speakers=self.num_speakers)
self.sp_enc = eval(self.SPE_name)(num_speakers = self.num_speakers, spk_emb_dim = self.spk_emb_dim)
self.sp_enc.to(self.device)
self.generator.to(self.device)
self.discriminator.to(self.device)
g_params = list(self.generator.parameters())
g_params += list(self.sp_enc.parameters())
d_params = list(self.discriminator.parameters())
self.g_optimizer = torch.optim.Adam(g_params, self.g_lr, [self.beta1, self.beta2])
self.d_optimizer = torch.optim.Adam(d_params, self.d_lr, [self.beta1, self.beta2])
# restore model
if not self.auto_resume:
if self.resume_iters and not self.resume_ft:
print("resuming step %d ..."% self.resume_iters, flush=True)
self.restore_model(self.resume_iters)
else:
ckpt_files = [ int(x.split('-')[0]) for x in os.listdir(self.model_save_dir)]
last_step = sorted(ckpt_files, reverse = True)[0]
print("auto resuming step %d ..."% last_step, flush=True)
self.restore_model(last_step)
self.resume_iters = last_step
if self.use_ema:
self.generator_ema = copy.deepcopy(self.generator)
self.sp_enc_ema = copy.deepcopy(self.sp_enc)
self.print_network(self.generator, 'Generator')
self.print_network(self.discriminator, 'Discriminator')
self.print_network(self.sp_enc, 'SpeakerEncoder')
if self.use_ema:
self.generator_ema.to(self.device)
self.sp_enc_ema.to(self.device)
def print_network(self, model, name):
"""Print out the network information."""
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model, flush=True)
print(name,flush=True)
print("The number of parameters: {}".format(num_params), flush=True)
def moving_average(self, model, model_test, beta = 0.999):
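"""Exponential moving average of parameters: torch.lerp(a, b, w) returns a + w * (b - a),
so each shadow parameter keeps `beta` of its previous value and absorbs (1 - beta) of the
live parameter."""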
for param, param_test in zip(model.parameters(), model_test.parameters()):
param_test.data = torch.lerp(param.data, param_test.data, beta)
def restore_model(self, resume_iters, resume_ft = False):
"""Restore the trained generator and discriminator."""
print('Loading the trained models from step {}...'.format(resume_iters), flush=True)
g_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
d_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(resume_iters))
sp_path = os.path.join(self.model_save_dir, '{}-sp.ckpt'.format(resume_iters))
g_opt_path = os.path.join(self.model_save_dir, '{}-g_opt.ckpt'.format(resume_iters))
d_opt_path = os.path.join(self.model_save_dir, '{}-d_opt.ckpt'.format(resume_iters))
self.generator.load_state_dict(torch.load(g_path, map_location=lambda storage, loc: storage))
self.discriminator.load_state_dict(torch.load(d_path, map_location=lambda storage, loc: storage))
self.sp_enc.load_state_dict(torch.load(sp_path, map_location=lambda storage, loc: storage))
print("loading optimizer",flush=True)
if exists(g_opt_path):
self.g_optimizer.load_state_dict(torch.load(g_opt_path, map_location = lambda storage, loc: storage))
if exists(d_opt_path):
self.d_optimizer.load_state_dict(torch.load(d_opt_path, map_location = lambda storage, loc: storage))
def build_tensorboard(self):
"""Build a tensorboard logger."""
from logger import Logger
self.logger = Logger(self.log_dir)
def update_lr(self, g_lr, d_lr):
"""Decay learning rates of the generator and discriminator."""
for param_group in self.g_optimizer.param_groups:
param_group['lr'] = g_lr
for param_group in self.d_optimizer.param_groups:
param_group['lr'] = d_lr
def reset_grad(self):
"""Reset the gradientgradient buffers."""
self.g_optimizer.zero_grad()
self.d_optimizer.zero_grad()
def label2onehot(self, labels, dim):
"""Convert label indices to one-hot vectors."""
batch_size = labels.size(0)
out = torch.zeros(batch_size, dim)
out[np.arange(batch_size), labels.long()] = 1
return out
def sample_spk_c(self, size):
spk_c = np.random.randint(0, self.num_speakers, size=size)
spk_c_cat = to_categorical(spk_c, self.num_speakers)
return torch.LongTensor(spk_c), torch.FloatTensor(spk_c_cat)
def classification_loss(self, logit, target):
"""Compute softmax cross entropy loss."""
return F.cross_entropy(logit, target)
def load_wav(self, wavfile, sr=16000):
wav, _ = librosa.load(wavfile, sr=sr, mono=True)
return wav_padding(wav, sr=16000, frame_period=5, multiple = 4)
def load_mel(self, melfile):
tmp_mel = np.load(melfile)
return tmp_mel
def train(self):
# Set data loader.
train_loader = self.train_loader
data_iter = iter(train_loader)
g_lr = self.g_lr
d_lr = self.d_lr
start_iters = 0
if self.resume_iters:
start_iters = self.resume_iters
print('Start training...', flush=True)
start_time = time.time()
for i in range(start_iters, self.num_iters):
try:
mc_src, spk_label_org, spk_c_org, mc_trg, spk_label_trg, spk_c_trg = next(data_iter)
except:
data_iter = iter(train_loader)
mc_src, spk_label_org, spk_c_org, mc_trg, spk_label_trg, spk_c_trg = next(data_iter)
mc_src.unsqueeze_(1)
mc_trg.unsqueeze_(1)
mc_src = mc_src.to(self.device)
mc_trg = mc_trg.to(self.device)
spk_label_org = spk_label_org.to(self.device)
spk_c_org = spk_c_org.to(self.device)
spk_label_trg = spk_label_trg.to(self.device)
spk_c_trg = spk_c_trg.to(self.device)
spk_c_trg = self.sp_enc(mc_trg, spk_label_trg)
spk_c_org = self.sp_enc(mc_src, spk_label_org)
d_out_src = self.discriminator(mc_src, spk_label_trg, spk_label_org)
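# Least-squares (LSGAN-style) adversarial objective: the discriminator output for real
# data is pushed toward 1 and for generated data toward 0.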
d_loss_real = torch.mean( (1.0 - d_out_src)**2 )
mc_fake = self.generator(mc_src, spk_c_org, spk_c_trg)
d_out_fake = self.discriminator(mc_fake.detach(), spk_label_org, spk_label_trg)
d_loss_fake = torch.mean(d_out_fake ** 2)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss'] = d_loss.item()
spk_c_trg = self.sp_enc(mc_trg, spk_label_trg)
spk_c_org = self.sp_enc(mc_src, spk_label_org)
mc_fake = self.generator(mc_src, spk_c_org, spk_c_trg)
g_out_src = self.discriminator(mc_fake, spk_label_org, spk_label_trg)
g_loss_fake = torch.mean((1.0 - g_out_src)**2)
mc_reconst = self.generator(mc_fake, spk_c_trg, spk_c_org)
g_loss_rec = torch.mean(torch.abs(mc_src - mc_reconst))
mc_fake_id = self.generator(mc_src, spk_c_org, spk_c_org)
g_loss_id = torch.mean(torch.abs(mc_src - mc_fake_id))
# Backward and optimize.
g_loss = self.lambda_adv * g_loss_fake \
+ self.lambda_rec * g_loss_rec \
+ self.lambda_id * g_loss_id
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_id'] = g_loss_id.item()
if self.use_ema:
self.moving_average(self.generator, self.generator_ema)
self.moving_average(self.sp_enc, self.sp_enc_ema)
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log, flush=True)
if self.use_tensorboard:
for tag, value in loss.items():
self.logger.scalar_summary(tag, value, i+1)
if (i+1) % self.model_save_step == 0:
g_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
g_path_ema = os.path.join(self.model_save_dir, '{}-G.ckpt.ema'.format(i+1))
d_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
sp_path = os.path.join(self.model_save_dir, '{}-sp.ckpt'.format(i+1))
sp_path_ema = os.path.join(self.model_save_dir, '{}-sp.ckpt.ema'.format(i+1))
g_opt_path = os.path.join(self.model_save_dir, '{}-g_opt.ckpt'.format(i+1))
d_opt_path = os.path.join(self.model_save_dir, '{}-d_opt.ckpt'.format(i+1))
torch.save(self.generator.state_dict(), g_path)
if self.use_ema:
torch.save(self.generator_ema.state_dict(), g_path_ema)
torch.save(self.discriminator.state_dict(), d_path)
torch.save(self.sp_enc.state_dict(), sp_path)
if self.use_ema:
torch.save(self.sp_enc_ema.state_dict(), sp_path_ema)
torch.save(self.g_optimizer.state_dict(), g_opt_path)
torch.save(self.d_optimizer.state_dict(), d_opt_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir), flush=True)
|
Mortyzhou-Shef-BIT/DYGANVC
|
solver.py
|
solver.py
|
py
| 12,824 |
python
|
en
|
code
| null |
github-code
|
6
|
24150027900
|
from fastapi import FastAPI, APIRouter,status, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
from services.connectionHobolink import Connection
from routers import login
app=FastAPI(title="WeatherStation")
#routers
app.include_router(login.router)
app.mount("/static", StaticFiles(directory="static"), name="static")
router=APIRouter(prefix="/home",
tags=["Home page"],
responses={status.HTTP_404_NOT_FOUND:{"message":"Page not found"}})
# Modified by me!
template = Jinja2Templates(directory="templates")
@app.get("/", response_class=HTMLResponse)
async def root(request:Request):
return template.TemplateResponse("index.html", {"request": request})
@app.get("/graficas")
async def root(request:Request):
return template.TemplateResponse("graficas.html", {"request": request})
@app.get("/api_data")
async def root():
conn = Connection()
data = conn.dataSensors
data['times'] = conn.timeStation
return {"data":data}
""" @app.get("/login")
async def login(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
"""
|
AlvaroCoder/WeatherStation
|
main.py
|
main.py
|
py
| 1,214 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29644942121
|
from openerp import models, fields, api, _
class QcInspection(models.Model):
_inherit = "qc.inspection"
@api.multi
def _prepare_inspection_line(self, test, line, fill=None):
res = super(QcInspection, self)._prepare_inspection_line(
test, line, fill=fill)
res['min_value_below'] = line.min_value_below
res['max_value_above'] = line.max_value_above
return res
class QcInspectionLine(models.Model):
_inherit = 'qc.inspection.line'
@api.one
@api.depends('quantitative_value', 'min_value', 'max_value',
'test_uom_id', 'min_value_below', 'max_value_above',
'question_type', 'uom_id')
def _compute_tolerance_status(self):
self.tolerance_status = 'not_tolerable'
if self.question_type == 'quantitative':
if self.uom_id.id == self.test_uom_id.id:
amount = self.quantitative_value
else:
amount = self.env['product.uom']._compute_qty(
self.uom_id.id, self.quantitative_value,
self.test_uom_id.id)
if amount >= self.min_value_below:
if amount >= self.min_value:
if amount <= self.max_value:
self.tolerance_status = 'optimal'
elif amount <= self.max_value_above:
self.tolerance_status = 'tolerable'
else:
self.tolerance_status = 'tolerable'
elif self.question_type == 'qualitative':
self.tolerance_status = self.qualitative_value.tolerance_status
@api.one
@api.depends('possible_ql_values', 'min_value', 'max_value', 'test_uom_id',
'question_type', 'min_value_below', 'max_value_above')
def get_valid_values(self):
if self.question_type == 'qualitative':
super(QcInspectionLine, self).get_valid_values()
self.valid_values = ", ".join([x.name for x in
self.possible_ql_values if x.ok])
self.valid_values += ", " + ", ".join(
[_("%s (tolerable)") % x.name for x in self.possible_ql_values
if not x.ok and x.tolerance_status == 'tolerable'])
elif self.question_type == 'quantitative':
self.valid_values = "(%s) %s-%s (%s)" % (
self.min_value_below, self.min_value, self.max_value,
self.max_value_above)
if self.env.ref("product.group_uom") in self.env.user.groups_id:
self.valid_values += " %s" % self.test_uom_id.name
tolerance_status = fields.Selection(
[('optimal', 'Optimal'),
('tolerable', 'Tolerable'),
('not_tolerable', 'Not tolerable')],
string='Tolerance status', compute='_compute_tolerance_status')
min_value_below = fields.Float(string='Min. tolerable')
max_value_above = fields.Float(string='Max. tolerable')
|
odoomrp/odoomrp-wip
|
quality_control_tolerance/models/qc_inspection.py
|
qc_inspection.py
|
py
| 2,979 |
python
|
en
|
code
| 119 |
github-code
|
6
|
15152787587
|
# -*- coding: utf-8 -*
# This program is used for model testing
import os
import torch
import numpy as np
import torch.nn as nn
from evaluation import HKOEvaluation
from ium_data.bj_iterator import BJIterator
if __name__ == "__main__":
# The best model
test_model = torch.load('./checkpoints/trained_model_12000.pkl' )
test_model.eval()
test_bj_iter = BJIterator(datetime_set="bj_test_set.txt",sample_mode="sequent",
seq_len=15,width=600,height=600,
begin_idx=None, end_idx=None)
for i in range(10):
frame_data, mask_dat, datetime_batch, _ = test_bj_iter.sample(batch_size=2)
frame_data = torch.from_numpy(frame_data)
frame_data = frame_data.permute(1, 2, 0, 3, 4).contiguous()
test_input = frame_data[:, :, 0:5, :, :].cuda()
test_label = frame_data[:, :, 5:15, :, :].cuda()
# Use 5 frames to predict the following 10 frames, i.e. the next hour
output1 = test_model(test_input)
output2 = test_model(output1)
output = torch.cat((output1,output2),2)
test_label = test_label * 80
output = output * 80
print('testing dataset {}'.format(i))
# Compute the evaluation metrics
evaluation = HKOEvaluation(seq_len=10, use_central=False)
test_label = test_label.cpu().detach().numpy().transpose(2, 0, 1, 3, 4)
output = output.cpu().detach().numpy().transpose(2, 0, 1, 3, 4)
evaluation.update(test_label, output, mask=None)
POD, CSI, FAR = evaluation.calculate_stat()
# Write the results to a txt file
evaluation.print_stat_readable()
evaluation.save_txt_readable('./results/test_evaluation.txt')
|
LiangHe77/UNet_v1
|
test.py
|
test.py
|
py
| 1,817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17324365412
|
from motor import motor_asyncio
from .model import Guild
import os
class Database:
def __init__(self, *, letty):
self.letty = letty
self.connection = motor_asyncio.AsyncIOMotorClient(os.environ['DB_URL'])
self.db = db = self.connection[os.environ['DB_NAME']]
self.guild = db.guilds
async def get_guild(self, guild_id):
data = await self.guild.find_one({"_id": guild_id})
if data is not None:
return Guild(data, self.guild)
else:
return await self.register_guild(guild_id)
async def register_guild(self, guild_id):
data = {
"_id": guild_id,
"config":{"prefix":"lt.","language":"pt_BR"},
"disable":{"command":[],"channel":[],"role":[],"member":[]}
}
await self.guild.insert_one(data)
return Guild(data, self.guild)
|
WhyNoLetty/Letty
|
database/base.py
|
base.py
|
py
| 890 |
python
|
en
|
code
| 7 |
github-code
|
6
|
17732707406
|
#!/usr/bin/env python
import subprocess
import time
import socket
import re
from threading import Thread
import tkinter as tk
class TCPClient(Thread):
def __init__(self, host, port, device):
Thread.__init__(self)
self.host = host
self.port = port
self.device = device
self.sim = None
self.running = False
self.connected = False
self.sock = None
def connect(self):
self.running = True
self.start()
def reconnect(self):
while not self.connected:
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.connected = True
print("Connected")
except Exception:
pass
def disconnect(self):
self.connected = False
self.running = False
def run(self):
self.reconnect()
while self.running:
if not self.sim:
msgs = self.scrape_data()
try:
self.sock.sendall(b'AT+CWLAP\n')
for msg in msgs:
self.sock.sendall(str.encode('+CWLAP:(5,"{}",{},"{}",{})\n'.format(msg[0], msg[1], msg[2], msg[3])))
self.sock.sendall(b'\nOK\n')
except Exception as e:
print(e)
self.connected = False
print("Try Reconnect")
self.reconnect()
else:
try:
for msg in self.sim:
self.sock.sendall(b'AT+CWLAP')
self.sock.sendall(str.encode(msg))
time.sleep(2)
except Exception as e:
print(e)
self.connected = False
print("Try Reconnect")
self.reconnect()
time.sleep(1)
self.sock.close()
def scrape_data(self):
clean_data = []
raw_data = subprocess.Popen("iwlist " + self.device + " scanning", shell=True, stdout=subprocess.PIPE).stdout.read().decode("utf-8")
raw_data_splitted = raw_data.split("Cell ")[1:]
for raw_cell in raw_data_splitted:
cell = raw_cell.split("Mode:")[0]
# print(cell)
cell_data = []
m = re.search('.*Frequency:2.4.* GHz', cell)
if m:
m = re.search('.*ESSID:"(.*)".*', cell)
if m:
cell_data.append(m.group(1))
else:
cell_data.append("none")
m = re.search('.* Signal level=(.\d+) dBm.*', cell)
if m:
cell_data.append(m.group(1))
else:
cell_data.append("none")
m = re.search('.+ - Address: (.+).*', cell)
if m:
cell_data.append(m.group(1))
else:
cell_data.append("none")
m = re.search('.*Channel:(\d+).*', cell)
if m:
cell_data.append(m.group(1))
else:
cell_data.append("none")
clean_data.append(cell_data)
# print(clean_data)
return clean_data
class App(tk.Tk):
def __init__(self, host, port, device, filename):
tk.Tk.__init__(self)
self.wm_title("Spyduino Simulator")
self.protocol("WM_DELETE_WINDOW", self.on_close)
self.host = host
self.port = port
self.device = device
self.filename = filename
self.TCPClient = TCPClient(self.host, self.port, self.device)
# Define LEDs
self.frame = tk.Frame(self, background="bisque")
self.frame.pack(side="top", fill="both", expand=True)
self.canvas = tk.Canvas(self.frame, bg="green")
self.canvas.create_oval(10, 10, 40, 40, fill="red", tags="ledred")
self.led_red_on = True
self.canvas.create_oval(10, 50, 40, 80, fill="yellow", tags="ledyellow")
self.led_yellow_on = False
self.canvas.create_oval(10, 90, 40, 120, fill="#5EFF00", tags="ledgreen")
self.led_green_on = False
self.canvas.create_rectangle(20, 160, 60, 200, fill="grey", tags="s2")
self.canvas.create_rectangle(20, 210, 60, 250, fill="grey", tags="s1")
self.canvas.create_rectangle(280, 10, 320, 50, fill="grey", tags="rst")
self.canvas.pack(side="top", fill="both", expand=True)
# self.canvas.bind('<Button-1>', self.onCanvasClick)
self.canvas.tag_bind('s1', '<Button-1>', self.on_S1)
self.canvas.tag_bind('s2', '<Button-1>', self.on_S2)
self.canvas.tag_bind('rst', '<Button-1>', self.on_RST)
self.draw_light()
print("Running...")
# def onCanvasClick(self, event):
# print('Got canvas click')
# print(event.x, event.y, event.widget)
# print()
def on_S1(self, event):
if not self.TCPClient.connected:
with open(self.filename, 'r') as f:
self.TCPClient.sim = f.read().split("AT+CWLAP")[1:]
self.on_S2(event)
def on_S2(self, event):
if not self.TCPClient.connected:
self.wifi()
print("Try Connect")
self.TCPClient.connect()
else:
print("Try Close")
self.on_RST(None)
def on_RST(self, event):
self.TCPClient.disconnect()
self.TCPClient.join()
self.led_yellow_on = False
self.led_green_on = False
self.led_red_on = True
self.TCPClient = TCPClient(self.host, self.port, self.device)
def wifi(self):
print("WIFI Simulation")
self.led_red_on = False
self.canvas.itemconfig("ledred", fill="white")
self.canvas.itemconfig("ledgreen", fill="#5EFF00")
self.led_green_on = True
def draw_light(self):
if self.led_red_on:
self.canvas.itemconfig("ledred", fill="red")
self.light_on = False
else:
self.canvas.itemconfig("ledred", fill="white")
self.light_on = True
if self.TCPClient.connected:
self.canvas.itemconfig("ledyellow", fill="yellow")
self.light_on = False
else:
self.canvas.itemconfig("ledyellow", fill="white")
self.light_on = True
if self.led_green_on:
self.canvas.itemconfig("ledgreen", fill="#5EFF00")
self.light_on = False
else:
self.canvas.itemconfig("ledgreen", fill="white")
self.light_on = True
self.after(1000, self.draw_light)
def on_close(self):
self.TCPClient.disconnect()
self.destroy()
HOST = "127.0.0.1"
PORT = 1337
#DEVICE = "wlp3s0"
DEVICE = "wlan0"
FILENAME = "simfile.txt"
app = App(HOST, PORT, DEVICE, FILENAME)
app.mainloop()
|
Drake81/spyduino
|
spyduino_simulator/spyduino-simulator.py
|
spyduino-simulator.py
|
py
| 6,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13020029275
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import ensemble
def data_accuracy(predictions, real):
"""
Check the accuracy of the estimated prices
"""
# This will be a list, the ith element of this list will be abs(prediction[i] - real[i])/real[i]
differences = list(map(lambda x: abs(x[0] - x[1]) / x[1], zip(predictions, real)))
# Find the value for the bottom t percentile and the top t percentile
f = 0
t = 90
percentiles = np.percentile(differences, [f, t])
differences_filter = []
for diff in differences:
# Keep only values in between f and t percentile
if percentiles[0] < diff < percentiles[1]:
differences_filter.append(diff)
print(f"Differences excluding outliers: {np.average(differences_filter)}")
# clf = ensemble.GradientBoostingRegressor(n_estimators = 1100, max_depth = 15, min_samples_split = 9,learning_rate = 0.5, loss = 'squared_error')
# clf = ensemble.GradientBoostingRegressor(n_estimators = 1000, max_depth = 15, min_samples_split = 9, learning_rate = 0.2, loss = 'squared_error')
clf = ensemble.GradientBoostingRegressor(n_estimators = 600, max_depth = 7, min_samples_split = 5, learning_rate = 0.7, loss = 'squared_error')
data = pd.read_csv("PROJECTS/house-prices/HousePriceDataTRAINING.csv")
data.columns = ["long", "lat", "date", "price", "bed"]
# conv_dates = [0 if ("2011" in values or "2012" in values or "2013" in values or "2014" in values or "2015" in values or "2016" in values) else 1 for values in data.date ]
conv_dates = []
for i in range(data.date.size):
conv_dates.append(abs(int(data.at[i, "date"].split("/")[0]) + int(data.at[i, "date"].split("/")[1])*31 + int(data.at[i, "date"].split("/")[2])*366 - 737691))
data['date'] = conv_dates
labels = data['price']
train1 = data.drop('price', axis=1)
x_train, x_test, y_train, y_test = train_test_split(
train1, labels, test_size=0.10)
# y_train = list(map(lambda p: np.log2(p), y_train))
clf.fit(x_train, y_train)
# x_pred = list(map(lambda p: 2**p, clf.predict(x_test)))
x_pred = clf.predict(x_test)
# print(clf.get_params())
data_accuracy(x_pred, y_test)  # the function takes (predictions, real) and prints the result itself
|
V1K1NGbg/House-Price-Prediction-Project
|
testing.py
|
testing.py
|
py
| 2,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10425091071
|
#-*- coding: utf-8 -*-
u"""
.. moduleauthor:: Martí Congost <[email protected]>
"""
from cocktail import schema
from .block import Block
class CustomBlock(Block):
instantiable = True
type_group = "blocks.custom"
view_class = schema.String(
required = True,
shadows_attribute = True,
before_member = "controller",
member_group = "behavior"
)
|
marticongost/woost
|
woost/models/customblock.py
|
customblock.py
|
py
| 402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27949370562
|
# String formatting/Templating
# 1
name = "Mohib"
greeting = f"Hello, {name}"
print(f"Hello, {name}")
# 2
greeting = "Hi, {}"
with_name = greeting.format(name)
print(with_name)
# 3
longer_phrase = "Hello, {}. Today is {}."
formated = longer_phrase.format("Rahman", "Monday")
print(formated)
|
newmohib/python-fundamental-2
|
string_formatting.py
|
string_formatting.py
|
py
| 297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12858137004
|
"""
We are given a directed graph. We are given also a set of pairs of vertices.
Find the shortest distance between each pair of vertices or -1 if there is no path connecting them.
On the first line, you will get N, the number of vertices in the graph.
On the second line, you will get P, the number of pairs between which to find the shortest distance.
On the next N lines will be the edges of the graph and on the next P lines, the pairs.
"""
from collections import deque
from typing import Dict, List, Union
def build_graph(nodes: int) -> Dict[int, List[int]]:
graph = {}
for _ in range(nodes):
node, children_str = input().split(':')
node = int(node)
children = [int(x) for x in children_str.split(' ')] if children_str else []
graph[node] = children
return graph
def bfs(graph: Dict[int, List[int]], source: int, destination: int) -> Dict[int, Union[None, int]]:
queue = deque([source])
visited = {source}
parent = {source: None}
while queue:
node = queue.popleft()
if node == destination:
break
for child in graph[node]:
if child in visited:
continue
queue.append(child)
visited.add(child)
parent[child] = node
return parent
def find_size(parent: Dict[int, Union[None, int]], destination: int) -> int:
node = destination
size = -1
while node is not None:
node = parent[node]
size += 1
return size
nodes = int(input())
pairs = int(input())
graph = build_graph(nodes)
for _ in range(pairs):
source, destination = [int(x) for x in input().split('-')]
parent = bfs(graph, source, destination)
if destination not in parent:
print(f'{{{source}, {destination}}} -> -1')
continue
size = find_size(parent, destination)
print(f'{{{source}, {destination}}} -> {size}')
# Test solution at:
# https://judge.softuni.org/Contests/Practice/Index/3465#0
|
dandr94/Algorithms-with-Python
|
04. Minimum-spanning-tree-and-Shortest-path-in-Graph/02. Exercise/01. distance_between_vertices.py
|
01. distance_between_vertices.py
|
py
| 2,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44870137636
|
#!/usr/bin/python3
def main():
d = { "one": 1, "two": 2, "three": 3, "four": 4, "five": 5 }
print("Dictionaries: ", d)
for k in d:
print(k, d[k])
print("Sorted by Keys: ")
for k in sorted(d.keys()):
print(k, d[k])
print("Dictionaries are mutable objects")
dd = dict(
one = 1, two = 2, three = 3, four = 4, five = "five"
)
dd["seven"] = 7
for kk in sorted(dd.keys()):
print(type(kk), kk, type(dd), dd[kk])
if __name__ == "__main__" : main()
|
sandeepgholve/Python_Programming
|
Python 3 Essential Training/05 Variables/variables-dictionaries.py
|
variables-dictionaries.py
|
py
| 539 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31412656744
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
Life's pathetic, have fun ("▔□▔)/hi~♡ Nasy.
Excited without bugs::
| * *
| . .
| .
| * ,
| .
|
| *
| |\___/|
| ) -( . ·
| =\ - /=
| )===( *
| / - \
| |- |
| / - \ 0.|.0
| NASY___\__( (__/_____(\=/)__+1s____________
| ______|____) )______|______|______|______|_
| ___|______( (____|______|______|______|____
| ______|____\_|______|______|______|______|_
| ___|______|______|______|______|______|____
| ______|______|______|______|______|______|_
| ___|______|______|______|______|______|____
author : Nasy https://nasy.moe
date : Dec 23, 2018
email : Nasy <[email protected]>
filename : test_nacf.py
project : tests
license : LGPL-3.0+
There are more things in heaven and earth, Horatio, than are dreamt.
-- From "Hamlet"
"""
# Standard Library
import unittest
# Prelude
from nalude import flatten
# Other Packages
from nacf import (css, get, gets, html, json, post, text,
urls, posts, xpath, parallel, __version__,)
from nacf.types import Res, Json, Iterable
class NacfTest(unittest.TestCase):
"""Nasy crawler framework test."""
def test_version(self) -> None:
"""Test version of nacf."""
with open("pyproject.toml") as f:
for line in f:
if "version" in line:
version = line.split()[-1].replace('"', "")
break
self.assertEqual(__version__, version)
def test_get_css(self) -> None:
"""Test get and css."""
@get("python.org")
@css(".widget-title", first=True)
@text
def crawler(res: str) -> str:
"""Test crawler."""
return res
self.assertEqual(crawler(), "Get Started")
def test_gets_xpath(self) -> None:
"""Test gets and xpath."""
@gets(["python.org", "python.org"])
@xpath("//*[@class='widget-title']", first=True)
@text
def crawler(res: str) -> str:
"""Test crawler."""
return res
self.assertEqual(
list(flatten(crawler())), ["Get Started", "Get Started"]
)
def test_post_json(self) -> None:
"""Test post, posts and json."""
@post(
"https://app.fakejson.com/q",
json={
"token": "FOk7RjbecxtWJHljGjCNjg",
"data": {
"colorText": "colorText",
"colorHex": "colorHex",
"colorRGB": "colorRGB",
"colorHSL": "colorHSL",
},
},
)
@json
def crawler(res: Json) -> Json:
"""Test crawler."""
return res
self.assertEqual(
crawler(),
{
"colorText": "tufts blue",
"colorRGB": "rgb(22, 75, 56)",
"colorHex": "colorHex",
"colorHSL": "hsl(233, 14%, 14%)",
},
)
def test_posts(self) -> None:
"""Test posts and json."""
@posts(
["https://app.fakejson.com/q"] * 3,
jsons=[
{
"token": "FOk7RjbecxtWJHljGjCNjg",
"data": {
"colorText": "colorText",
"colorHex": "colorHex",
"colorRGB": "colorRGB",
"colorHSL": "colorHSL",
},
}
]
* 3,
)
def crawler(res: Res) -> Iterable[Json]:
"""Test crawler."""
return map(lambda r: r.json(), res)
self.assertEqual(
list(flatten(crawler())),
[
{
"colorText": "tufts blue",
"colorRGB": "rgb(22, 75, 56)",
"colorHex": "colorHex",
"colorHSL": "hsl(233, 14%, 14%)",
}
]
* 3,
)
def test_urls_text(self) -> None:
"""Test urls and text."""
@urls(["python.org", "python.org"])
@gets()
@css(".widget-title", first=True)
@text
def crawler(res: str) -> str:
"""Test crawler."""
return res
self.assertEqual(
list(flatten(crawler())), ["Get Started", "Get Started"]
)
def test_parallel_html(self) -> None:
"""Test parallel."""
@urls(["python.org", "python.org"])
@parallel()
@get()
@html
def crawler(res: Res) -> str:
"""Test crawler."""
return res.find(".widget-title", first=True).text
self.assertEqual(
list(flatten(crawler())), ["Get Started", "Get Started"]
)
def run() -> None:
"""Run test."""
unittest.main()
if __name__ == "__main__":
run()
| nasyxx/nacf | tests/test_nacf.py | test_nacf.py | py | 5,224 | python | en | code | 9 | github-code | 6 |
28153506484 |
import json
import numpy as np
def load_json(file_path : str) -> dict:
"""
Loads .json file types.
Use json python library to load a .json file.
Parameters
----------
file_path : string
Path to file.
Returns
-------
json file : dictionary
.json dictionary file.
See Also
--------
read_GMR_file
save_json_dicts
Notes
-----
json files are typically dictionaries, as such the function is intended for
use with dictionaries stored in .json file types.
Examples
--------
my_dictionary = load_json(file_path="/Path/To/File")
"""
with open(file_path, 'r') as file:
return json.load(file)
def read_GMR_file(file_path):
'''
Load txt output from GMRX spectrometer. Return wavelength in nm.
Args:
file_path: <string> path to file
Returns:
wavelength: <array> wavelength array
intensity: <array> intensity array
'''
try:
wavelength, intensity = np.genfromtxt(
fname=file_path,
delimiter=';',
unpack=True)
    except Exception:
        # fall back to comma-delimited files
wavelength, intensity = np.genfromtxt(
fname=file_path,
delimiter=',',
unpack=True)
return wavelength, intensity
def convert(o):
"""
    Convert numpy scalar types to native Python types.
    Used as the ``default`` handler for json.dump when saving dictionaries that
    contain numpy values.
    Parameters
    ----------
    o : object
        Object to convert.
    Returns
    -------
    value
        The equivalent built-in Python value if o is a numpy scalar.
    Raises
    ------
    TypeError
        If o is not a numpy scalar.
    See Also
    --------
    save_json_dicts
    Notes
    -----
    None.
    Examples
    --------
    None.
"""
if isinstance(o, np.generic):
return o.item()
raise TypeError
def save_json_dicts(out_path : str,
dictionary : dict) -> None:
"""
Save .json file types.
Use json python library to save a dictionary to a .json file.
Parameters
----------
out_path : string
Path to file.
dictionary : dictionary
Dictionary to save.
Returns
-------
None
See Also
--------
load_json
Notes
-----
json files are typically dictionaries, as such the function is intended for
use with dictionaries stored in .json file types.
Examples
--------
save_json_dicts(
out_path="/Path/To/File",
dictionary=my_dictionary)
"""
with open(out_path, 'w') as outfile:
json.dump(
dictionary,
outfile,
indent=2,
default=convert)
outfile.write('\n')
def reflectometer_in(file_path : str) -> list:
"""
Loads text file output from the Filmetrics spectroscopic reflectometer.
Loads a 3 column, comma delimited, .fitnk file output from a Filmetrics F20
spectroscopic reflectometer.
Parameters
----------
file_path: string
Path to file.
Returns
-------
col0, col1, col2: list
Typically wavelength (nm), n, k.
See Also
--------
numpy genfromtxt
Notes
-----
The .fitnk file from the Filmetrics F20 contains 5 header rows and 6 footer
rows that are seemingly not useful information. The function skips over the
rows.
Examples
--------
None
"""
col0, col1, col2 = np.genfromtxt(
fname=file_path,
delimiter=',',
skip_header=5,
skip_footer=6,
unpack=True)
return col0, col1, col2
def ellipsometer_in(file_path : str) -> list:
"""
Load text file output from the J.A. Woollam VASE.
Loads a 5 column, comma delimited, .csv file output from a J.A. Woollam
variable angle spectroscopic ellipsometer.
Parameters
----------
file_path: string
Path to file.
Returns
-------
col0, col1, col2, col3, col4: list
Typically wavelength (nm), sample psi, sample delta, model psi, model
delta.
See Also
--------
numpy genfromtxt
Notes
-----
None
Example
-------
None
"""
col0, col1, col2, col3, col4 = np.genfromtxt(
fname=file_path,
delimiter=',',
skip_header=2,
usecols=(0, 1, 2, 3, 4),
unpack=True)
return col0, col1, col2, col3, col4
| jm1261/PeakFinder | src/fileIO.py | fileIO.py | py | 4,274 | python | en | code | 0 | github-code | 6 |
25070502975 |
import pydoc
import logging
from typing import Generic, Type, Optional, Union, TypeVar, Any, NamedTuple
from django.db import models
from django.conf import settings
from django.forms.models import model_to_dict
from rest_framework import serializers
logger = logging.getLogger(__name__)
T = TypeVar("T")
class AbstractSerializer:
def create(self, validated_data, **kwargs):
super().create(validated_data)
def update(self, instance, validated_data, **kwargs):
super().update(instance, validated_data, **kwargs)
class Context(NamedTuple):
user: Any
org: Any = None
def __getitem__(self, item):
return getattr(self, item)
class Serializer(serializers.Serializer, AbstractSerializer):
pass
class ModelSerializer(serializers.ModelSerializer, AbstractSerializer):
def create(self, data: dict, **kwargs):
return self.Meta.model.objects.create(**data)
def update(self, instance, data: dict, **kwargs):
for name, value in data.items():
if name != "created_by":
setattr(instance, name, value)
instance.save()
return instance
"""
Custom serializer utility functions
"""
def PaginatedResult(serializer_name: str, content_serializer: Type[Serializer]):
return type(
serializer_name,
(Serializer,),
dict(
next=serializers.URLField(
required=False, allow_blank=True, allow_null=True
),
previous=serializers.URLField(
required=False, allow_blank=True, allow_null=True
),
results=content_serializer(many=True),
),
)
class _SerializerDecoratorInitializer(Generic[T]):
def __getitem__(self, serializer_type: Type[Serializer]):
class Decorator:
def __init__(self, instance=None, data: Union[str, dict] = None, **kwargs):
self._instance = instance
if data is None and instance is None:
self._serializer = None
else:
self._serializer: serializer_type = (
serializer_type(data=data, **kwargs)
if instance is None
else serializer_type(
instance, data=data, **{**kwargs, "partial": True}
)
)
self._serializer.is_valid(raise_exception=True)
@property
def data(self) -> Optional[dict]:
return (
self._serializer.validated_data
if self._serializer is not None
else None
)
@property
def instance(self):
return self._instance
def save(self, **kwargs) -> "Decorator":
if self._serializer is not None:
self._instance = self._serializer.save(**kwargs)
return self
return Decorator
SerializerDecorator = _SerializerDecoratorInitializer()
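# Hedged usage note (illustration only; "MySerializer" and "payload" are hypothetical):
#   instance = SerializerDecorator[MySerializer](data=payload).save().instance
# This mirrors the calls in save_many_to_many_data / save_one_to_one_data below.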
def owned_model_serializer(serializer: Type[Serializer]):
class MetaSerializer(serializer):
def __init__(self, *args, **kwargs):
if "context" in kwargs:
context = kwargs.get("context") or {}
user = (
context.get("user") if isinstance(context, dict) else context.user
)
org = context.get("org") if isinstance(context, dict) else context.org
if settings.MULTI_ORGANIZATIONS and org is None:
import purplship.server.orgs.models as orgs
org = orgs.Organization.objects.filter(
users__id=getattr(user, "id", None)
).first()
self.__context: Context = Context(user, org)
else:
self.__context: Context = getattr(self, "__context", None)
kwargs.update({"context": self.__context})
super().__init__(*args, **kwargs)
def create(self, data: dict, **kwargs):
payload = {"created_by": self.__context.user, **data}
try:
instance = super().create(payload, context=self.__context)
link_org(instance, self.__context) # Link to organization if supported
except Exception as e:
logger.exception(e)
raise e
return instance
def update(self, instance, data: dict, **kwargs):
payload = {k: v for k, v in data.items()}
return super().update(instance, payload, context=self.__context)
return type(serializer.__name__, (MetaSerializer,), {})
def link_org(entity: ModelSerializer, context: Context):
if hasattr(entity, "org") and context.org is not None and not entity.org.exists():
entity.link = entity.__class__.link.related.related_model.objects.create(
org=context.org, item=entity
)
entity.save(
update_fields=(["created_at"] if hasattr(entity, "created_at") else [])
)
def save_many_to_many_data(
name: str,
serializer: ModelSerializer,
parent: models.Model,
payload: dict = None,
**kwargs,
):
if not any((key in payload for key in [name])):
return None
collection_data = payload.get(name)
collection = getattr(parent, name)
if collection_data is None and any(collection.all()):
for item in collection.all():
item.delete()
for data in collection_data:
item_instance = (
collection.filter(id=data.pop("id")).first() if "id" in data else None
)
if item_instance is None:
item = SerializerDecorator[serializer](data=data, **kwargs).save().instance
else:
item = (
SerializerDecorator[serializer](
instance=item_instance, data=data, **{**kwargs, "partial": True}
)
.save()
.instance
)
getattr(parent, name).add(item)
def save_one_to_one_data(
name: str,
serializer: ModelSerializer,
parent: models.Model = None,
payload: dict = None,
**kwargs,
):
if name not in payload:
return None
data = payload.get(name)
instance = getattr(parent, name, None)
if data is None and instance is not None:
instance.delete()
setattr(parent, name, None)
if instance is None:
new_instance = (
SerializerDecorator[serializer](data=data, **kwargs).save().instance
)
parent and setattr(parent, name, new_instance)
return new_instance
return (
SerializerDecorator[serializer](
instance=instance, data=data, partial=True, **kwargs
)
.save()
.instance
)
def allow_model_id(model_paths: list):
def _decorator(serializer: Type[Serializer]):
class ModelIdSerializer(serializer):
def __init__(self, *args, **kwargs):
for param, model_path in model_paths:
content = kwargs.get("data", {}).get(param)
values = content if isinstance(content, list) else [content]
model = pydoc.locate(model_path)
if any([isinstance(val, str) for val in values]):
new_content = []
for value in values:
if isinstance(value, str) and (model is not None):
data = model_to_dict(model.objects.get(pk=value))
("id" in data) and data.pop("id")
new_content.append(data)
kwargs.update(
data={
**kwargs["data"],
param: new_content
if isinstance(content, list)
else next(iter(new_content)),
}
)
super().__init__(*args, **kwargs)
return type(serializer.__name__, (ModelIdSerializer,), {})
return _decorator
def make_fields_optional(serializer: Type[ModelSerializer]):
_name = f"Partial{serializer.__name__}"
class _Meta(serializer.Meta):
extra_kwargs = {
**getattr(serializer.Meta, "extra_kwargs", {}),
**{
field.name: {"required": False}
for field in serializer.Meta.model._meta.fields
},
}
return type(_name, (serializer,), dict(Meta=_Meta))
def exclude_id_field(serializer: Type[ModelSerializer]):
class _Meta(serializer.Meta):
exclude = [*getattr(serializer.Meta, "exclude", []), "id"]
return type(serializer.__name__, (serializer,), dict(Meta=_Meta))
| danh91/purplship | server/modules/core/purplship/server/serializers/abstract.py | abstract.py | py | 8,956 | python | en | code | null | github-code | 6 |
71968698427 |
import torch.nn as nn
from collections import OrderedDict
from graph_ter_seg.tools import utils
class EdgeConvolution(nn.Module):
def __init__(self, k, in_features, out_features):
super(EdgeConvolution, self).__init__()
self.k = k
self.conv = nn.Conv2d(
in_features * 2, out_features, kernel_size=1, bias=False
)
self.bn = nn.BatchNorm2d(out_features)
self.relu = nn.LeakyReLU(negative_slope=0.2)
def forward(self, x):
x = utils.get_edge_feature(x, k=self.k)
x = self.relu(self.bn(self.conv(x)))
x = x.max(dim=-1, keepdim=False)[0]
return x
class MultiEdgeConvolution(nn.Module):
def __init__(self, k, in_features, mlp):
super(MultiEdgeConvolution, self).__init__()
self.k = k
self.conv = nn.Sequential()
for index, feature in enumerate(mlp):
if index == 0:
layer = nn.Sequential(OrderedDict([
('conv%d' %index, nn.Conv2d(
in_features * 2, feature, kernel_size=1, bias=False
)),
('bn%d' % index, nn.BatchNorm2d(feature)),
('relu%d' % index, nn.LeakyReLU(negative_slope=0.2))
]))
else:
layer = nn.Sequential(OrderedDict([
('conv%d' %index, nn.Conv2d(
mlp[index - 1], feature, kernel_size=1, bias=False
)),
('bn%d' % index, nn.BatchNorm2d(feature)),
('relu%d' % index, nn.LeakyReLU(negative_slope=0.2))
]))
self.conv.add_module('layer%d' % index, layer)
def forward(self, x):
x = utils.get_edge_feature(x, k=self.k)
x = self.conv(x)
x = x.max(dim=-1, keepdim=False)[0]
return x
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
def main():
conv = MultiEdgeConvolution(k=20, mlp=(64, 64), in_features=64)
print(conv)
if __name__ == '__main__':
main()
| gyshgx868/graph-ter | graph_ter_seg/models/layers.py | layers.py | py | 2,158 | python | en | code | 56 | github-code | 6 |
6496477997 |
import os
from util import build_utils
def FilterProguardOutput(output):
'''ProGuard outputs boring stuff to stdout (proguard version, jar path, etc)
as well as interesting stuff (notes, warnings, etc). If stdout is entirely
boring, this method suppresses the output.
'''
ignore_patterns = [
'ProGuard, version ',
'Reading program jar [',
'Reading library jar [',
'Preparing output jar [',
' Copying resources from program jar [',
]
for line in output.splitlines():
for pattern in ignore_patterns:
if line.startswith(pattern):
break
else:
# line doesn't match any of the patterns; it's probably something worth
# printing out.
return output
return ''
class ProguardCmdBuilder(object):
def __init__(self, proguard_jar):
assert os.path.exists(proguard_jar)
self._proguard_jar_path = proguard_jar
self._test = None
self._mapping = None
self._libraries = None
self._injars = None
self._configs = None
self._outjar = None
def outjar(self, path):
assert self._outjar is None
self._outjar = path
def is_test(self, enable):
assert self._test is None
self._test = enable
def mapping(self, path):
assert self._mapping is None
assert os.path.exists(path), path
self._mapping = path
def libraryjars(self, paths):
assert self._libraries is None
for p in paths:
assert os.path.exists(p), p
self._libraries = paths
def injars(self, paths):
assert self._injars is None
for p in paths:
assert os.path.exists(p), p
self._injars = paths
def configs(self, paths):
assert self._configs is None
for p in paths:
assert os.path.exists(p), p
self._configs = paths
def build(self):
assert self._injars is not None
assert self._outjar is not None
assert self._configs is not None
cmd = [
'java', '-jar', self._proguard_jar_path,
'-forceprocessing',
]
if self._test:
cmd += [
'-dontobfuscate',
'-dontoptimize',
'-dontshrink',
'-dontskipnonpubliclibraryclassmembers',
]
if self._mapping:
cmd += [
'-applymapping', self._mapping,
]
if self._libraries:
cmd += [
'-libraryjars', ':'.join(self._libraries),
]
cmd += [
'-injars', ':'.join(self._injars)
]
for config_file in self._configs:
cmd += ['-include', config_file]
# The output jar must be specified after inputs.
cmd += [
'-outjars', self._outjar,
'-dump', self._outjar + '.dump',
'-printseeds', self._outjar + '.seeds',
'-printusage', self._outjar + '.usage',
'-printmapping', self._outjar + '.mapping',
]
return cmd
def GetInputs(self):
inputs = [self._proguard_jar_path] + self._configs + self._injars
if self._mapping:
inputs.append(self._mapping)
if self._libraries:
inputs += self._libraries
return inputs
def CheckOutput(self):
build_utils.CheckOutput(self.build(), print_stdout=True,
stdout_filter=FilterProguardOutput)
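# Hedged usage sketch (illustration only; the jar/flag paths below are hypothetical
# and the builder's asserts expect them to exist on disk):
#
#   proguard = ProguardCmdBuilder('third_party/proguard/proguard.jar')
#   proguard.injars(['obj/app.pre_proguard.jar'])
#   proguard.configs(['app/proguard.flags'])
#   proguard.outjar('obj/app.jar')
#   proguard.CheckOutput()  # runs the java command, filtering boring ProGuard stdout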
| danrwhitcomb/Monarch | build/android/gyp/util/proguard_util.py | proguard_util.py | py | 3,140 | python | en | code | 5 | github-code | 6 |
30241855772 |
import os
from autoPyTorch.core.api import AutoNet
from autoPyTorch.pipeline.base.pipeline import Pipeline
from autoPyTorch.pipeline.nodes.one_hot_encoding import OneHotEncoding
from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector
from autoPyTorch.pipeline.nodes.ensemble import EnableComputePredictionsForEnsemble, SavePredictionsForEnsemble, BuildEnsemble, EnsembleServer
from autoPyTorch.pipeline.nodes.create_dataset_info import CreateDatasetInfo
class AutoNetEnsemble(AutoNet):
"""Build an ensemble of several neural networks that were evaluated during the architecure search"""
# OVERRIDE
def __init__(self, autonet, config_preset="medium_cs", **autonet_config):
if isinstance(autonet, AutoNet):
self.pipeline = autonet.pipeline
self.autonet_type = type(autonet)
self.base_config = autonet.base_config
self.autonet_config = autonet.autonet_config
self.fit_result = autonet.fit_result
elif issubclass(autonet, AutoNet):
self.pipeline = autonet.get_default_ensemble_pipeline()
self.autonet_type = autonet
self.base_config = dict()
self.autonet_config = None
self.fit_result = None
else:
raise("Invalid autonet argument")
assert EnableComputePredictionsForEnsemble in self.pipeline
assert SavePredictionsForEnsemble in self.pipeline
assert EnsembleServer in self.pipeline
assert BuildEnsemble in self.pipeline
self.base_config.update(autonet_config)
self.trained_autonets = None
self.dataset_info = None
if config_preset is not None:
parser = self.get_autonet_config_file_parser()
c = parser.read(os.path.join(os.path.dirname(__file__), "presets",
autonet.preset_folder_name, config_preset + ".txt"))
c.update(self.base_config)
self.base_config = c
# OVERRIDE
def fit(self, X_train, Y_train, X_valid=None, Y_valid=None, refit=True, **autonet_config):
X_train, Y_train, X_valid, Y_valid = self.check_data_array_types(X_train, Y_train, X_valid, Y_valid)
self.autonet_config = self.pipeline.get_pipeline_config(**dict(self.base_config, **autonet_config))
self.fit_result = self.pipeline.fit_pipeline(pipeline_config=self.autonet_config,
X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid)
self.dataset_info = self.pipeline[CreateDatasetInfo.get_name()].fit_output["dataset_info"]
self.pipeline.clean()
if refit:
self.refit(X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid)
return self.fit_result
# OVERRIDE
def refit(self, X_train, Y_train, X_valid=None, Y_valid=None, ensemble_configs=None, ensemble=None, autonet_config=None):
X_train, Y_train, X_valid, Y_valid = self.check_data_array_types(X_train, Y_train, X_valid, Y_valid)
if (autonet_config is None):
autonet_config = self.autonet_config
if (autonet_config is None):
autonet_config = self.base_config
if (ensemble_configs is None and self.fit_result):
ensemble_configs = self.fit_result["ensemble_configs"]
if (ensemble is None and self.fit_result):
ensemble = self.fit_result["ensemble"]
if (autonet_config is None or ensemble_configs is None or ensemble is None):
raise ValueError("You have to specify ensemble and autonet config in order to be able to refit")
identifiers = ensemble.get_selected_model_identifiers()
self.trained_autonets = dict()
for identifier in identifiers:
config_id = tuple(identifier[:3])
budget = identifier[3]
hyperparameter_config = ensemble_configs[config_id]
autonet = self.autonet_type(pipeline=self.pipeline.clone())
autonet.refit(X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
hyperparameter_config=hyperparameter_config, autonet_config=autonet_config, budget=budget)
self.trained_autonets[tuple(identifier)] = autonet
# OVERRIDE
def predict(self, X, return_probabilities=False, return_metric=False):
# run predict pipeline
X, = self.check_data_array_types(X)
prediction = None
models_with_weights = self.fit_result["ensemble"].get_models_with_weights(self.trained_autonets)
autonet_config = self.autonet_config or self.base_config
for weight, autonet in models_with_weights:
current_prediction = autonet.pipeline.predict_pipeline(pipeline_config=autonet_config, X=X)["Y"]
prediction = current_prediction if prediction is None else prediction + weight * current_prediction
OHE = autonet.pipeline[OneHotEncoding.get_name()]
metric = autonet.pipeline[MetricSelector.get_name()].fit_output['optimize_metric']
# reverse one hot encoding
result = OHE.reverse_transform_y(prediction, OHE.fit_output['y_one_hot_encoder'])
if not return_probabilities and not return_metric:
return result
result = [result]
if return_probabilities:
result.append(prediction)
if return_metric:
result.append(metric)
return tuple(result)
# OVERRIDE
def score(self, X_test, Y_test):
# run predict pipeline
X_test, Y_test = self.check_data_array_types(X_test, Y_test)
_, Y_pred, metric = self.predict(X_test, return_probabilities=True, return_metric=True)
Y_test, _ = self.pipeline[OneHotEncoding.get_name()].complete_y_tranformation(Y_test)
return metric(Y_pred, Y_test)
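# Hedged usage sketch (illustration only; AutoNetClassification and the data splits
# are assumptions, not taken from this module):
#
#   from autoPyTorch import AutoNetClassification
#   ensemble = AutoNetEnsemble(AutoNetClassification, config_preset="medium_cs")
#   ensemble.fit(X_train, Y_train)     # architecture search + ensemble building
#   Y_pred = ensemble.predict(X_test)  # weighted prediction over the selected models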
| RitchieAlpha/Auto-PyTorch | autoPyTorch/core/ensemble.py | ensemble.py | py | 5,843 | python | en | code | null | github-code | 6 |
12805757281 |
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
Datadirectory = "train\\"
Classes = ["0", "1", "2", "3", "4", "5", "6"]
img_size = 224
training_data = []
counter = 0
def createtrainingset():
for category in Classes:
path = os.path.join(Datadirectory, category)
class_num = Classes.index(category)
for img in os.listdir(path):
try:
img_arr = cv2.imread(os.path.join(path, img))
new_arr = cv2.resize(img_arr, (img_size, img_size))
training_data.append([new_arr, class_num])
except Exception as e:
pass
createtrainingset()
print(len(training_data))
random.shuffle(training_data)
X = [] # Images (features)
y = [] # Labels
for feature, label in training_data:
X.append(feature)
y.append(label)
y = np.array(y)
X = np.array(X)
X = X.reshape(-1, img_size, img_size, 3)
X = X / 255.0 # Normalize the image data between 0 and 1
print(X.shape)
print(y.shape)
plt.imshow(X[0])
plt.show()
model = tf.keras.applications.MobileNetV2()
# Transfer learning / fine-tuning: weights start from the pretrained MobileNetV2 checkpoint
base_input = model.layers[0].input
base_output = model.layers[-2].output
final_output = layers.Dense(128)(base_output)
final_output = layers.Activation('relu')(final_output)
final_output = layers.Dense(64)(final_output)
final_output = layers.Activation('relu')(final_output)
final_output = layers.Dense(7, activation = 'softmax')(final_output)
new_model = keras.Model(inputs = base_input, outputs = final_output)
new_model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
new_model.fit(X,y, epochs=10, batch_size = 8)
new_model.save('onbes_epoch.h5')
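# Hypothetical follow-up (not in the original script): reload the saved model and
# run a quick sanity-check prediction on the first training image.
# loaded = keras.models.load_model('onbes_epoch.h5')
# probs = loaded.predict(X[:1])   # shape (1, 7): softmax over the 7 classes
# print(probs.argmax())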
| Mudaferkaymak/Detecting-Faces-and-Analyzing-Them-with-Computer-Vision | Detecting-Faces-and-Analyzing-Them-with-Computer-Vision/training_themodel.py | training_themodel.py | py | 1,867 | python | en | code | 1 | github-code | 6 |
17536523132 |
import pystan
import stan_utility
import matplotlib
import matplotlib.pyplot as plot
##################################################
##### Simulate data and write to file
##################################################
model = stan_utility.compile_model('gen_data.stan')
fit = model.sampling(seed=194838, algorithm='Fixed_param', iter=1, chains=1,n_jobs=1)
data = dict(N = 25, M = 3,
X=fit.extract()['X'][0,:,:], y = fit.extract()['y'][0,:])
pystan.stan_rdump(data, 'lin_regr.data.R')
##################################################
##### Fit model and check diagnostics
##################################################
# Read in data from Rdump file
data = pystan.read_rdump('lin_regr.data.R')
# Fit posterior with Stan
model = stan_utility.compile_model('lin_regr.stan')
fit = model.sampling(data=data, seed=194838,n_jobs=1)
# Check sampler diagnostics
print(fit)
sampler_params = fit.get_sampler_params(inc_warmup=False)
stan_utility.check_div(sampler_params)
stan_utility.check_treedepth(sampler_params)
stan_utility.check_energy(sampler_params)
# Check visual diagnostics
fit.plot()
plot.show()
##################################################
##### Visualize posterior
##################################################
light="#DCBCBC"
light_highlight="#C79999"
mid="#B97C7C"
mid_highlight="#A25050"
dark="#8F2727"
dark_highlight="#7C0000"
# Plot parameter posteriors
params = fit.extract()
f, axarr = plot.subplots(2, 3)
for a in axarr[0,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
for a in axarr[1,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
axarr[0, 0].set_title("beta_1")
axarr[0, 0].hist(params['beta'][:,0], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 0].axvline(x=5, linewidth=2, color=light)
axarr[0, 1].set_title("beta_2")
axarr[0, 1].hist(params['beta'][:,1], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 1].axvline(x=-3, linewidth=2, color=light)
axarr[0, 2].set_title("beta_3")
axarr[0, 2].hist(params['beta'][:,2], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 2].axvline(x=2, linewidth=2, color=light)
axarr[1, 0].set_title("alpha")
axarr[1, 0].hist(params['alpha'], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 0].axvline(x=10, linewidth=2, color=light)
axarr[1, 1].set_title("sigma")
axarr[1, 1].hist(params['sigma'], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 1].axvline(x=1, linewidth=2, color=light)
plot.show()
# Perform a posterior predictive check by plotting
# posterior predictive distributions against data
f, axarr = plot.subplots(2, 2)
for a in axarr[0,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
for a in axarr[1,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
axarr[0, 0].set_title("y_1")
axarr[0, 0].hist(params['y_ppc'][:,0], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 0].axvline(x=data['y'][0], linewidth=2, color=light)
axarr[0, 1].set_title("y_5")
axarr[0, 1].hist(params['y_ppc'][:,4], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 1].axvline(x=data['y'][4], linewidth=2, color=light)
axarr[1, 0].set_title("y_10")
axarr[1, 0].hist(params['y_ppc'][:,9], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 0].axvline(x=data['y'][9], linewidth=2, color=light)
axarr[1, 1].set_title("y_15")
axarr[1, 1].hist(params['y_ppc'][:,14], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 1].axvline(x=data['y'][14], linewidth=2, color=light)
plot.show()
| MiyainNYC/Rose | stan/wimlds/1/lin_regr.py | lin_regr.py | py | 3,574 | python | en | code | 0 | github-code | 6 |
23048935694 |
from sqlalchemy.orm import Session
from .. import models, schemas
from fastapi.encoders import jsonable_encoder
def get_score(db: Session):
score = db.query(models.Score).first()
if not score:
new_score = create_score()
db.add(new_score)
db.commit()
db.refresh(new_score)
return new_score
return score
def post_goal(request: schemas.Goal, db: Session):
score = db.query(models.Score).first()
if not score:
new_score = create_score()
db.add(new_score)
db.commit()
db.refresh(new_score)
score = db.query(models.Score).first()
query = jsonable_encoder(request)
if query["team"] == "home":
score.home += 1
else:
score.away += 1
db.commit()
return score
def create_score():
new_score = models.Score(home=0, away=0)
return new_score
| hooglander/fastapi-get-and-post | app/repository/score.py | score.py | py | 873 | python | en | code | 0 | github-code | 6 |
70939280508 |
import os
import argparse
import pickle
import scipy
import trajnetplusplustools
class TrajnetEvaluator:
def __init__(self, reader_gt, scenes_gt, scenes_id_gt, scenes_sub, indexes, sub_indexes, args):
self.reader_gt = reader_gt
##Ground Truth
self.scenes_gt = scenes_gt
self.scenes_id_gt = scenes_id_gt
##Prediction
self.scenes_sub = scenes_sub
## Dictionary of type of trajectories
self.indexes = indexes
self.sub_indexes = sub_indexes
## The 4 types of Trajectories
self.static_scenes = {'N': len(indexes[1])}
self.linear_scenes = {'N': len(indexes[2])}
self.forced_non_linear_scenes = {'N': len(indexes[3])}
self.non_linear_scenes = {'N': len(indexes[4])}
## The 4 types of Interactions
self.lf = {'N': len(sub_indexes[1])}
self.ca = {'N': len(sub_indexes[2])}
self.grp = {'N': len(sub_indexes[3])}
self.others = {'N': len(sub_indexes[4])}
## The 4 metrics ADE, FDE, ColI, ColII
self.average_l2 = {'N': len(scenes_gt)}
self.final_l2 = {'N': len(scenes_gt)}
## Multimodal Prediction
self.overall_nll = {'N': len(scenes_gt)}
self.topk_ade = {'N': len(scenes_gt)}
self.topk_fde = {'N': len(scenes_gt)}
num_predictions = 0
for track in self.scenes_sub[0][0]:
if track.prediction_number and track.prediction_number > num_predictions:
num_predictions = track.prediction_number
self.num_predictions = num_predictions
self.pred_length = args.pred_length
self.obs_length = args.obs_length
self.enable_col1 = True
self.ade_list = {}
self.fde_list = {}
def aggregate(self, name, disable_collision):
## Overall Single Mode Scores
average = 0.0
final = 0.0
## Overall Multi Mode Scores
average_topk_ade = 0
average_topk_fde = 0
average_nll = 0
## Aggregates ADE, FDE and Collision in GT & Pred, Topk ADE-FDE , NLL for each category & sub_category
score = {1: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0], 2: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0], \
3: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0], 4: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0]}
sub_score = {1: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0], 2: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0], \
3: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0], 4: [0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0]}
## Iterate
for i in range(len(self.scenes_gt)):
ground_truth = self.scenes_gt[i]
## Get Keys and Sub_keys
keys = []
sub_keys = []
## Main
for key in list(score.keys()):
if self.scenes_id_gt[i] in self.indexes[key]:
keys.append(key)
# ## Sub
for sub_key in list(sub_score.keys()):
if self.scenes_id_gt[i] in self.sub_indexes[sub_key]:
sub_keys.append(sub_key)
## Extract Prediction Frames
primary_tracks_all = [t for t in self.scenes_sub[i][0] if t.scene_id == self.scenes_id_gt[i]]
neighbours_tracks_all = [[t for t in self.scenes_sub[i][j] if t.scene_id == self.scenes_id_gt[i]] for j in range(1, len(self.scenes_sub[i]))]
##### --------------------------------------------------- SINGLE -------------------------------------------- ####
primary_tracks = [t for t in primary_tracks_all if t.prediction_number == 0]
neighbours_tracks = [[t for t in neighbours_tracks_all[j] if t.prediction_number == 0] for j in range(len(neighbours_tracks_all))]
frame_gt = [t.frame for t in ground_truth[0]][-self.pred_length:]
frame_pred = [t.frame for t in primary_tracks]
## To verify if same scene
if frame_gt != frame_pred:
print("Frame id Groud truth: ", frame_gt)
print("Frame id Predictions: ", frame_pred)
raise Exception('frame numbers are not consistent')
average_l2 = trajnetplusplustools.metrics.average_l2(ground_truth[0], primary_tracks, n_predictions=self.pred_length)
final_l2 = trajnetplusplustools.metrics.final_l2(ground_truth[0], primary_tracks)
self.ade_list[self.scenes_id_gt[i]] = average_l2
self.fde_list[self.scenes_id_gt[i]] = final_l2
if not disable_collision:
ground_truth = self.drop_post_obs(ground_truth, self.obs_length)
## Collisions in GT
# person_radius=0.1
for j in range(1, len(ground_truth)):
if trajnetplusplustools.metrics.collision(primary_tracks, ground_truth[j], n_predictions=self.pred_length):
for key in keys:
score[key][2] += 1
## Sub
for sub_key in sub_keys:
sub_score[sub_key][2] += 1
break
## Collision in Predictions
# [Col-I] only if neighs in gt = neighs in prediction
num_gt_neigh = len(ground_truth) - 1
num_predicted_neigh = len(neighbours_tracks)
if num_gt_neigh != num_predicted_neigh:
self.enable_col1 = False
for key in score:
score[key][4] = 0
score[key][3] = 0
for sub_key in sub_score:
sub_score[sub_key][4] = 0
sub_score[sub_key][3] = 0
if self.enable_col1:
for key in keys:
score[key][4] += 1
for j in range(len(neighbours_tracks)):
if trajnetplusplustools.metrics.collision(primary_tracks, neighbours_tracks[j], n_predictions=self.pred_length):
score[key][3] += 1
break
## Sub
for sub_key in sub_keys:
sub_score[sub_key][4] += 1
for j in range(len(neighbours_tracks)):
if trajnetplusplustools.metrics.collision(primary_tracks, neighbours_tracks[j], n_predictions=self.pred_length):
sub_score[sub_key][3] += 1
break
# aggregate FDE and ADE
average += average_l2
final += final_l2
for key in keys:
score[key][0] += average_l2
score[key][1] += final_l2
## Sub
for sub_key in sub_keys:
sub_score[sub_key][0] += average_l2
sub_score[sub_key][1] += final_l2
##### --------------------------------------------------- SINGLE -------------------------------------------- ####
##### --------------------------------------------------- Top 3 -------------------------------------------- ####
if self.num_predictions > 1:
topk_ade, topk_fde = trajnetplusplustools.metrics.topk(primary_tracks_all, ground_truth[0], n_predictions=self.pred_length)
average_topk_ade += topk_ade
##Key
for key in keys:
score[key][5] += topk_ade
## SubKey
for sub_key in sub_keys:
sub_score[sub_key][5] += topk_ade
average_topk_fde += topk_fde
##Key
for key in keys:
score[key][6] += topk_fde
## SubKey
for sub_key in sub_keys:
sub_score[sub_key][6] += topk_fde
##### --------------------------------------------------- Top 3 -------------------------------------------- ####
##### --------------------------------------------------- NLL -------------------------------------------- ####
if self.num_predictions > 48:
nll = trajnetplusplustools.metrics.nll(primary_tracks_all, ground_truth[0], n_predictions=self.pred_length, n_samples=50)
average_nll += nll
##Key
for key in keys:
score[key][7] += nll
## SubKey
for sub_key in sub_keys:
sub_score[sub_key][7] += nll
##### --------------------------------------------------- NLL -------------------------------------------- ####
## Average ADE and FDE
average /= len(self.scenes_gt)
final /= len(self.scenes_gt)
## Average TopK ADE and Topk FDE and NLL
average_topk_ade /= len(self.scenes_gt)
average_topk_fde /= len(self.scenes_gt)
average_nll /= len(self.scenes_gt)
## Average categories
for key in list(score.keys()):
if self.indexes[key]:
score[key][0] /= len(self.indexes[key])
score[key][1] /= len(self.indexes[key])
score[key][5] /= len(self.indexes[key])
score[key][6] /= len(self.indexes[key])
score[key][7] /= len(self.indexes[key])
## Average subcategories
## Sub
for sub_key in list(sub_score.keys()):
if self.sub_indexes[sub_key]:
sub_score[sub_key][0] /= len(self.sub_indexes[sub_key])
sub_score[sub_key][1] /= len(self.sub_indexes[sub_key])
sub_score[sub_key][5] /= len(self.sub_indexes[sub_key])
sub_score[sub_key][6] /= len(self.sub_indexes[sub_key])
sub_score[sub_key][7] /= len(self.sub_indexes[sub_key])
# ##Adding value to dict
self.average_l2[name] = average
self.final_l2[name] = final
##APPEND to overall keys
self.overall_nll[name] = average_nll
self.topk_ade[name] = average_topk_ade
self.topk_fde[name] = average_topk_fde
## Main
self.static_scenes[name] = score[1]
self.linear_scenes[name] = score[2]
self.forced_non_linear_scenes[name] = score[3]
self.non_linear_scenes[name] = score[4]
## Sub_keys
self.lf[name] = sub_score[1]
self.ca[name] = sub_score[2]
self.grp[name] = sub_score[3]
self.others[name] = sub_score[4]
return self
def result(self):
return self.average_l2, self.final_l2, \
self.static_scenes, self.linear_scenes, self.forced_non_linear_scenes, self.non_linear_scenes, \
self.lf, self.ca, self.grp, self.others, \
self.topk_ade, self.topk_fde, self.overall_nll
def save_distance_lists(self, input_file):
distance_file = os.path.dirname(input_file).replace('test_pred', 'ade_fde_list')
        os.makedirs(distance_file, exist_ok=True)
with open(distance_file + '/ade_fde.pkl', 'wb') as handle:
pickle.dump([self.ade_list, self.fde_list], handle, protocol=pickle.HIGHEST_PROTOCOL)
## drop pedestrians that appear post observation
def drop_post_obs(self, ground_truth, obs_length):
obs_end_frame = ground_truth[0][obs_length].frame
ground_truth = [track for track in ground_truth if track[0].frame < obs_end_frame]
return ground_truth
def collision_test(list_sub, name, args):
""" Simple Collision Test """
submit_datasets = [args.path + name + '/' + f for f in list_sub if 'collision_test.ndjson' in f]
if len(submit_datasets):
# Scene Prediction
reader_sub = trajnetplusplustools.Reader(submit_datasets[0], scene_type='paths')
scenes_sub = [s for _, s in reader_sub.scenes()]
if trajnetplusplustools.metrics.collision(scenes_sub[0][0], scenes_sub[0][1], n_predictions=args.pred_length):
return "Fail"
return "Pass"
return "NA"
def eval(gt, input_file, args):
# Ground Truth
reader_gt = trajnetplusplustools.Reader(gt, scene_type='paths')
scenes_gt = [s for _, s in reader_gt.scenes()]
scenes_id_gt = [s_id for s_id, _ in reader_gt.scenes()]
# Scene Predictions
reader_sub = trajnetplusplustools.Reader(input_file, scene_type='paths')
scenes_sub = [s for _, s in reader_sub.scenes()]
## indexes is dictionary deciding which scenes are in which type
indexes = {}
for i in range(1, 5):
indexes[i] = []
## sub-indexes
sub_indexes = {}
for i in range(1, 5):
sub_indexes[i] = []
for scene in reader_gt.scenes_by_id:
tags = reader_gt.scenes_by_id[scene].tag
main_tag = tags[0:1]
sub_tags = tags[1]
for ii in range(1, 5):
if ii in main_tag:
indexes[ii].append(scene)
if ii in sub_tags:
sub_indexes[ii].append(scene)
# Evaluate
evaluator = TrajnetEvaluator(reader_gt, scenes_gt, scenes_id_gt, scenes_sub, indexes, sub_indexes, args)
evaluator.aggregate('kf', args.disable_collision)
## Save Lists
# evaluator.save_distance_lists(input_file)
return evaluator.result()
| lzz970818/Trajectory-Prediction | Trajectory-Prediction-master/evaluator.py | evaluator.py | py | 13,202 | python | en | code | 12 | github-code | 6 |
17284176875 |
# Calculates the number of trees that are visibile from outside the grid
def part1():
ROW, COLUMN = [99, 99] # Dimensions for the forest map
forest_grid = [[] for _ in range(COLUMN)] # Forest map represented by a 2D Array (99x99)
trees_visible = 0 # Number of trees visible
# === Helper Functions Start === #
# Inserts tree heights into forest_grid
def grid_setup(forest_grid):
for row, line in enumerate(file):
for tree_height in line.strip():
forest_grid[row].append(int(tree_height))
return forest_grid
# Checks downwards
def check_row_down(row, column, tree_house_height, forest_grid):
for x in range(row + 1, ROW, 1):
if((forest_grid[x][column] < tree_house_height) and (x == (ROW - 1))): # Tree search reaches the edge
return True
elif(forest_grid[x][column] >= tree_house_height): # Tree gets blocked
return False
# Checks upwards
def check_row_up(row, column, tree_house_height, forest_grid):
for x in reversed(range(row)):
if((forest_grid[x][column] < tree_house_height) and (x == 0)): # Tree search reaches the edge
return True
elif(forest_grid[x][column] >= tree_house_height): # Tree gets blocked
return False
# Checks to the right
def check_column_right(row, column, tree_house_height, forest_grid):
for y in range(column + 1, COLUMN, 1):
if((forest_grid[row][y] < tree_house_height) and (y == (COLUMN - 1))): # Tree search reaches the edge
return True
elif(forest_grid[row][y] >= tree_house_height): # Tree gets blocked
return False
# Checks to the left
def check_column_left(row, column, tree_house_height, forest_grid):
for y in reversed(range(column)):
if((forest_grid[row][y] < tree_house_height) and (y == 0)): # Tree search reaches the edge
return True
elif(forest_grid[row][y] >= tree_house_height): # Tree gets blocked
return False
# === Helper Functions End === #
# Program Start #
with open('forest_map.txt') as file:
forest_grid = grid_setup(forest_grid)
for x in range(ROW):
for y in range(COLUMN):
if((x == 0) or (y == 0) or (x == (ROW - 1)) or (y == (COLUMN - 1))):
trees_visible += 1
else:
tree_house_height = forest_grid[x][y] # Spot from which we are comparing other tree heights
# Checks to see if a tree would be visible from any direction
                    if (check_row_down(x, y, tree_house_height, forest_grid)
                            or check_row_up(x, y, tree_house_height, forest_grid)
                            or check_column_left(x, y, tree_house_height, forest_grid)
                            or check_column_right(x, y, tree_house_height, forest_grid)):
trees_visible += 1
print(trees_visible, "trees are visible")
| kianlak/Advent-Of-Code-2022 | Day8/Day8Part1.py | Day8Part1.py | py | 3,196 | python | en | code | 0 | github-code | 6 |
25528262922 |
#!/usr/bin/python3
# 3-infinite_add.py
# Simon Tagbor <[email protected]>
if __name__ == "__main__":
import sys
result = 0
for i in range(len(sys.argv) - 1):
result += int(sys.argv[i+1])
print("{:d}".format(result))
| Simontagbor/alx-higher_level_programming | 0x02-python-import_modules/3-infinite_add.py | 3-infinite_add.py | py | 248 | python | en | code | 2 | github-code | 6 |
21840251334 |
"""Order views module"""
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework import status as st
from rest_framework import generics
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.exceptions import MethodNotAllowed, NotFound
from rest_framework.decorators import api_view
from orders.models import Order, STATUS_CHOICES
from orders.serializers import OrderSerializer
from orders.pagination import CustomPagination
from order_flow.settings import DEBUG
class OrderAPIListCreate(generics.ListCreateAPIView):
"""
    Returns a list of orders in JSON format and gives an option to create orders
"""
if DEBUG:
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
else:
renderer_classes = [JSONRenderer]
queryset = Order.objects.all()
serializer_class = OrderSerializer
pagination_class = CustomPagination
filter_backends = [DjangoFilterBackend, filters.OrderingFilter]
filterset_fields = ['external_id', 'status']
ordering_fields = ['id', 'status', 'created_at']
class OrderAPIRetrieveUpdateDestroy(generics.RetrieveUpdateDestroyAPIView):
"""
    Returns a single order's JSON info and gives options to update and delete it
"""
if DEBUG:
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
else:
renderer_classes = [JSONRenderer]
parser_classes = [JSONParser]
queryset = Order.objects.all()
serializer_class = OrderSerializer
def put(self, request, *args, **kwargs):
"""Add a possibility of partial update, using put method"""
return self.partial_update(request, *args, **kwargs)
def perform_destroy(self, instance):
"""Protect order from delete if its status is 'accepted'."""
if instance.status == 'accepted':
raise MethodNotAllowed(
'delete',
detail="You can not delete orders with status 'accepted'.",
)
instance.delete()
@api_view(['POST'])
def status_change(request, pk, status):
"""Change order status"""
try:
order = Order.objects.get(id=pk)
except Order.DoesNotExist:
raise NotFound(f'Order with id {pk} does not exist.')
if status not in [statuses[0] for statuses in STATUS_CHOICES]:
raise MethodNotAllowed(
'post',
detail="You can change order status"
" only to 'accepted' or 'failed'",
)
if order.status != 'new':
raise MethodNotAllowed(
'post',
detail="You can not change order status if it is not 'new'",
)
order.status = status
order.save()
return Response(status=st.HTTP_200_OK)
| GunGalla/order-flow-test | orders/views.py | views.py | py | 2,855 | python | en | code | 0 | github-code | 6 |
18246562040 |
class nomatch(Exception):
'''Thrown when parsing fails. Almost always caught and almost never fatal'''
def parse(string):
'''Parse a full string and return a lego piece. Fail if the whole string wasn't parsed'''
p, i = pattern.match(string, 0)
if i != len(string):
raise Exception("Could not parse '" + string + "' beyond index " + str(i))
return p.reduce()
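# Hedged usage note (illustration only; charclass, mult and pattern are defined
# further down in this module):
#   parse("abc|def")       # -> a pattern object equivalent to the regex "abc|def"
#   str(parse("[a-c]x"))   # -> re-renders the reduced lego piece as a regex string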
class lego:
'''
Parent class for all lego pieces.
All lego pieces have some things in common. This parent class mainly
hosts documentation though.
'''
def __setattr__(self, name, value):
'''
Lego pieces are immutable. It caused some pretty serious problems when
I didn't have this.
'''
raise Exception("Can't set " + str(self) + " attribute " + str(name) + " to " + str(value))
def fsm(self, alphabet):
'''
Return the present lego piece in the form of a finite state machine,
as imported from the fsm module.
If no alphabet is explicitly supplied, which seems quite probable,
we use the lego.alphabet() method (later) to list all the characters
mentioned in self. However, if we intend to connect this FSM to another
one which uses different characters, we may need to supply an alphabet
which is a superset of both sets.
'''
def __repr__(self):
'''
Return a string approximating the instantiation line
for the present lego piece.
'''
def __str__(self):
'''
Render the present lego piece in the form of a regular expression.
Some lego pieces may be created which cannot be rendered in this way.
In particular: a pattern containing no concs; a multiplier of zero.
'''
def match(cls, string, i):
'''
Start at index i in the supplied string and try to match one of the
present class. Elementary recursive descent parsing with very little
need for flair. The opposite of __str__(), above. (In most cases.)
Throws a nomatch in the event of failure.
'''
def reduce(self):
'''
The most important and algorithmically complex method. Takes the current
lego piece and simplifies it in every way possible, returning a simpler
lego piece which is quite probably not of the same class as the original.
Approaches vary by the class of the present lego piece.
It is critically important to (1) always call reduce() on whatever you're
returning before you return it and therefore (2) always return something
STRICTLY SIMPLER than the current object. Otherwise, infinite loops become
possible in reduce() calls.
'''
def __add__(self, other):
'''
Concatenate any two lego pieces, regardless of differing classes. Because
reduce() (above) is always called afterwards, the result is as simplified
as possible.
Call using "a = b + c"
'''
def __mul__(self, multiplier):
'''
Equivalent to repeated concatenation. Multiplier consists of a minimum
and a maximum; maximum may be infinite (for Kleene star closure).
Call using "a = b * qm"
Reduce() is always called afterwards.
'''
def __or__(self, other):
'''
Alternate between any two lego pieces, regardless of differing classes.
Again, reduce() is called afterwards, usually with excellent results.
Call using "a = b | c"
'''
def __and__(self, other):
'''
Intersection function. Return a lego piece that can match any string
that both self and other can match. Fairly elementary results relating
to regular languages and finite state machines show that this is
possible, but implementation is a BEAST in many cases. Here, we convert
both lego pieces to FSMs (see fsm(), above) for the intersection, then
back to lego afterwards.
Call using "a = b & c"
'''
def alphabet(self):
'''
Return a set of all unique characters used in this lego piece.
In theory this could be a static property, self.alphabet, not
a function, self.alphabet(), but in the vast majority of cases
this will never be queried so it's a waste of computation to
calculate it every time a lego piece is instantiated.
By convention, otherchars is always included in this result.
'''
pass
def everythingbut(self):
'''
Return a lego object which will match any string not matched by self,
and which will not match any string matched by self.
Another task which is very difficult in general (and typically returns
utter garbage when actually printed), but becomes trivial to code
thanks to FSM routines.
'''
return self.fsm().everythingbut().lego().reduce()
def empty(self):
'''
Return False if there exists a string which the present lego piece
can match. Return True if no such string exists. Examples of empty
lego pieces are charclass() and pattern()
'''
pass
@classmethod
def matchStatic(cls, string, i, static):
if string[i:len(static)+i] == static:
return i+len(static)
raise nomatch(
"Can't find '" + static + "' at index " + str(i) + " in '" + string + "'"
)
class multiplicand(lego):
'''
This class only exists so that it can be a parent to both charclass and
pattern, which can both be used in multipliers.
'''
class charclass(multiplicand):
'''
A charclass is basically a frozenset of symbols. The reason for the
charclass object instead of using frozenset directly is to allow us to
set a "negated" flag. A charclass with the negation flag set is assumed
to contain every symbol that is in the alphabet of all symbols but not
explicitly listed inside the frozenset. e.g. [^a]. This is very handy
if the full alphabet is extremely large, but also requires dedicated
combination functions.
'''
def __init__(self, chars=set(), negateMe=False):
# chars should consist only of chars
if otherchars in set(chars):
raise Exception("Can't put non-character 'otherchars' in a charclass")
self.__dict__["chars"] = frozenset(chars)
self.__dict__["negated"] = negateMe
def __eq__(self, other):
try:
return self.chars == other.chars \
and self.negated == other.negated
except AttributeError:
return False
def __hash__(self):
return hash((self.chars, self.negated))
def __mul__(self, ier):
# e.g. "a" * {0,1} = "a?"
if ier == one:
return self.reduce()
return mult(self, ier).reduce()
# These are the characters carrying special meanings when they appear "outdoors"
# within a regular expression. To be interpreted literally, they must be
# escaped with a backslash.
allSpecial = set("\\[]|().?*+{}")
# These are the characters carrying special meanings when they appear INSIDE a
# character class (delimited by square brackets) within a regular expression.
# To be interpreted literally, they must be escaped with a backslash.
# Notice how much smaller this class is than the one above; note also that the
# hyphen and caret do NOT appear above.
classSpecial = set("\\[]^-")
# these are the character ranges which can be used inside square brackets e.g.
# "[a-z]", "[F-J]". These ranges should be disjoint.
allowableRanges = {
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
}
def __str__(self):
# e.g. \w
if self in shorthand.keys():
return shorthand[self]
# e.g. [^a]
if self.negated:
return "[^" + self.escape() + "]"
# single character, not contained inside square brackets.
if len(self.chars) == 1:
# Python lacks the Axiom of Choice
char = "".join(self.chars)
# e.g. if char is "\t", return "\\t"
if char in escapes.keys():
return escapes[char]
if char in charclass.allSpecial:
return "\\" + char
return char
# multiple characters (or possibly 0 characters)
return "[" + self.escape() + "]"
def escape(self):
def escapeChar(char):
if char in charclass.classSpecial:
return "\\" + char
if char in escapes.keys():
return escapes[char]
return char
def recordRange():
nonlocal currentRange
nonlocal output
# there's no point in putting a range when the whole thing is
# 3 characters or fewer.
if len(currentRange) in {0, 1, 2, 3}:
output += "".join(escapeChar(char) for char in currentRange)
else:
output += escapeChar(currentRange[0]) + "-" + escapeChar(currentRange[-1])
currentRange = ""
output = ""
# look for ranges
currentRange = ""
for char in sorted(self.chars, key=str):
# range is not empty: new char must fit after previous one
if len(currentRange) > 0:
# find out if this character appears in any of the
# charclass.allowableRanges listed above.
superRange = None
for allowableRange in charclass.allowableRanges:
if char in allowableRange:
superRange = allowableRange
break
if superRange is None:
# if this character doesn't appear above, then any existing
# currentRange should be sorted and filed now
# if there is one
recordRange()
else:
i = superRange.index(char)
# char doesn't fit old range: restart
if i == 0 or superRange[i-1] != currentRange[-1]:
recordRange()
currentRange += char
recordRange()
return output
def fsm(self, alphabet=None):
from fsm import fsm
if alphabet is None:
alphabet = self.alphabet()
# 0 is initial, 1 is final, 2 is oblivion
# If negated, make a singular FSM accepting any other characters
if self.negated:
map = {
0: dict([(symbol, 2 if symbol in self.chars else 1) for symbol in alphabet]),
1: dict([(symbol, 2) for symbol in alphabet]),
2: dict([(symbol, 2) for symbol in alphabet]),
}
# If normal, make a singular FSM accepting only these characters
else:
map = {
0: dict([(symbol, 1 if symbol in self.chars else 2) for symbol in alphabet]),
1: dict([(symbol, 2) for symbol in alphabet]),
2: dict([(symbol, 2) for symbol in alphabet]),
}
return fsm(
alphabet = alphabet,
states = {0, 1, 2},
initial = 0,
finals = {1},
map = map,
)
def __repr__(self):
string = ""
if self.negated is True:
string += "~"
string += "charclass("
if len(self.chars) > 0:
string += "\"" + "".join(str(char) for char in sorted(self.chars, key=str)) + "\""
string += ")"
return string
def reduce(self):
# Charclasses cannot be reduced()
return self
def __add__(self, other):
return (mult(self, one) + other).reduce()
def alphabet(self):
return {otherchars} | self.chars
def empty(self):
return len(self.chars) == 0 and self.negated == False
@classmethod
def match(cls, string, i):
# wildcard ".", "\\w", "\\d", etc.
for key in shorthand.keys():
try:
return key, cls.matchStatic(string, i, shorthand[key])
except nomatch:
pass
# "[^dsgsdg]"
try:
j = cls.matchStatic(string, i, "[^")
chars, j = cls.matchClassInterior(string, j)
j = cls.matchStatic(string, j, "]")
return ~charclass(chars), j
except nomatch:
pass
# "[sdfsf]"
try:
j = cls.matchStatic(string, i, "[")
chars, j = cls.matchClassInterior(string, j)
j = cls.matchStatic(string, j, "]")
return charclass(chars), j
except nomatch:
pass
# e.g. if seeing "\\t", return "\t"
for key in escapes.keys():
try:
return charclass(key), cls.matchStatic(string, i, escapes[key])
except nomatch:
pass
# e.g. if seeing "\\{", return "{"
for char in charclass.allSpecial:
try:
return charclass(char), cls.matchStatic(string, i, "\\" + char)
except nomatch:
pass
# single non-special character, not contained inside square brackets
char, i = cls.matchAny(string, i)
if char in charclass.allSpecial:
raise nomatch
return charclass(char), i
@classmethod
def matchAny(cls, string, i):
if i >= len(string):
raise nomatch
return string[i], i+1
@classmethod
def matchClassInterior(cls, string, i):
internals = ""
try:
while True:
internal, i = cls.matchRange(string, i)
internals += internal
except nomatch:
pass
return internals, i
@classmethod
def matchRange(cls, string, i):
first, i = cls.matchInternalChar(string, i)
try:
j = cls.matchStatic(string, i, "-")
last, j = cls.matchInternalChar(string, j)
for allowableRange in charclass.allowableRanges:
if first in allowableRange:
# first and last must be in the same character range
if last not in allowableRange:
raise nomatch("Char '" + last + "' not allowed as end of range")
firstIndex = allowableRange.index(first)
lastIndex = allowableRange.index(last)
# and in order i.e. a < b
if firstIndex >= lastIndex:
raise nomatch(
"Disordered range ('" + first + "' !< '" + last + "')"
)
# OK
return allowableRange[firstIndex:lastIndex + 1], j
raise nomatch("Char '" + first + "' not allowed as start of a range")
except nomatch:
return first, i
@classmethod
def matchInternalChar(cls, string, i):
# e.g. if we see "\\t", return "\t"
for key in escapes.keys():
try:
return key, cls.matchStatic(string, i, escapes[key])
except nomatch:
pass
# special chars e.g. "\\-" returns "-"
for char in charclass.classSpecial:
try:
return char, cls.matchStatic(string, i, "\\" + char)
except nomatch:
pass
# single non-special character, not contained
# inside square brackets
char, j = cls.matchAny(string, i)
if char in charclass.classSpecial:
raise nomatch
return char, j
# self output methods:
def escape(self):
def escapeChar(char):
if char in charclass.classSpecial:
return "\\" + char
if char in escapes.keys():
return escapes[char]
return char
def recordRange():
nonlocal currentRange
nonlocal output
# there's no point in putting a range when the whole thing is
# 3 characters or fewer.
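# Illustrative note (mirroring the str() unit tests further below):
# charclass("abc") prints as "[abc]" but charclass("abcd") collapses to
# "[a-d]", because only runs of four or more characters save any space.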
if len(currentRange) < 4:
output += "".join(escapeChar(char) for char in currentRange)
else:
output += escapeChar(currentRange[0]) + "-" + escapeChar(currentRange[-1])
currentRange = ""
output = ""
# use shorthand for known character ranges
# note the nested processing order. DO NOT process \d before processing
# \w. if more character class constants arise which do not nest nicely,
# a problem will arise because there is no clear ordering to use...
# look for ranges
currentRange = ""
for char in sorted(self.chars, key=str):
# range is not empty: new char must fit after previous one
if len(currentRange) > 0:
# find out if this character appears in any of the
# charclass.allowableRanges listed above.
superRange = None
for allowableRange in charclass.allowableRanges:
if char in allowableRange:
superRange = allowableRange
break
if superRange is None:
# if this character doesn't appear above, then any existing
# currentRange should be sorted and filed now
# if there is one
recordRange()
else:
i = superRange.index(char)
# char doesn't fit old range: restart
if i == 0 or superRange[i-1] != currentRange[-1]:
recordRange()
currentRange += char
recordRange()
return output
# set operations
def __invert__(self):
'''
Negate the current charclass. e.g. [ab] becomes [^ab]. Call
using "charclass2 = ~charclass1"
'''
return charclass(self.chars, negateMe=not self.negated)
def __or__(self, other):
try:
# ¬A OR ¬B = ¬(A AND B)
# ¬A OR B = ¬(A - B)
# A OR ¬B = ¬(B - A)
# A OR B
if self.negated:
if other.negated:
return ~charclass(self.chars & other.chars)
return ~charclass(self.chars - other.chars)
if other.negated:
return ~charclass(other.chars - self.chars)
return charclass(self.chars | other.chars)
# "other" lacks attribute "negated" or "chars"
# "other" is not a charclass
# Never mind!
except AttributeError:
return mult(self, one) | other
def __and__(self, other):
try:
# ¬A AND ¬B = ¬(A OR B)
# ¬A AND B = B - A
# A AND ¬B = A - B
# A AND B
if self.negated:
if other.negated:
return ~charclass(self.chars | other.chars)
return charclass(other.chars - self.chars)
if other.negated:
return charclass(self.chars - other.chars)
return charclass(self.chars & other.chars)
# "other" lacks attribute "negated" or "chars"
# "other" is not a charclass
# Never mind!
except AttributeError:
return (mult(self, one) & other).reduce()
class bound:
'''A non-negative integer, or infinity (represented by None)'''
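# Illustrative sketch (comments only, not executed): bound(3) behaves like
# the integer 3, while bound(None) -- exported further below as "inf" --
# behaves like infinity, e.g. bound(3) + inf == inf and
# bound(3).common(inf) == bound(3).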
def __init__(self, v):
if v is not None and v < 0:
raise Exception("Value must be >= 0 or None")
self.__dict__['v'] = v
def __repr__(self):
if self == inf:
return "inf"
return repr(self.v)
def __str__(self):
if self == inf:
# This only happens for an unlimited upper bound
return ""
return str(self.v)
def __eq__(self, other):
try:
return self.v == other.v
except AttributeError:
return False
def __hash__(self):
return hash(self.v)
def __lt__(self, other):
if self == inf:
return False
if other == inf:
return True
return self.v < other.v
def __gt__(self, other):
if other == inf:
return False
if self == inf:
return True
return self.v > other.v
def __mul__(self, other):
'''Multiply this bound by another'''
if self == inf or other == inf:
return inf
return bound(self.v * other.v)
def __add__(self, other):
'''Add this bound to another'''
if self == inf or other == inf:
return inf
return bound(self.v + other.v)
def __sub__(self, other):
'''
Subtract another bound from this one.
Caution: this operation is not meaningful for all bounds.
'''
if other == inf:
if self == inf:
# Infinity minus infinity is zero. This has to be true so that
# we can for example subtract multiplier(bound(0), inf) from
# multiplier(bound(1), inf) to get multiplier(bound(1), bound(1))
return bound(0)
raise Exception("Can't subtract " + str(other) + " from " + str(self))
if self == inf:
return self
return bound(self.v - other.v)
def common(self, other):
'''
Find the minimum of two bounds. This is the largest bound which can
be legally subtracted from both of the originals.
This could return bound(0) very easily.
'''
if self == inf:
return other
if other == inf:
return self
return bound(min(self.v, other.v))
class multiplier(lego):
'''
A min and a max. The vast majority of characters in regular
expressions occur without a specific multiplier, which is implicitly
equivalent to a min of 1 and a max of 1, but many have explicit
multipliers like "*" (min = 0, max = inf) and so on.
Although it seems odd and can lead to some confusing edge cases, we do
also permit a max of 0 (iff min is 0 too). This allows the multiplier
"zero" to exist, which actually are quite useful in their own special way.
'''
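# A rough mapping from familiar regex quantifiers to multiplier instances
# (these exact presets are defined near the bottom of this file):
#   "?"     -> multiplier(bound(0), bound(1))   i.e. qm
#   "*"     -> multiplier(bound(0), inf)        i.e. star
#   "+"     -> multiplier(bound(1), inf)        i.e. plus
#   "{2,5}" -> multiplier(bound(2), bound(5))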
def __init__(self, min, max):
if min == inf:
raise Exception("Can't have an infinite lower bound")
if max < min:
raise Exception(
"max '" + str(max) + "' must match or exceed min '" + str(min) + "'"
)
# More useful than "min" and "max" in many situations
# are "mandatory" and "optional".
mandatory = min
optional = max - min
self.__dict__['min'] = min
self.__dict__['max'] = max
self.__dict__['mandatory'] = mandatory
self.__dict__['optional'] = optional
def __eq__(self, other):
try:
return self.min == other.min \
and self.max == other.max
except AttributeError:
return False
def __hash__(self):
return hash((self.min, self.max))
def __repr__(self):
return "multiplier(" + repr(self.min) + ", " + repr(self.max) + ")"
def __str__(self):
if self.max == bound(0):
raise Exception("No regex available for " + repr(self))
if self in symbolic.keys():
return symbolic[self]
if self.max == inf:
return "{" + str(self.min) + ",}"
if self.min == self.max:
return "{" + str(self.min) + "}"
return "{" + str(self.min) + "," + str(self.max) + "}"
@classmethod
def match(cls, string, i):
# {2,3}
try:
j = cls.matchStatic(string, i, "{")
min, j = cls.matchInteger(string, j)
j = cls.matchStatic(string, j, ",")
max, j = cls.matchInteger(string, j)
j = cls.matchStatic(string, j, "}")
return multiplier(bound(min), bound(max)), j
except nomatch:
pass
# {2,}
try:
j = cls.matchStatic(string, i, "{")
min, j = cls.matchInteger(string, j)
j = cls.matchStatic(string, j, ",}")
return multiplier(bound(min), inf), j
except nomatch:
pass
# {2}
try:
j = cls.matchStatic(string, i, "{")
min, j = cls.matchInteger(string, j)
j = cls.matchStatic(string, j, "}")
return multiplier(bound(min), bound(min)), j
except nomatch:
pass
# "?"/"*"/"+"/""
# we do these in reverse order of symbol length, because
# that forces "" to be done last
for key in sorted(symbolic, key=lambda key: -len(symbolic[key])):
try:
return key, cls.matchStatic(string, i, symbolic[key])
except nomatch:
pass
raise nomatch
@classmethod
def matchInteger(cls, string, i):
try:
return 0, cls.matchStatic(string, i, "0")
except nomatch:
pass
digit, i = cls.matchAnyOf(string, i, "123456789")
integer = int(digit)
try:
while True:
digit, i = cls.matchAnyOf(string, i, "0123456789")
integer *= 10
integer += int(digit)
except nomatch:
return integer, i
@classmethod
def matchAnyOf(cls, string, i, collection):
for char in collection:
try:
return char, cls.matchStatic(string, i, char)
except nomatch:
pass
raise nomatch("Can't find any of '" + str(collection) + "' at index " + str(i) + " in '" + string + "'")
def __mul__(self, other):
'''Multiply this multiplier by another'''
return multiplier(self.min * other.min, self.max * other.max)
def __add__(self, other):
'''Add two multipliers together'''
return multiplier(self.min + other.min, self.max + other.max)
def __sub__(self, other):
'''
Subtract another multiplier from this one.
Caution: multipliers are not totally ordered.
This operation is not meaningful for all pairs of multipliers.
'''
mandatory = self.mandatory - other.mandatory
optional = self.optional - other.optional
return multiplier(mandatory, mandatory + optional)
def __and__(self, other):
'''
Find the intersection of two multipliers: that is, a third multiplier
expressing the range covered by both of the originals. This is not
defined for all multipliers.
'''
a = max(self.min, other.min)
b = min(self.max, other.max)
return multiplier(a, b)
def common(self, other):
'''
Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier.
'''
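# Worked sketch: multiplier(bound(3), bound(4)).common(multiplier(bound(2), bound(5)))
# gives multiplier(bound(2), bound(3)): the shared mandatory part is 2 and the
# shared optional part is 1 (see the unit tests near the bottom of this file).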
mandatory = self.mandatory.common(other.mandatory)
optional = self.optional.common(other.optional)
return multiplier(mandatory, mandatory + optional)
class mult(lego):
'''
A mult is a combination of a multiplicand with
a multiplier (a min and a max). The vast majority of characters in regular
expressions occur without a specific multiplier, which is implicitly
equivalent to a min of 1 and a max of 1, but many have explicit
multipliers like "*" (min = 0, max = inf) and so on.
e.g. a, b{2}, c?, d*, [efg]{2,5}, f{2,}, (anysubpattern)+, .*, and so on
'''
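# Construction sketch: mult(charclass("a"), star) models the regex "a*", and
# mult(charclass("h"), multiplier(bound(5), bound(5))) models "h{5}"
# (both forms appear in the parsing and str() unit tests below).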
def __init__(self, cand, ier):
self.__dict__["multiplicand"] = cand
self.__dict__["multiplier"] = ier
def __eq__(self, other):
try:
return self.multiplicand == other.multiplicand \
and self.multiplier == other.multiplier
except AttributeError:
return False
def __hash__(self):
return hash((self.multiplicand, self.multiplier))
def __repr__(self):
string = "mult("
string += repr(self.multiplicand)
string += ", " + repr(self.multiplier)
string += ")"
return string
def __mul__(self, multiplier):
if multiplier == one:
return self.reduce()
return mult(self.multiplicand, self.multiplier * multiplier).reduce()
def __add__(self, other):
return (conc(self) + other).reduce()
def __or__(self, other):
return (conc(self) | other).reduce()
def __sub__(self, other):
'''
Subtract another mult from this one and return the result.
The reverse of concatenation. This is a lot trickier.
e.g. a{4,5} - a{3} = a{1,2}
'''
if other.multiplicand != self.multiplicand:
raise Exception("Can't subtract " + str(other) + " from " + str(self))
return mult(self.multiplicand, self.multiplier - other.multiplier)
def common(self, other):
'''
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
'''
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier.common(other.multiplier))
# Multiplicands disagree, no common part at all.
return mult(nothing, zero)
def __and__(self, other):
if isinstance(other, charclass):
other = mult(other, one)
# If two mults are given which have a common multiplicand, the shortcut
# is just to take the intersection of the two multiplicands.
try:
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier & other.multiplier).reduce()
except AttributeError:
# "other" isn't a mult; lacks either a multiplicand or a multiplier.
# Never mind!
pass
# This situation is substantially more complicated if the multiplicand is,
# for example, a pattern. It's difficult to reason sensibly about this
# kind of thing.
return (conc(self) & other).reduce()
def alphabet(self):
return {otherchars} | self.multiplicand.alphabet()
def empty(self):
if self.multiplicand.empty() \
and self.multiplier.min > bound(0):
return True
return False
def reduce(self):
# Can't match anything: reduce to nothing
if self.empty():
return nothing.reduce()
# If our multiplicand is a pattern containing an empty conc()
# we can pull that "optional" bit out into our own multiplier
# instead.
# e.g. (A|B|C|)D -> (A|B|C)?D
# e.g. (A|B|C|){2} -> (A|B|C){0,2}
try:
if emptystring in self.multiplicand.concs:
return mult(
pattern(
*self.multiplicand.concs.difference({emptystring})
),
self.multiplier * qm,
).reduce()
except AttributeError:
# self.multiplicand has no attribute "concs"; isn't a pattern; never mind
pass
# If we have an empty multiplicand, we can only match it
# zero times
if self.multiplicand.empty() \
and self.multiplier.min == bound(0):
return emptystring.reduce()
# Failing that, we have a positive multiplicand which we
# intend to match zero times. In this case the only possible
# match is the empty string.
if self.multiplier == zero:
return emptystring.reduce()
# no point multiplying in the singular
if self.multiplier == one:
return self.multiplicand.reduce()
# Try recursively reducing our internal.
reduced = self.multiplicand.reduce()
# "bulk up" smaller lego pieces to pattern if need be
if isinstance(reduced, mult):
reduced = conc(reduced)
if isinstance(reduced, conc):
reduced = pattern(reduced)
if reduced != self.multiplicand:
return mult(reduced, self.multiplier).reduce()
# If our multiplicand is a pattern containing a single conc
# containing a single mult, we can separate that out a lot
# e.g. ([ab])* -> [ab]*
try:
if len(self.multiplicand.concs) == 1:
singleton = [c for c in self.multiplicand.concs][0]
if len(singleton.mults) == 1:
return mult(
singleton.mults[0].multiplicand,
singleton.mults[0].multiplier * self.multiplier
).reduce()
except AttributeError:
# self.multiplicand has no attribute "concs"; isn't a pattern; never mind
pass
return self
def __str__(self):
# recurse into subpattern
if isinstance(self.multiplicand, pattern):
output = "(" + str(self.multiplicand) + ")"
else:
output = str(self.multiplicand)
suffix = str(self.multiplier)
# Pick whatever is shorter/more comprehensible.
# e.g. "aa" beats "a{2}", "ababab" beats "(ab){3}"
if self.multiplier.min == self.multiplier.max \
and len(output) * self.multiplier.min.v <= len(output) + len(suffix):
return output * self.multiplier.min.v
return output + suffix
def fsm(self, alphabet=None):
from fsm import epsilon
if alphabet is None:
alphabet = self.alphabet()
# worked example: (min, max) = (5, 7) or (5, inf)
# (mandatory, optional) = (5, 2) or (5, inf)
unit = self.multiplicand.fsm(alphabet)
# accepts e.g. "ab"
# accepts "ababababab"
mandatory = unit * self.multiplier.mandatory.v
# unlimited additional copies
if self.multiplier.optional == inf:
optional = unit.star()
# accepts "(ab)*"
else:
optional = epsilon(alphabet) | unit
# accepts "(ab)?"
optional *= self.multiplier.optional.v
# accepts "(ab)?(ab)?"
return mandatory + optional
@classmethod
def match(cls, string, i):
try:
j = cls.matchStatic(string, i, "(")
cand, j = pattern.match(string, j)
j = cls.matchStatic(string, j, ")")
except nomatch:
cand, j = charclass.match(string, i)
ier, j = multiplier.match(string, j)
return mult(cand, ier), j
class conc(lego):
'''
A conc (short for "concatenation") is a tuple of mults i.e. an unbroken
string of mults occurring one after the other.
e.g. abcde[^fg]*h{4}[a-z]+(subpattern)(subpattern2)
To express the empty string, use an empty conc, conc().
'''
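# Construction sketch: conc(mult(charclass("a"), one), mult(charclass("b"), one))
# models the regex "ab", while the empty conc() models "" (aliased as
# "emptystring" near the bottom of this file).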
def __init__(self, *mults):
self.__dict__["mults"] = tuple(mults)
def __eq__(self, other):
try:
return self.mults == other.mults
except AttributeError:
return False
def __hash__(self):
return hash(self.mults)
def __repr__(self):
string = "conc("
string += ", ".join(repr(m) for m in self.mults)
string += ")"
return string
def __mul__(self, multiplier):
if multiplier == one:
return self
# Have to replace self with a pattern unfortunately
return pattern(self) * multiplier
def __add__(self, other):
# other must be a conc too
if isinstance(other, multiplicand):
other = mult(other, one)
if isinstance(other, mult):
other = conc(other)
return conc(*(self.mults + other.mults)).reduce()
def __or__(self, other):
return (pattern(self) | other).reduce()
def __and__(self, other):
return (pattern(self) & other).reduce()
def reduce(self):
# Can't match anything
if self.empty():
return nothing.reduce()
# no point concatenating one thing (note: concatenating 0 things is
# entirely valid)
if len(self.mults) == 1:
return self.mults[0].reduce()
# Try recursively reducing our internals
reduced = [m.reduce() for m in self.mults]
# "bulk up" smaller lego pieces to concs if need be
reduced = [
pattern(x) if isinstance(x, conc) else x
for x in reduced
]
reduced = [
mult(x, one) if isinstance(x, multiplicand) else x
for x in reduced
]
reduced = tuple(reduced)
if reduced != self.mults:
return conc(*reduced).reduce()
# multiple mults with identical multiplicands in a row?
# squish those together
# e.g. ab?b?c -> ab{0,2}c
if len(self.mults) > 1:
for i in range(len(self.mults)-1):
if self.mults[i].multiplicand == self.mults[i+1].multiplicand:
squished = mult(
self.mults[i].multiplicand,
self.mults[i].multiplier + self.mults[i+1].multiplier
)
new = self.mults[:i] + (squished,) + self.mults[i+2:]
return conc(*new).reduce()
# Conc contains (among other things) a *singleton* mult containing a pattern
# with only one internal conc? Flatten out.
# e.g. "a(d(ab|a*c))" -> "ad(ab|a*c)"
# BUT NOT "a(d(ab|a*c)){2,}"
# AND NOT "a(d(ab|a*c)|y)"
for i in range(len(self.mults)):
m = self.mults[i]
try:
if m.multiplier == one and len(m.multiplicand.concs) == 1:
single = [c for c in m.multiplicand.concs][0]
new = self.mults[:i] + single.mults + self.mults[i+1:]
return conc(*new).reduce()
except AttributeError:
# m.multiplicand has no attribute "concs"; isn't a pattern; never mind
pass
return self
def fsm(self, alphabet=None):
from fsm import epsilon
if alphabet is None:
alphabet = self.alphabet()
# start with a component accepting only the empty string
fsm1 = epsilon(alphabet)
for m in self.mults:
fsm1 += m.fsm(alphabet)
return fsm1
def alphabet(self):
return {otherchars}.union(*[m.alphabet() for m in self.mults])
def empty(self):
for m in self.mults:
if m.empty():
return True
return False
def __str__(self):
return "".join(str(m) for m in self.mults)
@classmethod
def match(cls, string, i):
mults = list()
try:
while True:
m, i = mult.match(string, i)
mults.append(m)
except nomatch:
pass
return conc(*mults), i
def common(self, other, suffix=False):
'''
Return the common prefix of these two concs; that is, the largest conc
which can be safely beheaded() from the front of both.
The result could be emptystring.
"ZYAA, ZYBB" -> "ZY"
"CZ, CZ" -> "CZ"
"YC, ZC" -> ""
With the "suffix" flag set, works from the end. E.g.:
"AAZY, BBZY" -> "ZY"
"CZ, CZ" -> "CZ"
"CY, CZ" -> ""
'''
mults = []
indices = range(min(len(self.mults), len(other.mults))) # e.g. [0, 1, 2, 3]
# Work backwards from the end of both concs instead.
if suffix:
indices = [-i - 1 for i in indices] # e.g. [-1, -2, -3, -4]
for i in indices:
common = self.mults[i].common(other.mults[i])
# Happens when multiplicands disagree (e.g. "A.common(B)") or if
# the multiplicand is shared but the common multiplier is zero
# (e.g. "ABZ*.common(CZ)".)
if common.multiplier == zero:
break
mults.append(common)
# If we did not remove the entirety of both mults, we cannot continue.
if common != self.mults[i] \
or common != other.mults[i]:
break
if suffix:
mults = reversed(mults)
return conc(*mults)
def __sub__(self, other):
'''
Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC.
'''
# e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7
# e.g. other has mults at indices [0, 1, 2] len=3
new = list(self.mults)
for i in reversed(range(len(other.mults))): # [2, 1, 0]
# e.g. i = 1, j = 7 - 3 + 1 = 5
j = len(self.mults) - len(other.mults) + i
new[j] -= other.mults[i]
if new[j].multiplier == zero:
# omit that mult entirely since it has been factored out
del new[j]
# If the subtraction is incomplete but there is more to
# other.mults, then we have a problem. For example, "ABC{2} - BC"
# subtracts the C successfully but leaves something behind,
# then tries to subtract the B too, which isn't possible
else:
if i != 0:
raise Exception
return conc(*new)
def behead(self, other):
'''
As with __sub__ but the other way around. For example, if
ABC + DEF = ABCDEF, then ABCDEF.behead(AB) = CDEF.
'''
# e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6]
# e.g. other has mults at indices [0, 1, 2]
new = list(self.mults)
for i in range(len(other.mults)):
new[0] -= other.mults[i]
if new[0].multiplier == zero:
# omit that mult entirely since it has been factored out
new.pop(0)
# If the subtraction is incomplete but there is more to
# other.mults, then we have a problem. For example, "C{2}BA.behead(CB)"
# subtracts the C successfully but leaves something behind,
# then tries to subtract the B too, which isn't possible
else:
if i != len(other.mults) - 1:
raise Exception
return conc(*new)
class pattern(multiplicand):
'''
A pattern (also known as an "alt", short for "alternation") is a
set of concs. A pattern expresses multiple alternate possibilities.
When written out as a regex, these would be separated by pipes. A pattern
containing no possibilities is possible and represents a regular expression
matching no strings whatsoever (there is no conventional string form for
this).
e.g. "abc|def(ghi|jkl)" is an alt containing two concs: "abc" and
"def(ghi|jkl)". The latter is a conc containing four mults: "d", "e", "f"
and "(ghi|jkl)". The latter in turn is a mult consisting of an upper bound
1, a lower bound 1, and a multiplicand which is a new subpattern, "ghi|jkl".
This new subpattern again consists of two concs: "ghi" and "jkl".
'''
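# Construction sketch: pattern(conc(mult(charclass("a"), one)),
# conc(mult(charclass("b"), one))) models the regex "a|b"; pattern() with no
# concs models the regular expression matching no strings at all.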
def __init__(self, *concs):
self.__dict__["concs"] = frozenset(concs)
def __eq__(self, other):
try:
return self.concs == other.concs
except AttributeError:
return False
def __hash__(self):
return hash(self.concs)
def __repr__(self):
string = "pattern("
string += ", ".join(repr(c) for c in self.concs)
string += ")"
return string
def __mul__(self, multiplier):
if multiplier == one:
return self
return mult(self, multiplier).reduce()
def __add__(self, other):
return mult(self, one) + other
def alphabet(self):
return {otherchars}.union(*[c.alphabet() for c in self.concs])
def empty(self):
for c in self.concs:
if not c.empty():
return False
return True
def __and__(self, other):
# A deceptively simple method for an astoundingly difficult operation
alphabet = self.alphabet() | other.alphabet()
# Which means that we can build finite state machines sharing that alphabet
combined = self.fsm(alphabet) & other.fsm(alphabet)
return combined.lego().reduce()
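# Conceptually (a hedged example, not asserted in the tests below): the
# languages of "[ab]*" and "[bc]*" intersect to the same language as "b*",
# and that is the kind of result this FSM round-trip is meant to produce.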
def __or__(self, other):
# other must be a pattern too
if isinstance(other, charclass):
other = mult(other, one)
if isinstance(other, mult):
other = conc(other)
if isinstance(other, conc):
other = pattern(other)
return pattern(*(self.concs | other.concs)).reduce()
def __str__(self):
if len(self.concs) < 1:
raise Exception("Can't print an empty pattern.")
# take the alternation of the input collection of regular expressions.
# i.e. jam "|" between each element
# 1+ elements.
return "|".join(sorted(str(c) for c in self.concs))
def reduce(self):
# emptiness
if self.empty():
return nothing.reduce()
# If one of our internal concs is empty, remove it
for c in self.concs:
if c.empty():
new = self.concs - {c}
return pattern(*new).reduce()
# no point alternating among one possibility
if len(self.concs) == 1:
return [e for e in self.concs][0].reduce()
# Try recursively reducing our internals first.
reduced = [c.reduce() for c in self.concs]
# "bulk up" smaller lego pieces to concs if need be
reduced = [
mult(x, one) if isinstance(x, multiplicand) else x
for x in reduced
]
reduced = [
conc(x) if isinstance(x, mult) else x
for x in reduced
]
reduced = frozenset(reduced)
if reduced != self.concs:
return pattern(*reduced).reduce()
# If this pattern contains several concs each containing just 1 mult
# each containing just a charclass, with a multiplier of 1,
# then we can merge those branches together.
# e.g. "0|[1-9]|ab" -> "[0-9]|ab"
changed = False
merger = None
rest = []
for c in self.concs:
if len(c.mults) == 1 \
and c.mults[0].multiplier == one \
and isinstance(c.mults[0].multiplicand, charclass):
if merger is None:
merger = c.mults[0].multiplicand
else:
merger |= c.mults[0].multiplicand
changed = True
else:
rest.append(c)
if changed:
rest.append(conc(mult(merger, one)))
return pattern(*rest).reduce()
# If one of the present pattern's concs is the empty string, and
# there is another conc with a single mult whose lower bound is 0, we
# can omit the empty string.
# E.g. "|(ab)*|def" => "(ab)*|def".
# If there is another conc with a single mult whose lower bound is 1,
# we can merge the empty string into that.
# E.g. "|(ab)+|def" => "(ab)*|def".
if conc() in self.concs:
for c in self.concs:
if len(c.mults) != 1:
continue
m = c.mults[0]
if m.multiplier.min == bound(0):
rest = self.concs - {conc()}
return pattern(*rest).reduce()
if m.multiplier.min == bound(1):
rest = self.concs - {conc(), c} | {m * qm}
return pattern(*rest).reduce()
# If the present pattern's concs all have a common prefix, split
# that out. This increases the depth of the object
# but it is still arguably simpler/ripe for further reduction
# e.g. "abc|ade" -> a(bc|de)"
prefix = self._commonconc()
if prefix != emptystring:
leftovers = self.behead(prefix)
mults = prefix.mults + (mult(leftovers, one),)
return conc(*mults).reduce()
# Same but for suffixes.
# e.g. "xyz|stz -> (xy|st)z"
suffix = self._commonconc(suffix=True)
if suffix != emptystring:
leftovers = self - suffix
mults = (mult(leftovers, one),) + suffix.mults
return conc(*mults).reduce()
return self
@classmethod
def match(cls, string, i):
concs = list()
# first one
c, i = conc.match(string, i)
concs.append(c)
# the rest
while True:
try:
i = cls.matchStatic(string, i, "|")
c, i = conc.match(string, i)
concs.append(c)
except nomatch:
return pattern(*concs), i
def __sub__(self, other):
'''
The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C.
'''
return pattern(*[c - other for c in self.concs])
def behead(self, other):
'''
Like __sub__ but the other way around. Remove a common prefix from the
present pattern; that is, from each of its constituent concs.
ZA|ZB|ZC.behead(Z) = A|B|C
'''
return pattern(*[c.behead(other) for c in self.concs])
def _commonconc(self, suffix=False):
'''
Find the longest conc which acts as prefix to every conc in this pattern.
This could be the empty string. Only the common prefix is returned; the
leftovers can then be obtained by beheading each conc with it.
"ZA|ZB|ZC" -> "Z" (leftovers "(A|B|C)")
"ZA|ZB|ZC|Z" -> "Z" (leftovers "(A|B|C|)")
"CZ|CZ" -> "CZ" (leftovers "()")
If "suffix" is True, the same but for the common suffix.
'''
# There's probably a way to do this as a one-liner
result = None
for c in self.concs:
if result is None:
result = c
else:
result = result.common(c, suffix=suffix)
if result is None:
raise Exception
return result
def fsm(self, alphabet=None):
from fsm import null
if alphabet is None:
alphabet = self.alphabet()
fsm1 = null(alphabet)
for c in self.concs:
fsm1 |= c.fsm(alphabet)
return fsm1
# Special and useful values go here.
# We need to add an extra character in the alphabet which can stand for
# "everything else". For example, if the regex is "abc.", then at the moment
# our alphabet is {"a", "b", "c"}. But "." could match anything else not yet
# specified. This extra letter stands for that ("[^abc]" in this case).
otherchars = None
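# For example, charclass("abc").alphabet() yields {otherchars, "a", "b", "c"}:
# the three known letters plus the stand-in for every character not mentioned.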
# Standard character classes
w = charclass("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz")
d = charclass("0123456789")
s = charclass("\t\n\v\f\r ")
W = ~w
D = ~d
S = ~s
dot = ~charclass()
# This charclass expresses "no possibilities at all"
# and can never match anything.
nothing = charclass()
# Textual representations of standard character classes
shorthand = {
w : "\\w", d : "\\d", s : "\\s",
W : "\\W", D : "\\D", S : "\\S",
dot : ".",
}
# Characters which users may escape in a regex instead of inserting them
# literally. In ASCII order:
escapes = {
"\t" : "\\t", # tab
"\n" : "\\n", # line feed
"\v" : "\\v", # vertical tab
"\f" : "\\f", # form feed
"\r" : "\\r", # carriage return
}
# Use this for cases where no upper bound is needed
inf = bound(None)
# Preset multipliers. These get used ALL THE TIME in unit tests
zero = multiplier(bound(0), bound(0)) # has some occasional uses
qm = multiplier(bound(0), bound(1))
one = multiplier(bound(1), bound(1))
star = multiplier(bound(0), inf)
plus = multiplier(bound(1), inf)
# Symbol lookup table for preset multipliers.
symbolic = {
qm : "?",
one : "" ,
star : "*",
plus : "+",
}
# A very special conc expressing the empty string, ""
emptystring = conc()
# Unit tests.
if __name__ == '__main__':
# "AAZY, BBZY" -> "ZY"
assert conc(
mult(charclass("A"), one),
mult(charclass("A"), one),
mult(charclass("Z"), one),
mult(charclass("Y"), one),
).common(
conc(
mult(charclass("B"), one),
mult(charclass("B"), one),
mult(charclass("Z"), one),
mult(charclass("Y"), one),
),
suffix=True
) == conc(
mult(charclass("Z"), one),
mult(charclass("Y"), one),
)
# "CZ, CZ" -> "CZ"
assert conc(
mult(charclass("C"), one),
mult(charclass("Z"), one),
).common(
conc(
mult(charclass("C"), one),
mult(charclass("Z"), one),
),
suffix=True
) == conc(
mult(charclass("C"), one),
mult(charclass("Z"), one),
)
# "CY, CZ" -> ""
assert conc(
mult(charclass("C"), one),
mult(charclass("Y"), one),
).common(
conc(
mult(charclass("C"), one),
mult(charclass("Z"), one),
),
suffix=True
) == emptystring
# AZ, BZ -> Z
assert conc(
mult(charclass("A"), one),
mult(charclass("Z"), one),
).common(
conc(
mult(charclass("B"), one),
mult(charclass("Z"), one),
),
suffix=True
) == conc(
mult(charclass("Z"), one)
)
# AZ*, BZ -> ()
assert conc(
mult(charclass("A"), one),
mult(charclass("Z"), star),
).common(
conc(
mult(charclass("B"), one),
mult(charclass("Z"), one),
),
suffix=True
) == emptystring
# A, B -> no common part
assert conc(
mult(charclass("A"), one),
).common(
conc(
mult(charclass("B"), one),
),
suffix=True
) == emptystring
# Conc subtraction
# AZ - Z = A
assert conc(
mult(charclass("A"), one),
mult(charclass("Z"), one),
) - conc(
mult(charclass("Z"), one)
) == conc(
mult(charclass("A"), one)
)
# ABXY+Z - XY+Z = AB
assert conc(
mult(charclass("A"), one),
mult(charclass("B"), one),
mult(charclass("X"), one),
mult(charclass("Y"), plus),
mult(charclass("Z"), one),
) - conc(
mult(charclass("X"), one),
mult(charclass("Y"), plus),
mult(charclass("Z"), one),
) == conc(
mult(charclass("A"), one),
mult(charclass("B"), one),
)
# ABXY+Z.behead(ABXY+) = Z
assert conc(
mult(charclass("A"), one),
mult(charclass("B"), one),
mult(charclass("X"), one),
mult(charclass("Y"), plus),
mult(charclass("Z"), one),
).behead(
conc(
mult(charclass("A"), one),
mult(charclass("B"), one),
mult(charclass("X"), one),
mult(charclass("Y"), plus),
)
)== conc(
mult(charclass("Z"), one),
)
# X{2}Y+Z.behead(XY+) = exception
try:
conc(
mult(charclass("X"), multiplier),
mult(charclass("Y"), plus),
mult(charclass("Z"), one),
).behead(
conc(
mult(charclass("X"), one),
mult(charclass("Y"), plus),
)
)
assert False
except AssertionError:
raise
except Exception:
pass
# A - () = A
assert conc(
mult(charclass("A"), one),
) - emptystring == conc(
mult(charclass("A"), one),
)
# Odd bug with ([bc]*c)?[ab]*
int5A = mult(charclass("bc"), star).fsm({"a", "b", "c", otherchars})
assert int5A.accepts("")
int5B = mult(charclass("c"), one).fsm({"a", "b", "c", otherchars})
assert int5B.accepts("c")
int5C = int5A + int5B
assert int5C.accepts("c")
# Empty mult suppression
assert conc(
mult(nothing, one), # this mult can never actually match anything
mult(charclass("0"), one),
mult(charclass("0123456789"), one),
).reduce() == nothing
assert conc(
mult(pattern(), one), # this mult can never actually match anything
mult(charclass("0"), one),
mult(charclass("0123456789"), one),
).reduce() == nothing
# Empty conc suppression in patterns.
assert pattern(
conc(
mult(nothing, one), # this mult can never actually match anything
mult(charclass("0"), one),
mult(charclass("0123456789"), one),
) # so neither can this conc
).reduce() == nothing
assert pattern(
conc(
mult(pattern(), one), # this mult can never actually match anything
mult(charclass("0"), one),
mult(charclass("0123456789"), one),
) # so neither can this conc
).reduce() == nothing
# Empty pattern suppression in mults
assert mult(nothing, qm).reduce() == emptystring
assert mult(pattern(), qm).reduce() == emptystring
# empty pattern behaviour
assert pattern().reduce() == charclass()
# pattern.fsm()
# "a[^a]"
anota = pattern(
conc(
mult(charclass("a"), one),
mult(~charclass("a"), one),
)
).fsm("ab")
assert not anota.accepts("a")
assert not anota.accepts("b")
assert not anota.accepts("aa")
assert anota.accepts("ab")
assert not anota.accepts("ba")
assert not anota.accepts("bb")
# "0\\d"
zeroD = pattern(
conc(
mult(charclass("0"), one),
mult(charclass("123456789"), one)
)
).fsm(d.chars)
assert zeroD.accepts("01")
assert not zeroD.accepts("10")
# "\\d{2}"
d2 = pattern(
conc(
mult(
d, multiplier(bound(2), bound(2))
)
)
).fsm(d.chars)
assert not d2.accepts("")
assert not d2.accepts("1")
assert d2.accepts("11")
assert not d2.accepts("111")
# abc|def(ghi|jkl)
conventional = pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(charclass("f"), one),
mult(
pattern(
conc(
mult(charclass("g"), one),
mult(charclass("h"), one),
mult(charclass("i"), one),
),
conc(
mult(charclass("j"), one),
mult(charclass("k"), one),
mult(charclass("l"), one),
),
), one
),
),
).fsm(w.chars)
assert not conventional.accepts("a")
assert not conventional.accepts("ab")
assert conventional.accepts("abc")
assert not conventional.accepts("abcj")
assert conventional.accepts("defghi")
assert conventional.accepts("defjkl")
# A subtlety in mult reduction.
# ([$%\^]|){1} should become ([$%\^])? then [$%\^]?,
# ([$%\^]|){1} should NOT become ([$%\^]|) (the pattern alone)
assert mult(
pattern(
conc(),
conc(
mult(charclass("$%^"), one)
)
), one
).reduce() == mult(charclass("$%^"), qm)
# nested pattern reduction in a conc
# a(d(ab|a*c)) -> ad(ab|a*c)
assert conc(
mult(charclass("a"), one),
mult(
pattern(
# must contain only one conc. Otherwise, we have e.g. "a(zz|d(ab|a*c))"
conc(
# can contain anything
mult(charclass("d"), one),
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
), one
),
),
), one # must be one. Otherwise, we have e.g. "a(d(ab|a*c)){2}"
)
).reduce() == conc(
mult(charclass("a"), one),
mult(charclass("d"), one),
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
), one
),
)
# "(aa)".behead("a") = "a"
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
).behead(conc(mult(charclass("a"), one))) == pattern(
conc(
mult(charclass("a"), one)
),
)
# (abc|aa).behead(a) = (a|bc)
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
).behead(conc(mult(charclass("a"), one))) == pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)
# (cf{1,2}|cf).behead(c) = (f{1,2}|f)
assert pattern(
conc(
mult(charclass("c"), one),
mult(charclass("f"), multiplier(bound(1), bound(2))),
),
conc(
mult(charclass("c"), one),
mult(charclass("f"), one),
),
).behead(conc(mult(charclass("c"), one))) == pattern(
conc(
mult(charclass("f"), multiplier(bound(1), bound(2))),
),
conc(
mult(charclass("f"), one),
),
)
# pattern._commonconc() tests
# aa, aa -> aa
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
)._commonconc() == conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
)
# (aa|aa).behead(aa) = ()
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
).behead(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
)
) == pattern(emptystring)
# abc, aa -> a
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
)._commonconc() == conc(
mult(charclass("a"), one),
)
# (abc|aa).behead(a) = (a|bc)
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
).behead(
conc(
mult(charclass("a"), one),
)
) == pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)
# a, bc -> emptystring
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)._commonconc() == emptystring
# (a|bc).behead(emptystring) = (a|bc)
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
).behead(emptystring) == pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)
# cf{1,2}, cf -> cf (beheading then leaves (f?|), tested next)
assert pattern(
conc(
mult(charclass("c"), one),
mult(charclass("f"), multiplier(bound(1), bound(2))),
),
conc(
mult(charclass("c"), one),
mult(charclass("f"), one),
),
)._commonconc() == conc(
mult(charclass("c"), one),
mult(charclass("f"), one),
)
# (cf{1,2}|cf).behead(cf) = (f?|)
assert pattern(
conc(
mult(charclass("c"), one),
mult(charclass("f"), multiplier(bound(1), bound(2))),
),
conc(
mult(charclass("c"), one),
mult(charclass("f"), one),
),
).behead(
conc(
mult(charclass("c"), one),
mult(charclass("f"), one),
)
) == pattern(
emptystring,
conc(
mult(charclass("f"), qm),
),
)
# ZA|ZB|ZC -> Z
assert pattern(
conc(
mult(charclass("Z"), one),
mult(charclass("A"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("B"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("C"), one),
),
)._commonconc() == conc(mult(charclass("Z"), one))
# ZA|ZB|ZC.behead(Z) = A|B|C
assert pattern(
conc(
mult(charclass("Z"), one),
mult(charclass("A"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("B"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("C"), one),
),
).behead(
conc(mult(charclass("Z"), one))
) == pattern(
conc(mult(charclass("A"), one)),
conc(mult(charclass("B"), one)),
conc(mult(charclass("C"), one)),
)
# Z+A|ZB|ZZC -> Z
assert pattern(
conc(
mult(charclass("Z"), plus),
mult(charclass("A"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("B"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("Z"), one),
mult(charclass("C"), one),
),
)._commonconc() == conc(mult(charclass("Z"), one))
# Z+A|ZB|ZZC.behead(Z) = Z*A|B|ZC
assert pattern(
conc(
mult(charclass("Z"), plus),
mult(charclass("A"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("B"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("Z"), one),
mult(charclass("C"), one),
),
).behead(
conc(mult(charclass("Z"), one))
) == pattern(
conc(
mult(charclass("Z"), star),
mult(charclass("A"), one),
),
conc(
mult(charclass("B"), one),
),
conc(
mult(charclass("Z"), one),
mult(charclass("C"), one),
),
)
# a{2}b|a+c -> a
assert pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), plus),
mult(charclass("c"), one),
)
)._commonconc() == conc(mult(charclass("a"), one))
# a{2}b|a+c.behead(a) = (ab|a*c)
assert pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), plus),
mult(charclass("c"), one),
)
).behead(
conc(mult(charclass("a"), one))
) == pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
)
# make sure recursion problem in reduce()
# has gone away
emptystring + mult(
pattern(
conc(mult(charclass("123456789"), one)),
conc(mult(charclass("0"), one))
),
one
)
# charclass equality
assert charclass("a") == charclass("a")
assert ~charclass("a") == ~charclass("a")
assert ~charclass("a") != charclass("a")
assert charclass("ab") == charclass("ba")
# str(charclass)
assert str(w) == "\\w"
assert str(d) == "\\d"
assert str(s) == "\\s"
assert str(charclass("a")) == "a"
assert str(charclass("{")) == "\\{"
assert str(charclass("\t")) == "\\t"
assert str(charclass("ab")) == "[ab]"
assert str(charclass("a{")) == "[a{]"
assert str(charclass("a\t")) == "[\\ta]"
assert str(charclass("a-")) == "[\\-a]"
assert str(charclass("a[")) == "[\\[a]"
assert str(charclass("a]")) == "[\\]a]"
assert str(charclass("ab")) == "[ab]"
assert str(charclass("abc")) == "[abc]"
assert str(charclass("abcd")) == "[a-d]"
assert str(charclass("abcdfghi")) == "[a-df-i]"
assert str(charclass("^")) == "^"
assert str(charclass("\\")) == "\\\\"
assert str(charclass("a^")) == "[\\^a]"
assert str(charclass("0123456789a")) == "[0-9a]"
assert str(charclass("\t\n\v\f\r A")) == "[\\t\\n\\v\\f\\r A]"
assert str(charclass("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz|")) == "[0-9A-Z_a-z|]"
assert str(W) == "\\W"
assert str(D) == "\\D"
assert str(S) == "\\S"
assert str(dot) == "."
assert str(~charclass("")) == "."
assert str(~charclass("a")) == "[^a]"
assert str(~charclass("{")) == "[^{]"
assert str(~charclass("\t")) == "[^\\t]"
assert str(~charclass("^")) == "[^\\^]"
# charclass parsing
assert charclass.match("a", 0) == (charclass("a"), 1)
assert charclass.match("aa", 1) == (charclass("a"), 2)
assert charclass.match("a$", 1) == (charclass("$"), 2)
assert charclass.match(".", 0) == (dot, 1)
try:
charclass.match("[", 0)
assert False
except nomatch:
pass
try:
charclass.match("a", 1)
assert False
except nomatch:
pass
# charclass set operations
# charclass negation
assert ~~charclass("a") == charclass("a")
assert charclass("a") == ~~charclass("a")
# charclass union
# [ab] u [bc] = [abc]
assert charclass("ab") | charclass("bc") == charclass("abc")
# [ab] u [^bc] = [^c]
assert charclass("ab") | ~charclass("bc") == ~charclass("c")
# [^a] u [bc] = [^a]
assert ~charclass("ab") | charclass("bc") == ~charclass("a")
# [^ab] u [^bc] = [^b]
assert ~charclass("ab") | ~charclass("bc") == ~charclass("b")
# charclass intersection
# [ab] n [bc] = [b]
assert charclass("ab") & charclass("bc") == charclass("b")
# [ab] n [^bc] = [a]
assert charclass("ab") & ~charclass("bc") == charclass("a")
# [^ab] n [bc] = [c]
assert ~charclass("ab") & charclass("bc") == charclass("c")
# [^ab] n [^bc] = [^abc]
assert ~charclass("ab") & ~charclass("bc") == ~charclass("abc")
# mult equality
assert mult(charclass("a"), one) == mult(charclass("a"), one)
assert mult(charclass("a"), one) != mult(charclass("b"), one)
assert mult(charclass("a"), one) != mult(charclass("a"), qm)
assert mult(charclass("a"), one) != mult(charclass("a"), multiplier(bound(1), bound(2)))
# str(mult) tests
a = charclass("a")
assert str(mult(a, one)) == "a"
assert str(mult(a, multiplier(bound(2), bound(2)))) == "aa"
assert str(mult(a, multiplier(bound(3), bound(3)))) == "aaa"
assert str(mult(a, multiplier(bound(4), bound(4)))) == "aaaa"
assert str(mult(a, multiplier(bound(5), bound(5)))) == "a{5}"
assert str(mult(a, qm)) == "a?"
assert str(mult(a, star)) == "a*"
assert str(mult(a, plus)) == "a+"
assert str(mult(a, multiplier(bound(2), bound(5)))) == "a{2,5}"
assert str(bound(2)) == "2"
assert str(inf) == ""
assert str(multiplier(bound(2), inf)) == "{2,}"
assert str(mult(a, multiplier(bound(2), inf))) == "a{2,}"
assert str(mult(d, one)) == "\\d"
assert str(mult(d, multiplier(bound(2), bound(2)))) == "\\d\\d"
assert str(mult(d, multiplier(bound(3), bound(3)))) == "\\d{3}"
# mult parsing
assert mult.match("[a-g]+", 0) == (
mult(charclass("abcdefg"), plus),
6
)
assert mult.match("[a-g0-8$%]+", 0) == (
mult(charclass("abcdefg012345678$%"), plus),
11
)
assert mult.match("[a-g0-8$%\\^]+", 0) == (
mult(charclass("abcdefg012345678$%^"), plus),
13
)
assert mult.match("abcde[^fg]*", 5) == (
mult(~charclass("fg"), star),
11
)
assert mult.match("abcde[^fg]*h{5}[a-z]+", 11) == (
mult(charclass("h"), multiplier(bound(5), bound(5))),
15
)
assert mult.match("abcde[^fg]*h{5}[a-z]+T{1,}", 15) == (
mult(charclass("abcdefghijklmnopqrstuvwxyz"), plus),
21
)
assert mult.match("abcde[^fg]*h{5}[a-z]+T{2,}", 21) == (
mult(charclass("T"), multiplier(bound(2), inf)),
26
)
# mult.reduce() tests
# mult -> mult
# mult -> charclass
assert mult(charclass("a"), one).reduce() == charclass("a")
assert mult(charclass("a"), qm).reduce() == mult(charclass("a"), qm)
assert mult(charclass("a"), zero).reduce() == emptystring
assert mult(nothing, one).reduce() == nothing
assert mult(nothing, qm).reduce() == emptystring
assert mult(nothing, zero).reduce() == emptystring
assert mult(nothing, multiplier(bound(0), bound(5))).reduce() == emptystring
assert mult(pattern(), one).reduce() == nothing
assert mult(pattern(), qm).reduce() == emptystring
assert mult(pattern(), zero).reduce() == emptystring
assert mult(pattern(), multiplier(bound(0), bound(5))).reduce() == emptystring
# mult contains a pattern containing an empty conc? Pull the empty
# part out where it's external
assert mult(
pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), star)),
emptystring
), multiplier(bound(2), bound(2))
).reduce() == mult(
pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), star)),
), multiplier(bound(0), bound(2))
)
# This happens even if emptystring is the only thing left inside the mult
assert mult(
pattern(
emptystring
), multiplier(bound(2), bound(2))
).reduce() == emptystring
# mult contains a pattern containing a single conc containing a single mult?
# that can be reduced greatly
# e.g. "([ab])*" -> "[ab]*"
assert mult(
pattern(
conc(
mult(charclass("ab"), one)
)
), star
).reduce() == mult(charclass("ab"), star)
# e.g. "(c{1,2}){3,4}" -> "c{3,8}"
assert mult(
pattern(
conc(
mult(charclass("c"), multiplier(bound(1), bound(2)))
)
), multiplier(bound(3), bound(4))
).reduce() == mult(charclass("c"), multiplier(bound(3), bound(8)))
# recursive mult reduction
assert mult(
pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), one)),
), star
).reduce() == mult(charclass("ab"), star)
# mult subtraction
# a{4,5} - a{3} = a{1,2}
assert mult(
charclass("a"),
multiplier(bound(4), bound(5))
) - mult(
charclass("a"),
multiplier(bound(3), bound(3))
) == mult(
charclass("a"),
multiplier(bound(1), bound(2))
)
# conc equality
assert conc(mult(charclass("a"), one)) == conc(mult(charclass("a"), one))
assert conc(mult(charclass("a"), one)) != conc(mult(charclass("b"), one))
assert conc(mult(charclass("a"), one)) != conc(mult(charclass("a"), qm))
assert conc(mult(charclass("a"), one)) != conc(mult(charclass("a"), multiplier(bound(1), bound(2))))
assert conc(mult(charclass("a"), one)) != emptystring
# str(conc) tests
assert str(conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(~charclass("fg"), star),
mult(charclass("h"), multiplier(bound(5), bound(5))),
mult(charclass("abcdefghijklmnopqrstuvwxyz"), plus),
)) == "abcde[^fg]*h{5}[a-z]+"
# conc parsing
assert conc.match("abcde[^fg]*h{5}[a-z]+", 0) == (
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(~charclass("fg"), star),
mult(charclass("h"), multiplier(bound(5), bound(5))),
mult(charclass("abcdefghijklmnopqrstuvwxyz"), plus),
), 21
)
assert conc.match("[bc]*[ab]*", 0) == (
conc(
mult(charclass("bc"), star),
mult(charclass("ab"), star),
),
10
)
assert conc.match("abc...", 0) == (
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(dot, one),
mult(dot, one),
mult(dot, one),
),
6
)
assert conc.match("\\d{4}-\\d{2}-\\d{2}", 0) == (
conc(
mult(charclass("0123456789"), multiplier(bound(4), bound(4))),
mult(charclass("-"), one),
mult(charclass("0123456789"), multiplier(bound(2), bound(2))),
mult(charclass("-"), one),
mult(charclass("0123456789"), multiplier(bound(2), bound(2))),
),
17
)
# conc.reduce()
assert conc(
mult(charclass("a"), one),
mult(charclass(), one),
mult(charclass("b"), one),
).reduce() == nothing
# conc -> conc
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
).reduce() == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
)
# conc -> mult
assert conc(
mult(charclass("a"), multiplier(bound(3), bound(4))),
).reduce() == mult(charclass("a"), multiplier(bound(3), bound(4)))
# conc -> charclass
assert conc(
mult(charclass("a"), one),
).reduce() == charclass("a")
# sequence squooshing of mults within a conc
# e.g. "[$%\\^]?[$%\\^]" -> "[$%\\^]{1,2}"
assert conc(
mult(charclass("a"), one),
mult(charclass("$%^"), qm),
mult(charclass("$%^"), one),
mult(charclass("b"), one),
).reduce() == conc(
mult(charclass("a"), one),
mult(charclass("$%^"), multiplier(bound(1), bound(2))),
mult(charclass("b"), one)
)
# recursive conc reduction
# (a?)+b -> a*b
assert conc(
mult(
pattern(
conc(
mult(charclass("a"), qm)
)
), plus
),
mult(charclass("b"), one)
).reduce() == conc(
mult(charclass("a"), star),
mult(charclass("b"), one)
).reduce()
# pattern equality
assert pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), one)),
) == pattern(
conc(mult(charclass("b"), one)),
conc(mult(charclass("a"), one)),
)
assert pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("a"), one)),
) == pattern(
conc(mult(charclass("a"), one)),
)
# str(pattern)
assert str(pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("b"), one)),
)) == "a|b"
assert str(pattern(
conc(mult(charclass("a"), one)),
conc(mult(charclass("a"), one)),
)) == "a"
assert str(pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(charclass("f"), one),
mult(
pattern(
conc(
mult(charclass("g"), one),
mult(charclass("h"), one),
mult(charclass("i"), one),
),
conc(
mult(charclass("j"), one),
mult(charclass("k"), one),
mult(charclass("l"), one),
),
), one
),
),
)) == "abc|def(ghi|jkl)"
# pattern.reduce() tests
# pattern -> pattern
# (a{2}b{2}|c{2}d{2}) -> (a{2}b{2}|c{2}d{2})
assert pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), multiplier(bound(2), bound(2))),
),
conc(
mult(charclass("c"), multiplier(bound(2), bound(2))),
mult(charclass("d"), multiplier(bound(2), bound(2))),
),
).reduce() == pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), multiplier(bound(2), bound(2))),
),
conc(
mult(charclass("c"), multiplier(bound(2), bound(2))),
mult(charclass("d"), multiplier(bound(2), bound(2))),
),
)
# pattern -> conc
assert pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), multiplier(bound(2), bound(2))),
),
).reduce() == conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), multiplier(bound(2), bound(2))),
)
# pattern -> mult
assert pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
),
).reduce() == mult(charclass("a"), multiplier(bound(2), bound(2)))
# pattern -> charclass
assert pattern(
conc(
mult(charclass("a"), one),
),
).reduce() == charclass("a")
# special pattern reduction technique.
# 0|[1-9]|a{5,7} -> [0-9]|a{5,7}
assert pattern(
conc(mult(charclass("0"), one)),
conc(mult(charclass("123456789"), one)),
conc(mult(charclass("a"), multiplier(bound(5), bound(7)))),
).reduce() == pattern(
conc(mult(charclass("0123456789"), one)),
conc(mult(charclass("a"), multiplier(bound(5), bound(7)))),
)
# recursive pattern reduction
assert pattern(
conc(mult(charclass("0"), one)),
conc(
mult(
pattern(
conc(mult(charclass("0"), one)),
conc(mult(charclass("123456789"), one)),
conc(mult(charclass("a"), multiplier(bound(5), bound(7)))),
), one
)
)
).reduce() == pattern(
conc(mult(charclass("0"), one)),
conc(
mult(
pattern(
conc(mult(charclass("0123456789"), one)),
conc(mult(charclass("a"), multiplier(bound(5), bound(7)))),
), one
)
)
)
# common prefix reduction of pattern
# a{2}b|a+c -> a(ab|a*c)
assert pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), plus),
mult(charclass("c"), one),
)
).reduce() == conc(
mult(charclass("a"), one),
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
), one
)
)
# pattern parsing
assert pattern.match("abc|def(ghi|jkl)", 0) == (
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
mult(charclass("f"), one),
mult(
pattern(
conc(
mult(charclass("g"), one),
mult(charclass("h"), one),
mult(charclass("i"), one),
),
conc(
mult(charclass("j"), one),
mult(charclass("k"), one),
mult(charclass("l"), one),
),
), one
),
)
), 16
)
# charclass multiplication
# a * 1 = a
assert charclass("a") * one == charclass("a")
# a * {1,3} = a{1,3}
assert charclass("a") * multiplier(bound(1), bound(3)) == mult(charclass("a"), multiplier(bound(1), bound(3)))
# a * {4,} = a{4,}
assert charclass("a") * multiplier(bound(4), inf) == mult(charclass("a"), multiplier(bound(4), inf))
# mult multiplication
# a{2,3} * 1 = a{2,3}
assert mult(
charclass("a"), multiplier(bound(2), bound(3))
) * one == mult(charclass("a"), multiplier(bound(2), bound(3)))
# a{2,3} * {4,5} = a{8,15}
assert mult(
charclass("a"), multiplier(bound(2), bound(3))
) * multiplier(bound(4), bound(5)) == mult(charclass("a"), multiplier(bound(8), bound(15)))
# a{2,} * {2,} = a{4,}
assert mult(
charclass("a"), multiplier(bound(2), inf)
) * multiplier(bound(2), inf) == mult(charclass("a"), multiplier(bound(4), inf))
# conc multiplication
# ab? * {0,1} = (ab?)?
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), qm),
) * qm == mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), qm),
),
), qm
)
# pattern multiplication
# (ab?|ba?) * {2,3} = (ab?|ba?){2,3}
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), qm),
),
conc(
mult(charclass("b"), one),
mult(charclass("a"), qm),
),
) * multiplier(bound(2), bound(3)) == mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), qm),
),
conc(
mult(charclass("b"), one),
mult(charclass("a"), qm),
),
), multiplier(bound(2), bound(3))
)
# bound class tests
assert bound(0).common(inf) == bound(0)
assert bound(1).common(inf) == bound(1)
assert qm.mandatory == bound(0)
assert qm.optional == bound(1)
# multiplier intersection operator tests
assert zero.common(zero) == zero
assert zero.common(qm ) == zero
assert zero.common(one ) == zero
assert zero.common(star) == zero
assert zero.common(plus) == zero
assert qm .common(zero) == zero
assert qm .common(qm ) == qm
assert qm .common(one ) == zero
assert qm .common(star) == qm
assert qm .common(plus) == qm
assert one .common(zero) == zero
assert one .common(qm ) == zero
assert one .common(one ) == one
assert one .common(star) == zero
assert one .common(plus) == one
assert star.common(zero) == zero
assert star.common(qm ) == qm
assert star.common(one ) == zero
assert star.common(star) == star
assert star.common(plus) == star
assert plus.common(zero) == zero
assert plus.common(qm ) == qm
assert plus.common(one ) == one
assert plus.common(star) == star
assert plus.common(plus) == plus
# a{3,4}, a{2,5} -> a{2,3} (with a{1,1}, a{0,2} left over)
assert multiplier(bound(3), bound(4)).common(multiplier(bound(2), bound(5))) == multiplier(bound(2), bound(3))
assert multiplier(bound(3), bound(4)) - multiplier(bound(2), bound(3)) == one
assert multiplier(bound(2), bound(5)) - multiplier(bound(2), bound(3)) == multiplier(bound(0), bound(2))
# a{2,}, a{1,5} -> a{1,5} (with a{1,}, a{0,0} left over)
assert multiplier(bound(2), inf).common(multiplier(bound(1), bound(5))) == multiplier(bound(1), bound(5))
assert multiplier(bound(2), inf) - multiplier(bound(1), bound(5)) == plus
assert multiplier(bound(1), bound(5)) - multiplier(bound(1), bound(5)) == zero
# a{3,}, a{2,} -> a{2,} (with a, epsilon left over)
assert multiplier(bound(3), inf).common(multiplier(bound(2), inf)) == multiplier(bound(2), inf)
assert multiplier(bound(3), inf) - multiplier(bound(2), inf) == one
assert multiplier(bound(2), inf) - multiplier(bound(2), inf) == zero
# a{3,}, a{3,} -> a{3,} (with zero, zero left over)
assert multiplier(bound(3), inf).common(multiplier(bound(3), inf)) == multiplier(bound(3), inf)
assert multiplier(bound(3), inf) - multiplier(bound(3), inf) == zero
# mult intersection ("&") tests
# a & b? = nothing
assert mult(charclass("a"), one) & mult(charclass("b"), qm) == charclass()
assert mult(charclass("a"), one) & mult(charclass("b"), qm) == nothing
# a & a? = a
assert mult(charclass("a"), one) & mult(charclass("a"), qm) == charclass("a")
# a{2} & a{2,} = a{2}
assert mult(charclass("a"), multiplier(bound(2), bound(2))) \
& mult(charclass("a"), multiplier(bound(2), inf)) \
== mult(charclass("a"), multiplier(bound(2), bound(2)))
# a & b -> no intersection.
assert mult(charclass("a"), one) & mult(charclass("b"), one) == nothing
# a & a -> a
assert mult(charclass("a"), one) & mult(charclass("a"), one) == charclass("a")
# a* & a -> a
assert mult(charclass("a"), star) & mult(charclass("a"), one) == charclass("a")
# a* & b* -> emptystring
assert mult(charclass("a"), star) & mult(charclass("b"), star) == emptystring
# a* & a+ -> a+
assert mult(charclass("a"), star) & mult(charclass("a"), plus) == mult(charclass("a"), plus)
# a{3,4} & a{2,5} -> a{2,3}
assert mult(
charclass("a"), multiplier(bound(3), bound(4))
).common(mult(
charclass("a"), multiplier(bound(2), bound(5))
)) == mult(charclass("a"), multiplier(bound(2), bound(3)))
# a{2,} & a{1,5} -> a{1,5}
assert mult(
charclass("a"), multiplier(bound(2), inf)
).common(mult(
charclass("a"), multiplier(bound(1), bound(5))
)) == mult(charclass("a"), multiplier(bound(1), bound(5)))
# a{3,}, a{2,} -> a{2,} (with a, epsilon left over)
assert mult(
charclass("a"), multiplier(bound(3), inf)
).common(mult(
charclass("a"), multiplier(bound(2), inf)
)) == mult(charclass("a"), multiplier(bound(2), inf))
# a{3,}, a{3,} -> a{3,} (with inf, inf left over)
assert mult(
charclass("a"), multiplier(bound(3), inf)
) & mult(
charclass("a"), multiplier(bound(3), inf)
) == mult(charclass("a"), multiplier(bound(3), inf))
# pattern._commonconc(suffix=True) tests
# a | bc -> emptystring
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)._commonconc(suffix=True) == emptystring
# (a|bc) - () = (a|bc)
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
) - emptystring == pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)
# (aa|bca) -> a
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("a"), one),
),
)._commonconc(suffix=True) == conc(mult(charclass("a"), one))
# (aa|bca) - a = (a|bc)
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("a"), one),
),
) - conc(mult(charclass("a"), one)) == pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)
# xyza | abca | a -> a
assert pattern(
conc(
mult(charclass("x"), one),
mult(charclass("y"), one),
mult(charclass("z"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("a"), one),
),
)._commonconc(suffix=True) == conc(mult(charclass("a"), one))
# (xyza|abca|a) - a = (xyz|abc|)
assert pattern(
conc(
mult(charclass("x"), one),
mult(charclass("y"), one),
mult(charclass("z"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("a"), one),
),
) - conc(mult(charclass("a"), one)) == pattern(
emptystring,
conc(
mult(charclass("x"), one),
mult(charclass("y"), one),
mult(charclass("z"), one),
),
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
),
)
# f{2,3}c, fc -> fc
assert pattern(
conc(
mult(charclass("f"), multiplier(bound(2), bound(3))),
mult(charclass("c"), one),
),
conc(
mult(charclass("f"), one),
mult(charclass("c"), one),
),
)._commonconc(suffix=True) == conc(
mult(charclass("f"), one),
mult(charclass("c"), one),
)
# (f{2,3}c|fc) - fc = (f{1,2}|)
assert pattern(
conc(
mult(charclass("f"), multiplier(bound(2), bound(3))),
mult(charclass("c"), one),
),
conc(
mult(charclass("f"), one),
mult(charclass("c"), one),
),
) - conc(
mult(charclass("f"), one),
mult(charclass("c"), one),
) == pattern(
emptystring,
conc(
mult(charclass("f"), multiplier(bound(1), bound(2))),
),
)
# (aa) -> aa
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
)._commonconc(suffix=True) == conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
)
# (aa) - aa = ()
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
),
) - conc(
mult(charclass("a"), one),
mult(charclass("a"), one),
) == pattern(emptystring)
# concatenation tests (__add__())
# empty conc + empty conc
assert emptystring + emptystring == emptystring
# charclass + charclass
# a + b = ab
assert charclass("a") + charclass("b") == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
)
# a + a = a{2}
assert charclass("a") + charclass("a") == mult(charclass("a"), multiplier(bound(2), bound(2)))
# charclass + mult
# a + a = a{2}
assert charclass("a") + mult(charclass("a"), one) == mult(charclass("a"), multiplier(bound(2), bound(2)))
# a + a{2,} = a{3,}
assert charclass("a") + mult(charclass("a"), multiplier(bound(2), inf)) == mult(charclass("a"), multiplier(bound(3), inf))
# a + a{,8} = a{1,9}
assert charclass("a") + mult(charclass("a"), multiplier(bound(0), bound(8))) == mult(charclass("a"), multiplier(bound(1), bound(9)))
# a + b{,8} = ab{,8}
assert charclass("a") + mult(charclass("b"), multiplier(bound(0), bound(8))) == conc(
mult(charclass("a"), one),
mult(charclass("b"), multiplier(bound(0), bound(8))),
)
# mult + charclass
# b + b = b{2}
assert mult(charclass("b"), one) + charclass("b") == mult(charclass("b"), multiplier(bound(2), bound(2)))
# b* + b = b+
assert mult(charclass("b"), star) + charclass("b") == mult(charclass("b"), plus)
# b{,8} + b = b{1,9}
assert mult(charclass("b"), multiplier(bound(0), bound(8))) + charclass("b") == mult(charclass("b"), multiplier(bound(1), bound(9)))
# b{,8} + c = b{,8}c
assert mult(charclass("b"), multiplier(bound(0), bound(8))) + charclass("c") == conc(
mult(charclass("b"), multiplier(bound(0), bound(8))),
mult(charclass("c"), one),
)
# charclass + conc
# a + emptystring = a
assert charclass("a") + emptystring == charclass("a")
# a + bc = abc
assert charclass("a") + conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
) == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
)
# a + ab = a{2}b
assert charclass("a") + conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) == conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
)
# conc + charclass
# emptystring + a = a
assert emptystring + charclass("a") == charclass("a")
# ab + c = abc
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) + charclass("c") == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
)
# ab + b = ab{2}
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) + charclass("b") == conc(
mult(charclass("a"), one),
mult(charclass("b"), multiplier(bound(2), bound(2))),
)
# pattern + charclass
# (a|bd) + c = (a|bd)c
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("d"), one),
),
) + charclass("c") == conc(
mult(
pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("d"), one),
),
), one
),
mult(charclass("c"), one),
)
# (ac{2}|bc+) + c = (ac|bc*)c{2}
assert pattern(
conc(
mult(charclass("a"), one),
mult(charclass("c"), multiplier(bound(2), bound(2))),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), plus),
),
) + charclass("c") == conc(
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("c"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), star),
),
), one
),
mult(charclass("c"), multiplier(bound(2), bound(2))),
)
# charclass + pattern
# a + (b|cd) = a(b|cd)
assert charclass("a") + pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
) == conc(
mult(charclass("a"), one),
mult(
pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
), one
)
)
# a + (a{2}b|a+c) = a{2}(ab|a*c)
assert charclass("a") + pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), plus),
mult(charclass("c"), one),
),
) == conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
), one
),
)
# mult + mult
# a{3,4} + b? = a{3,4}b?
assert mult(charclass("a"), multiplier(bound(3), bound(4))) + mult(charclass("b"), qm) == conc(
mult(charclass("a"), multiplier(bound(3), bound(4))),
mult(charclass("b"), qm),
)
# a* + a{2} = a{2,}
assert mult(charclass("a"), star) + mult(charclass("a"), multiplier(bound(2), bound(2))) == mult(charclass("a"), multiplier(bound(2), inf))
# mult + conc
# a{2} + bc = a{2}bc
assert mult(charclass("a"), multiplier(bound(2), bound(2))) + conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
) == conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
mult(charclass("c"), one),
)
# a? + ab = a{1,2}b
assert mult(charclass("a"), qm) + conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) == conc(
mult(charclass("a"), multiplier(bound(1), bound(2))),
mult(charclass("b"), one),
)
# conc + mult
# ab + c* = abc*
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) + mult(charclass("c"), star) == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), star),
)
# ab + b* = ab+
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) + mult(charclass("b"), star) == conc(
mult(charclass("a"), one),
mult(charclass("b"), plus),
)
# mult + pattern
# a{2,3} + (b|cd) = a{2,3}(b|cd)
assert mult(charclass("a"), multiplier(bound(2), bound(3))) + pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
) == conc(
mult(charclass("a"), multiplier(bound(2), bound(3))),
mult(
pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
), one
)
)
# a{2,3} + (a{2}b|a+c) = a{3,4}(ab|a*c)
assert mult(charclass("a"), multiplier(bound(2), bound(3))) + pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), plus),
mult(charclass("c"), one),
),
) == conc(
mult(charclass("a"), multiplier(bound(3), bound(4))),
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
), one
),
)
# pattern + mult
# (b|cd) + a{2,3} = (b|cd)a{2,3}
assert pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
) + mult(charclass("a"), multiplier(bound(2), bound(3))) == conc(
mult(
pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
), one
),
mult(charclass("a"), multiplier(bound(2), bound(3))),
)
# (ba{2}|ca+) + a{2,3} = (ba|ca*)a{3,4}
assert pattern(
conc(
mult(charclass("b"), one),
mult(charclass("a"), multiplier(bound(2), bound(2))),
),
conc(
mult(charclass("c"), one),
mult(charclass("a"), plus),
),
) + mult(charclass("a"), multiplier(bound(2), bound(3))) == conc(
mult(
pattern(
conc(
mult(charclass("b"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("a"), star),
),
), one
),
mult(charclass("a"), multiplier(bound(3), bound(4))),
)
# conc + conc
# ab + cd = abcd
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) + conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
) == conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
mult(charclass("c"), one),
mult(charclass("d"), one),
)
# ab + bc = ab{2}c
assert conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
) + conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
) == conc(
mult(charclass("a"), one),
mult(charclass("b"), multiplier(bound(2), bound(2))),
mult(charclass("c"), one),
)
# conc + pattern
# za{2,3} + (b|cd) = za{2,3}(b|cd)
assert conc(
mult(charclass("z"), one),
mult(charclass("a"), multiplier(bound(2), bound(3))),
) + pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
) == conc(
mult(charclass("z"), one),
mult(charclass("a"), multiplier(bound(2), bound(3))),
mult(
pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
), one,
)
)
# za{2,3} + (a{2}b|a+c) = za{3,4}(ab|a*c)
assert conc(
mult(charclass("z"), one),
mult(charclass("a"), multiplier(bound(2), bound(3))),
) + pattern(
conc(
mult(charclass("a"), multiplier(bound(2), bound(2))),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), plus),
mult(charclass("c"), one),
),
) == conc(
mult(charclass("z"), one),
mult(charclass("a"), multiplier(bound(3), bound(4))),
mult(
pattern(
conc(
mult(charclass("a"), one),
mult(charclass("b"), one),
),
conc(
mult(charclass("a"), star),
mult(charclass("c"), one),
),
), one
),
)
# pattern + conc
# (b|cd) + za{2,3} = (b|cd)za{2,3}
assert pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
) + conc(
mult(charclass("z"), one),
mult(charclass("a"), multiplier(bound(2), bound(3))),
) == conc(
mult(
pattern(
conc(
mult(charclass("b"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("d"), one),
),
), one
),
mult(charclass("z"), one),
mult(charclass("a"), multiplier(bound(2), bound(3))),
)
# (ba{2}|ca+) + a{2,3}z = (ba|ca*)a{3,4}z
assert pattern(
conc(
mult(charclass("b"), one),
mult(charclass("a"), multiplier(bound(2), bound(2))),
),
conc(
mult(charclass("c"), one),
mult(charclass("a"), plus),
),
) + conc(
mult(charclass("a"), multiplier(bound(2), bound(3))),
mult(charclass("z"), one),
) == conc(
mult(
pattern(
conc(
mult(charclass("b"), one),
mult(charclass("a"), one),
),
conc(
mult(charclass("c"), one),
mult(charclass("a"), star),
),
), one
),
mult(charclass("a"), multiplier(bound(3), bound(4))),
mult(charclass("z"), one),
)
# pattern + pattern
# (a|bc) + (c|de) = (a|bc)(c|de)
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
) + pattern(
conc(
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
),
) == conc(
mult(
pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
), one
),
mult(
pattern(
conc(
mult(charclass("c"), one),
),
conc(
mult(charclass("d"), one),
mult(charclass("e"), one),
),
), one
),
)
# (a|bc) + (a|bc) = (a|bc){2}
assert pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
) + pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
) == mult(
pattern(
conc(
mult(charclass("a"), one),
),
conc(
mult(charclass("b"), one),
mult(charclass("c"), one),
),
), multiplier(bound(2), bound(2))
)
assert nothing.empty()
assert charclass().empty()
assert not dot.empty()
assert not mult(charclass("a"), zero).empty()
assert mult(charclass(), one).empty()
assert not mult(charclass(), qm).empty()
assert conc(mult(charclass("a"), one), mult(charclass(), one)).empty()
assert not conc(mult(charclass("a"), one), mult(charclass(), qm)).empty()
assert pattern().empty()
assert not pattern(conc(mult(charclass("a"), zero))).empty()
assert not pattern(conc(mult(charclass(), qm))).empty()
assert str(parse("a.b")) == "a.b" # not "a[ab]b"
assert str(parse("\\d{4}")) == "\\d{4}"
# Intersection tests
assert str(parse("a*") & parse("b*")) == ""
assert str(parse("a") & parse("b")) == "[]"
assert str(parse("\\d") & parse(".")) == "\\d"
assert str(parse("\\d{2}") & parse("0.")) == "0\\d"
assert str(parse("\\d{2}") & parse("19.*")) == "19"
assert str(parse("\\d{3}") & parse("19.*")) == "19\\d"
assert str(parse("abc...") & parse("...def")) == "abcdef"
assert str(parse("[bc]*[ab]*") & parse("[ab]*[bc]*")) == "([ab]*a|[bc]*c)?b*"
assert str(parse("\\W*") & parse("[a-g0-8$%\\^]+") & parse("[^d]{2,8}")) == "[$%\\^]{2,8}"
assert str(parse("\\d{4}-\\d{2}-\\d{2}") & parse("19.*")) == "19\\d\\d-\\d\\d-\\d\\d"
# Reduction tests
# ARGH, they need to be FSMed!!
assert str(parse("(|(|(|(|(|(|[$%\^])[$%\^])[$%\^])[$%\^])[$%\^])[$%\^])[$%\^][$%\^]")) == "[$%\^]{2,8}"
assert str(parse("[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]")) == "[0-9A-Fa-f]{3}"
long = \
"(aa|bb*aa)a*|((ab|bb*ab)|(aa|bb*aa)a*b)((ab|bb*ab)|(aa|bb*aa)a*b)*" + \
"(aa|bb*aa)a*|((ab|bb*ab)|(aa|bb*aa)a*b)((ab|bb*ab)|(aa|bb*aa)a*b)*"
assert str(parse(".*") & parse(long).reduce()) == "[ab]*a[ab]"
short = "[ab]*a?b*|[ab]*b?a*"
assert str(parse(".*") & parse(short).reduce()) == "[ab]*"
# DEFECT: "0{2}|1{2}" was erroneously reduced() to "[01]{2}"
bad = parse("0{2}|1{2}").fsm({"0", "1", otherchars})
assert bad.accepts("00")
assert bad.accepts("11")
assert not bad.accepts("01")
assert str(parse("0|[1-9]|ab")) == "\d|ab"
# lego.alphabet() should include "otherchars"
assert parse("").alphabet() == {otherchars}
# You should be able to fsm() a single lego piece without supplying a specific
# alphabet. That should be determinable from context.
assert str(parse("a.b").fsm().lego()) == "a.b" # not "a[ab]b"
# A suspiciously familiar example
bad = parse("0{2}|1{2}").fsm()
assert bad.accepts("00")
assert bad.accepts("11")
assert not bad.accepts("01")
assert str(parse("0|[1-9]|ab")) == "\d|ab"
# everythingbut().
# Regexes are usually gibberish but we make a few claims
a = parse("a")
notA = a.everythingbut().fsm()
assert notA.accepts("")
assert not notA.accepts("a")
assert notA.accepts("aa")
# everythingbut(), called twice, should take us back to where we started.
beer = parse("beer")
notBeer = beer.everythingbut()
beer2 = notBeer.everythingbut()
assert str(beer2) == "beer"
# ".*" becomes "[]" and vice versa under this call.
everything = parse(".*")
assert str(everything.everythingbut()) == str(nothing)
assert str(nothing.everythingbut()) == str(everything)
# epsilon reduction in patterns.
assert parse("|(ab)*|def").reduce() == parse("(ab)*|def")
assert parse("|(ab)+|def").reduce() == parse("(ab)*|def")
assert parse("|.+").reduce() == parse(".*")
assert parse("|a+|b+") in {parse("a+|b*"), parse("a*|b+")}
print("OK")
| Honghe/greenery | lego.py | lego.py | py | 95,333 | python | en | code | null | github-code | 6 |
| 2156579897 |
from crsql_correctness import connect, close, min_db_v
from pprint import pprint
# exploratory tests to debug changes
def sync_left_to_right(l, r, since):
changes = l.execute(
"SELECT * FROM crsql_changes WHERE db_version > ? ORDER BY db_version, seq ASC", (since,))
ret = 0
for change in changes:
ret = change[5]
r.execute(
"INSERT INTO crsql_changes VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", change)
r.commit()
return ret
def test_sync():
def setup():
c = connect(":memory:")
c.execute("CREATE TABLE item (id PRIMARY KEY NOT NULL, width INTEGER, height INTEGER, name TEXT, dscription TEXT, weight INTEGER)")
c.execute("SELECT crsql_as_crr('item')")
c.commit()
return c
def insert_item(c, args):
c.execute("INSERT INTO item VALUES (?, ?, ?, ?, ?, ?)", args)
c.commit()
a = setup()
b = setup()
insert_item(a, ('9838abbe-6fa8-4755-af2b-9f0484888809',
None, None, None, None, None))
insert_item(b, ('f94ef174-459f-4b07-bc7a-c1104a97ceb5',
None, None, None, None, None))
since_a = sync_left_to_right(a, b, 0)
a.execute("DELETE FROM item WHERE id = '9838abbe-6fa8-4755-af2b-9f0484888809'")
a.commit()
insert_item(a, ('d5653f10-b858-46c7-97e5-5660eca47d28',
None, None, None, None, None))
sync_left_to_right(a, b, since_a)
sync_left_to_right(b, a, 0)
# pprint("A")
# pprint(a.execute("SELECT * FROM item").fetchall())
# pprint("B")
# pprint(b.execute("SELECT * FROM item").fetchall())
# pprint("A changes")
# pprint(a.execute("SELECT * FROM crsql_changes").fetchall())
| vlcn-io/cr-sqlite | py/correctness/tests/test_sandbox.py | test_sandbox.py | py | 1,707 | python | en | code | 2,036 | github-code | 6 |
| 75188719226 |
# The starting cell's distance is taken as 1
# Because this BFS expands the nearest cells first, a cell whose shortest distance is already recorded is never updated again
from collections import deque
def bfs(x, y):
    # Use deque from the standard library as the queue
queue = deque()
    # Enqueue the starting coordinates
queue.append((x, y))
    # Repeat until the queue is empty
while queue:
x, y = queue.popleft()
        # Check the four neighbouring positions of the current cell
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
            # Ignore positions outside the maze
if nx < 0 or nx >= N or ny < 0 or ny >= M:
continue
            # Ignore walls
if graph[nx][ny] == 0:
continue
            # Record the shortest distance only on the first visit to this cell
if graph[nx][ny] == 1:
graph[nx][ny] = graph[x][y] + 1
queue.append((nx, ny))
    # Return the shortest distance to the bottom-right cell
return graph[N-1][M-1]
N, M = map(int, input().split())
# Read the maze into a 2D list
graph = []
for i in range(N):
graph.append(list(map(int, input())))
# Up, down, left, right
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# Print the result of running BFS
print(bfs(0, 0))
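# Worked example (added for illustration, hedged): with the input
#   3 3
#   101
#   111
#   011
# every reachable path cell is labelled with its distance from (0, 0); the
# start cell itself gets overwritten to 3 by the revisit from (1, 0), which is
# harmless, and bfs(0, 0) prints 5, the shortest path length to the
# bottom-right cell.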
| zacinthepark/Problem-Solving-Notes | na/02/DFS-BFS/미로탈출.py | 미로탈출.py | py | 1,348 | python | ko | code | 0 | github-code | 6 |
| 16351053586 |
from bs4 import BeautifulSoup as bs
import requests
from cardBeta import CardBeta
from cardWitj import CardWitj
urls = {
'beta':
'https://beta.gouv.fr/recrutement/developpement?',
'witj':
'https://www.welcometothejungle.com/fr/companies/communaute-beta-gouv/jobs'
}
divs = {'beta': 'fr-card__body', 'witj': 'sc-1peil1v-4'}
class Crawler:
"""Crawler class"""
def __init__(self, type):
self.type = type
self.stack = { 'total' : 0 }
def run(self):
print('... start crawl ' + self.type)
response = requests.get(urls[self.type])
html = response.content
soup = bs(html, "lxml")
if hasattr(self, f'crawl_{self.type}'):
getattr(self, f'crawl_{self.type}')(soup)
def crawl_witj(self, soup):
myCards = []
print(' title : ' + soup.title.get_text())
cards = soup.find_all("div", class_=divs[self.type])
print(' total found : {}'.format(len(cards)))
for data in cards:
myCard = CardWitj(data)
myCards.append(myCard)
print(' >>> loop myCards')
for card in myCards:
result = card.loadPage()
for key in result:
if key in self.stack :
self.stack[key] += 1
self.stack['total'] += 1
else :
self.stack[key] = 1
print(' resume stack ::::')
for key in self.stack:
print(' tech : {} : {}'.format(key, self.stack[key]))
def crawl_beta(self, soup):
myCards = []
print(' title : ' + soup.title.get_text())
cards = soup.find_all("div", class_=divs[self.type])
print(' total found : {}'.format(len(cards)))
for data in cards:
myCard = CardBeta(data)
myCards.append(myCard)
print(' >>> loop myCards')
for card in myCards:
card.loadPage()
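# Hedged usage sketch (not part of the original module): run one crawl per
# configured source; "beta" and "witj" are the keys of the urls/divs dicts above.
if __name__ == '__main__':
    Crawler('beta').run()
    Crawler('witj').run()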
| apimobi/witj-beta-replit | crawler.py | crawler.py | py | 1,963 | python | en | code | 0 | github-code | 6 |
| 14956977226 |
import argparse
import os
from scipy.interpolate import griddata
import numpy as np
from tqdm import tqdm
import cv2
import scipy.ndimage as sp
import matplotlib.pyplot as plt
from matplotlib import cm, patches
# Argument Parser
parser = argparse.ArgumentParser(description="Time-series Heatmap Generator")
parser.add_argument(
"--dataset_path",
type=str,
default="./data/dataset_08-12-2023_05-02-59",
help="Folder containing time-series data",
)
args = parser.parse_args()
# Data Load
dataset_path = args.dataset_path
data_file_path = os.path.join(dataset_path, "timeseries.txt")
data = np.loadtxt(data_file_path)
# Split Data
pos = data[:, :3]
force = data[:, -3:]
Frms = np.sqrt(np.sum(force**2, axis=1))
# Video Writer Setup
# fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# out = cv2.VideoWriter("heatmap_video.mp4", fourcc, 20.0, (640, 480))
# Gaussian Smoothing
# Circle Data
point1 = [0.61346058, 0.07027999, 0.05241557] # magnet
radius1 = 0.01732 / 2
point2 = [0.60665408, 0.09511717, 0.05193599] # 3d print
radius2 = 0.005
pos_x = pos[Frms > 5, 1]
pos_y = pos[Frms > 5, 0]
pos_z = pos[Frms > 5, 2]
print("pos_y", pos_y.std())
print("pos_x", pos_x.std())
x_min, x_max = np.min(pos_x), np.max(pos_x)
y_min, y_max = np.min(pos_y), np.max(pos_y)
dim_x = 30
dim_y = 30
# Frms = Frms[pos[:, 2] < 0.055]
pos_palp = pos[pos[:, 2] < 0.06]
plt.axis("equal")
x = np.linspace(x_min, x_max, dim_x)
y = np.linspace(y_min, y_max, dim_y)
X, Y = np.meshgrid(x, y)
# Interpolate (x,y,z) points [mat] over a normal (x,y) grid [X,Y]
# Depending on your "error", you may be able to use other methods
Z = griddata((pos_x, pos_y), pos_z, (X, Y), method="nearest")
plt.pcolormesh(X, Y, Z)
# plt.scatter(pos_palp[:, 1], pos_palp[:, 0], marker="x")
# Add circles
circle1 = patches.Circle(
(point1[1], point1[0]),
radius1,
fill=False,
color="blue",
)
circle2 = patches.Circle(
(point2[1], point2[0]),
radius2,
fill=False,
color="green",
)
# plt.gca().add_patch(circle1)
# plt.gca().add_patch(circle2)
plt.title("Heatmap with smoothing")
plt.xlabel("Y (m)")
plt.ylabel("X (m)")
cbar = plt.colorbar()
cbar.set_label("Z (m)", rotation=270, labelpad=15)
plt.draw()
# Convert to OpenCV
fig = plt.gcf()
fig.canvas.draw()
img_arr = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img_arr = img_arr.reshape(fig.canvas.get_width_height()[::-1] + (3,))
img_arr = cv2.cvtColor(img_arr, cv2.COLOR_RGB2BGR)
dataset_name = dataset_path.split("/")[-1]
cv2.imwrite(f"{dataset_path}/{dataset_name}_2d_heatmap.png", img_arr)
| raghavauppuluri13/robot-palpation | rpal/scripts/visualize_heatmap.py | visualize_heatmap.py | py | 2,568 | python | en | code | 0 | github-code | 6 |
| 10958770997 |
import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.data import Dataset, DataLoader
from torchvision.io import read_image
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.io import read_image, ImageReadMode
np.random.seed(0)
DATA_FOLDER_PATH = "YOURPATH\\\Animals_with_Attributes2\\"
JPEGIMAGES_FOLDER_PATH = "YOURPATH\\JPEGImages\\"
labels_dirs = os.listdir(JPEGIMAGES_FOLDER_PATH)
ANNOTATIONS_FILENAME = 'annotations.csv'
def find_num_images_per_label(img_dir = JPEGIMAGES_FOLDER_PATH) -> tuple[dict,dict]:
"""
USEFUL FOR SAMPLING.
Return a dict with keys as the 50 labels, and values being the number of images in each subdirectory corresponding to label
and a second dict with the relative numbers (proportion) for every label compared to the total number of images (useful for sampling)"""
labels_dirs = os.listdir(img_dir)
num_images_per_label = dict.fromkeys(labels_dirs)
proportions_images_per_label = dict.fromkeys(labels_dirs)
total_num_images = 0
# Update absolute number of images per label
for i, label in enumerate(labels_dirs) :
specific_label_path = os.path.join(img_dir, labels_dirs[i])
num_images_label = len(os.listdir(specific_label_path))
total_num_images += num_images_label
num_images_per_label[label] = num_images_label
# Update relative number of images per label (proportion)
for i, label in enumerate(labels_dirs) :
num_images_label = num_images_per_label[label]
proportion_label = round(num_images_label / total_num_images, 4)
proportions_images_per_label[label] = proportion_label
return num_images_per_label, proportions_images_per_label
labels_dict = {}
with open(DATA_FOLDER_PATH+"classes.txt") as f:
for line in f:
(key,val) = line.split()
labels_dict[val] = int(key)-1
print(labels_dict)
def create_annotations_csv_file(annotations_filename = ANNOTATIONS_FILENAME, img_dir = JPEGIMAGES_FOLDER_PATH) :
"""
Create a csv annotations_file, annotations.csv, with two columns, in the format :
path/to/image, label
The annotation csv is necessary for DataLoader.
"""
labels_dirs:list = os.listdir(img_dir)
if os.path.exists(annotations_filename):
os.remove(annotations_filename)
print(f'Deleted existent {ANNOTATIONS_FILENAME} file.\n ---------------------------')
with open(annotations_filename, 'w', newline='') as file :
writer = csv.writer(file, dialect='excel', delimiter=',')
for i, label in enumerate(labels_dirs) :
specific_label_path = os.path.join(img_dir, label)
images_names = os.listdir(specific_label_path)
for j, image_name in enumerate(images_names):
                full_path_to_img= os.path.join(label, image_name)  # stored relative to img_dir; __getitem__ joins img_dir back on
row = [full_path_to_img, label]
writer.writerow(row)
    print(f'Successfully created {ANNOTATIONS_FILENAME} file.')
create_annotations_csv_file()
class AWA2Dataset(Dataset): # Dataset class to serve as input for the DataLoader.
"""
Dataset class to serve as input for the DataLoader.
Implements all the required methods and more.
"""
def __init__(self, annotations_file=ANNOTATIONS_FILENAME, img_dir=JPEGIMAGES_FOLDER_PATH,
transform=None, target_transform=None):
        self.img_labels = pd.read_csv(annotations_file, header=None)  # annotations.csv is written without a header row
self.img_dir = img_dir
self.transform = transform
self.target_transform = target_transform
numbers_infos_dicts: tuple[dict,dict] = find_num_images_per_label(img_dir=JPEGIMAGES_FOLDER_PATH)
self.num_images_per_label = numbers_infos_dicts[0]
self.proportions_images_per_label = numbers_infos_dicts[1]
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
# img_path = self.img_labels.iloc[idx, 0]
key = self.img_labels.iloc[idx, 1]
# Mapping the labels from string to tensor
label = labels_dict[key]
image = read_image(path = img_path, mode = ImageReadMode.RGB)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
class Subset_(AWA2Dataset) :
def __init__(self, dataset, indices, transform=None):
super().__init__()
self.dataset = dataset
self.indices = indices
self.transform = transform
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
original_index_in_AWA2Dataset = self.indices[index]
image, label = self.dataset[original_index_in_AWA2Dataset]
if self.transform:
image = self.transform(image)
return image, label
'''
Procedure to Create Dataloader objects, and train-test split
'''
# With Data augmentation to remedy overfitting
transforms_pipeline_train = transforms.Compose([
## Input size
transforms.ToPILImage(),
transforms.Resize((256,256)),
## Data augmentation
transforms.RandomRotation(15),
transforms.RandomHorizontalFlip(p=0.4),
transforms.ColorJitter(brightness=0.2,
contrast=0.2,
saturation=0.2,
hue=0.1),
transforms.RandomCrop((224,224)),
## Normalize
transforms.ToTensor(),
transforms.Normalize(mean = [0.4643, 0.4640, 0.3985] , std=[0.2521, 0.2425, 0.2538]) # real mean and std of AwA2
])
transforms_pipeline_test = transforms.Compose([
## Input size
transforms.ToPILImage(),
transforms.Resize((256,256)),
transforms.CenterCrop((224,224)),
## Normalize
transforms.ToTensor(), # Already a tensor as implemented in Dataset class with the
transforms.Normalize(mean = [0.4643, 0.4640, 0.3985] , std=[0.2521, 0.2425, 0.2538]) # real mean and std of AwA2
])
# Initialize dataset and train/valid/test split
from sklearn.model_selection import train_test_split
dataset = AWA2Dataset()
n_images = len(dataset)
# Split all indices into training/testing sets
train_indices, test_indices = train_test_split(range(n_images), test_size=0.2, random_state=1)
# Split training indices into training/validation sets.
train_indices, valid_indices = train_test_split(train_indices, test_size=0.2, random_state=1)
# Initialize the 3 DataSet objects (as Subset_) and apply the relevant Transforms to each subset (train/test/valid)
train_data = Subset_(dataset, train_indices, transform = transforms_pipeline_train)
valid_data = Subset_(dataset, valid_indices, transform = transforms_pipeline_test)
test_data = Subset_(dataset, test_indices, transform = transforms_pipeline_test)
# Initalize DataLoaders
batch_size = 32
train_loader = DataLoader(dataset = train_data, batch_size=batch_size, shuffle=True, num_workers=6, pin_memory=True)
valid_loader = DataLoader(dataset = valid_data, batch_size=batch_size, shuffle=False, num_workers=6, pin_memory=True)
test_loader = DataLoader(dataset = test_data, batch_size=batch_size, shuffle=False, num_workers=6, pin_memory=True)
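# Hedged sketch (not in the original file): the per-label counts gathered by
# AWA2Dataset can back a class-balanced sampler, which is what the "USEFUL FOR
# SAMPLING" docstring hints at. Per-sample weight = 1 / count(label of sample).
from torch.utils.data import WeightedRandomSampler

label_counts = dataset.num_images_per_label
sample_weights = [1.0 / label_counts[dataset.img_labels.iloc[i, 1]] for i in train_indices]
balanced_sampler = WeightedRandomSampler(weights=sample_weights,
                                         num_samples=len(train_indices),
                                         replacement=True)
# A sampler replaces shuffle=True in the training DataLoader, e.g.:
# train_loader = DataLoader(train_data, batch_size=batch_size, sampler=balanced_sampler,
#                           num_workers=6, pin_memory=True)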
| K-kiron/animal-detect | Helpers/AWA2_Dataloader.py | AWA2_Dataloader.py | py | 7,864 | python | en | code | 1 | github-code | 6 |
| 26113397145 |
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "08/09/2017"
import weakref
from silx.gui import qt
from silx.gui.icons import getQIcon
from .. import actions
class ViewpointToolButton(qt.QToolButton):
"""A toolbutton with a drop-down list of ways to reset the viewpoint.
:param parent: See :class:`QToolButton`
"""
def __init__(self, parent=None):
super(ViewpointToolButton, self).__init__(parent)
self._plot3DRef = None
menu = qt.QMenu(self)
menu.addAction(actions.viewpoint.FrontViewpointAction(parent=self))
menu.addAction(actions.viewpoint.BackViewpointAction(parent=self))
menu.addAction(actions.viewpoint.TopViewpointAction(parent=self))
menu.addAction(actions.viewpoint.BottomViewpointAction(parent=self))
menu.addAction(actions.viewpoint.RightViewpointAction(parent=self))
menu.addAction(actions.viewpoint.LeftViewpointAction(parent=self))
menu.addAction(actions.viewpoint.SideViewpointAction(parent=self))
self.setMenu(menu)
self.setPopupMode(qt.QToolButton.InstantPopup)
self.setIcon(getQIcon('cube'))
self.setToolTip('Reset the viewpoint to a defined position')
def setPlot3DWidget(self, widget):
"""Set the Plot3DWidget this toolbar is associated with
:param ~silx.gui.plot3d.Plot3DWidget.Plot3DWidget widget:
The widget to control
"""
self._plot3DRef = None if widget is None else weakref.ref(widget)
for action in self.menu().actions():
action.setPlot3DWidget(widget)
def getPlot3DWidget(self):
"""Return the Plot3DWidget associated to this toolbar.
If no widget is associated, it returns None.
:rtype: ~silx.gui.plot3d.Plot3DWidget.Plot3DWidget or None
"""
return None if self._plot3DRef is None else self._plot3DRef()
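# Hedged usage sketch (not part of silx): the button is intended to sit in a
# toolbar next to an existing Plot3DWidget; "toolbar" and "plot3d_widget" below
# are assumed objects from the embedding application.
# button = ViewpointToolButton(parent=toolbar)
# button.setPlot3DWidget(plot3d_widget)
# toolbar.addWidget(button)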
| silx-kit/silx | src/silx/gui/plot3d/tools/ViewpointTools.py | ViewpointTools.py | py | 1,903 | python | en | code | 106 | github-code | 6 |
| 27002025081 |
from work1_wangb import DataSampling_wangb
from work2_wangb import foo_wangb
from work3_wangb import weather_wangb
def show():
print(u"请输入数字abc来选择查看作业1~3,输入字母d退出程序")
while True:
try:
x = str(input())
if x == 'a':
DataSampling_wangb.show()
elif x == 'b':
foo_wangb.show()
elif x == 'c':
weather_wangb.show()
elif x == 'd':
break
else:
print(u'字母的范围是a,b,c')
except ValueError:
print(u'请输入一个字母')
if __name__ == '__main__':
show()
| wanghan79/2023_Python | python_wangb/theLastwork_wangb.py | theLastwork_wangb.py | py | 717 | python | en | code | 8 | github-code | 6 |
| 14431348416 |
from concurrent import futures
import threading
words = ['hello', 'world']
result = []
def letter_by_letter(my_word):
for letter in my_word:
# proof that we are using 2 threads
print(threading.current_thread().getName())
result.append(letter)
# thanks to `with`, script will wait until all threads in executor stops
with futures.ThreadPoolExecutor(max_workers=2) as executor:
[executor.submit(letter_by_letter, word) for word in words]
print(''.join(result))
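# Hedged variant (not in the original file): executor.map returns results in the
# order of the inputs, so the joined output is deterministic even with two workers.
def collect_letters(my_word):
    return list(my_word)

with futures.ThreadPoolExecutor(max_workers=2) as executor:
    ordered = list(executor.map(collect_letters, words))
print(''.join(letter for word in ordered for letter in word))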
| cuZzior/python-multithreading-helloworld | hello_world.py | hello_world.py | py | 498 | python | en | code | 0 | github-code | 6 |
| 42156059489 |
import pytest
import responses
from repositories.app import APP
@pytest.fixture
def client():
with APP.test_client() as client:
APP.extensions["cache"].clear()
yield client
@responses.activate
def test_get_repo(client):
url = f"https://api.github.com/repos/owner/repo"
response = {
"full_name": "test/name",
"description": "description",
"clone_url": "clone url",
"stargazers_count": 500,
"created_at": "2020-01-17T22:24:45Z",
}
responses.add(responses.GET, url, json=response)
r = client.get("/repositories/owner/repo")
assert r.get_json() == {
"fullName": "test/name",
"description": "description",
"cloneUrl": "clone url",
"stars": 500,
"createdAt": "2020-01-17T22:24:45+00:00",
}
assert r.status_code == 200
assert r.is_json
@responses.activate
def test_404_error(client):
url = f"https://api.github.com/repos/owner/repo"
responses.add(responses.GET, url, status=404)
r = client.get("/repositories/owner/repo")
assert r.get_json() == {
"status": 404,
"error": "Not Found",
"message": "requested repository does not exist",
}
assert r.status_code == 404
@responses.activate
def test_500_error(client):
url = f"https://api.github.com/repos/owner/repo"
responses.add(responses.GET, url, status=500)
r = client.get("/repositories/owner/repo")
assert r.get_json() == {
"status": 500,
"error": "Internal Server Error",
"message": "the server encountered an unexpected internal server error",
}
assert r.status_code == 500
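# Hedged sketch (not in the original tests): the two error cases above could be
# folded into one parametrized test; the assertions mirror those already made.
# @pytest.mark.parametrize("status,error", [(404, "Not Found"), (500, "Internal Server Error")])
# @responses.activate
# def test_error_status(client, status, error):
#     responses.add(responses.GET, "https://api.github.com/repos/owner/repo", status=status)
#     r = client.get("/repositories/owner/repo")
#     assert r.status_code == status
#     assert r.get_json()["error"] == error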
| lukaszmenc/get-repository-data | tests/test_app.py | test_app.py | py | 1,667 | python | en | code | 0 | github-code | 6 |
| 23850509915 |
from datasets import load_dataset,load_metric
from transformers import AutoTokenizer,AutoModelForSeq2SeqLM,Seq2SeqTrainingArguments,DataCollatorForSeq2Seq,Seq2SeqTrainer
import numpy as np
metric=load_metric("BLEU.py")
max_input_length = 64
max_target_length = 64
src_lang = "zh"
tag_lang = "en"
model_path = "Helsinki-NLP/opus-mt-zh-en"
# model_path = "translations/checkpoint-1500/"
batch_size = 4
learning_rate = 1e-5
output_dir = "translations"
def preprocess_function(examples):
inputs = [eval(ex)[src_lang] for ex in examples["text"]]
targets = [eval(ex)[tag_lang] for ex in examples["text"]]
model_inputs=tokenizer(inputs,max_length=max_input_length,truncation=True)
with tokenizer.as_target_tokenizer():
labels=tokenizer(targets,max_length=max_target_length,truncation=True)
model_inputs["labels"]=labels["input_ids"]
return model_inputs
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {"bleu": result["score"]}
print(result)
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
train_dataset = load_dataset("text",data_files="data/train.txt")
val_dataset = load_dataset("text",data_files="data/val.txt")
tokenizer = AutoTokenizer.from_pretrained(model_path)
tokenized_train_datasets = train_dataset.map(preprocess_function, batched=True)
tokenized_val_datasets = val_dataset.map(preprocess_function, batched=True)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
args = Seq2SeqTrainingArguments(
auto_find_batch_size = True,
learning_rate = learning_rate,
output_dir = output_dir,
predict_with_generate=True
)
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_train_datasets["train"],
eval_dataset=tokenized_val_datasets["train"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics
)
trainer.train()
trainer.predict(test_dataset=tokenized_val_datasets["train"])
| Scpjoker/NLP-Course-Homework-2022 | translate.py | translate.py | py | 2,866 | python | en | code | 1 | github-code | 6 |
| 3709328599 |
import os
from cloudservice import add_file, add_dir, get_dir_subs, get_root_dir_id
from pathlib import Path
import pandas as pd
def test():
uploadfile(os.path.join('我文件夹', 'test1.docx'), dirid=39, projid=36)
print()
def create_dir_test():
add_dir('addsub', 39, 36)
def uploadfile(fpath, dirid, projid):
# fpath = os.path.join(config.batch_file_upload_root, relative_fpath)
fdir, fname = os.path.split(fpath)
ftype = os.path.splitext(fname)[-1]
fsize = os.path.getsize(fpath)
fdata = {
"name": fname,
"remark": "",
"keyWord": "",
"abstract": "",
"url": fpath,
"fileSize": fsize,
"fileType": ftype,
"directoryId": dirid,
"creatorId": 1,
"uploaderId": 0,
"newWords": "",
"wordFrequency": "",
"phrases": ""
}
r = add_file(fdata, projid)
return r
def do_batch_upload(dpath: Path, projid, rootid):
for thing in dpath.iterdir():
        # Recurse into sub-directories
if thing.is_dir():
name = str(thing).split('\\')[-1]
            if name.startswith('__'):  # skip names starting with a double underscore
print('skip ' + str(thing))
continue
do_batch_upload(thing, projid, get_dirid(str(thing), rootid, projid))
        # Upload plain files
if thing.is_file():
try:
uploadfile(str(thing), rootid, projid)
print('upload ' + str(thing))
except:
try:
print('failed ' + str(thing))
except:
print('solid failed')
# if exist return id, if not exist create it then return id
def get_dirid(p, curdirid, projid):
subs = get_dir_subs(curdirid, projid)
for sd in subs:
if sd['name'] == p.split('\\')[-1]:
return sd['id']
    # If nothing was returned, the directory does not exist yet, so create it
createname = p.split('\\')[-1]
add_dir(createname, curdirid, projid)
print('create ' + p)
    # Then look up the id of the newly created directory
subs = get_dir_subs(curdirid, projid)
for sd in subs:
if sd['name'] == createname:
return sd['id']
return 0
if __name__ == '__main__':
pass
# do_batch_upload(Path(r'F:\402\004 小洋山资料备份-晓莉'), 240, 42)
# do_batch_upload(Path(r'F:\402\testupload'), 36, 200)
# do_batch_upload(Path(r'F:\402\001 交响乐团20130311需合并'), 434, 202)
# do_batch_upload(Path(r'F:\dfyyfile\东方医院'), projid=230, rootid=2211)
# do_batch_upload(Path(r'D:\技术群文档'), projid=687, rootid=2370)
# http:\\10.6.0.50:6789\files\工程资料 01\01 工程资料\404\008 解放日报-张雷\1.txt
# do_batch_upload(Path(r'\\192.168.11.70\工程资料 02\03 工程资料\404\国金资料'), projid=183, rootid=4000)
# uploadfile(r'E:\work\论文\空调故障诊断与风险评估.pdf',projid=33,dirid=38292)
# proj_infos = [['401', '001 中国馆', 196]]
# proj_infos = pd.read_csv(r'.\projs.csv')
# for indx, info in proj_infos.iterrows():
# subdir = str(info['sub'])
# projname = info['name']
# projid = info['pid']
#
# pathstr = os.path.join(r'\\192.168.11.70\工程资料 01\01 工程资料', subdir, projname)
# test = Path(pathstr)
#
# try:
# add_dir(projname, None, projid)
# except:
# pass
# rootid = get_root_dir_id(projid)
#
# do_batch_upload(Path(pathstr), projid=projid, rootid=rootid)
| pengyang486868/PY-read-Document | batch_upload.py | batch_upload.py | py | 3,549 | python | en | code | 0 | github-code | 6 |
| 31108358568 |
import tushare as ts
import pandas as pd
# do not wrap the output when there are many columns
pd.set_option('expand_frame_repr',False)
# show all columns
pd.set_option('display.max_columns', None)
'''
Created on 2020-12-24
@author: My
'''
ts.set_token('b869861b624139897d87db589b6782ca0313e0e9378b2dd73a4baff5')
pro=ts.pro_api()
#data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
"""stock='300001.SZ'
df=pro.daily(ts_code=stock,
start_date='20091001',
end_date='20161214')
df.rename(columns={'trade_date':'date'},inplace=True)
print(df)
df.to_csv('./data/日行情_特锐德_tushare.csv',
encoding='gbk',
index=False)"""
df=pd.read_csv('./data/日行情_特锐德_tushare.csv',encoding='gbk')
df.sort_values(by=['date',],inplace=True)
df['pct_chg']=df['pct_chg']/100.0
df['pct_chg_2']=df['close'].pct_change()
print(df[abs(df['pct_chg_2']-df['pct_chg'])>0.0001])
del df['pct_chg_2']
df['factor']=(df['pct_chg']+1).cumprod()
#print(df)
initi_price=df.iloc[0]['close']/df['factor'].iloc[0]
#print(initi_price)
df['close_post']=initi_price*df['factor']
#print(df)
initi_price_pre=df.iloc[-1]['close']/df['factor'].iloc[-1]
df['close_pre']=initi_price_pre*df['factor']
#print(df)
#df.sort_values(by=['date'],inplace=True)
print(df)
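# Note added for clarity (hedged interpretation): tushare's pct_chg is computed
# against the dividend/split-adjusted previous close, while close.pct_change()
# uses the raw close, so the rows printed by the comparison above are the
# ex-dividend/split days. Cumulating (1 + pct_chg) therefore rebuilds a
# continuous return path; re-basing it on the first close gives the
# backward-adjusted series (close_post) and re-basing on the last close gives
# the forward-adjusted series (close_pre).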
| geekzhp/zhpLiangHua | tmp/tushareStudy.py | tushareStudy.py | py | 1,356 | python | en | code | 0 | github-code | 6 |
| 41969655941 |
import cv2 as cv
src = cv.imread("./img_input/266679.png") #读取图片
# 新建一个窗口并展示
cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src)
cv.waitKey(0)
cv.destroyAllWindows()
print("hello")
| RMVision/study-opencv | chapter01/test.py | test.py | py | 237 | python | zh | code | 1 | github-code | 6 |
| 75226774588 |
import logging
from kiteconnect import KiteConnect
import datetime
import pymongo
instrument_token = "738561"
from_date = "2021-04-01"
to_date = "2021-06-30"
interval = '5minute'
logging.basicConfig(level=logging.DEBUG)
api_key = "kpgos7e4vbsaam5x"
api_secret = "t9092opsldr1huxk1bgopmitovurftto"
request_token = "qRQhzRYukvQetbXDhiRYJI4XgLhwX51k"
access_token = "gP5gr51tDMpYiPBKTH95oNluvzS20c6Y"
kite = KiteConnect(api_key=api_key)
# data = kite.generate_session(request_token, api_secret=api_secret)
# print(data)
kite.set_access_token(access_token)
print(kite.quote(['NSE:INFY']))
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
functional_col =myclient["core"]["functional"]
functional_data = {}
functional_data['description'] = 'Price limit for trading'
functional_data['variable'] = 'price_limit'
functional_data['values'] = 20
functional_col.insert_one(functional_data)
#print(kite.historical_data(instrument_token, from_date, to_date, interval, continuous=False, oi=True))
# print(datetime.datetime.now().strftime('%H:%M'))
# print(datetime.datetime.strptime('13:19', '%H:%M').strftime(('%H:%M')))
# print(datetime.datetime.now().strftime('%H:%M') == datetime.datetime.strptime('13:19', '%H:%M').strftime(('%H:%M')))
| prashanth470/trading | source/sample.py | sample.py | py | 1,284 | python | en | code | 0 | github-code | 6 |
| 34572128931 |
import random,server,time,istatistik,settings
import sqlite3 as sql
server_list=server.Server()
patlayan_power=6.5;kartopu_power=7;oyuk_power=2
_35power=10;_25power=9;_15power=5
def randomplayer():
global first,two
while True:
first=random.choice(server_list)
two=random.choice(server_list)
if first!=two:
break
return [first,two]
def fight(a=0,b=0):
x=a;xx=b
firstall=list();twoall=list()
players=randomplayer()
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute("SELECT * FROM players WHERE id={}".format(players[0]))
first=cursor.fetchall()
for i in range(len(first[0])):
firstall.append(first[0][i])
cursor.execute("SELECT * FROM players WHERE id={}".format(players[1]))
two=cursor.fetchall()
for i in range(len(two[0])):
twoall.append(two[0][i])
first_name=firstall[1];two_name=twoall[1]
first_35=firstall[5];two_35=twoall[5];first_25=firstall[6];two_25=twoall[6];first_15=firstall[7];two_15=twoall[7];first_kartopu=firstall[9]
two_kartopu=twoall[9];first_patlayan=firstall[10];two_patlayan=twoall[10];first_oyuk=firstall[11];two_oyuk=twoall[11];first_batirma=firstall[13]
two_batirma=twoall[13]
firstpower=((int(first_35)*kartopu_power*_35power+int(first_25)*kartopu_power*_25power+int(first_15)*kartopu_power*_15power))
twopower=((int(two_35) * kartopu_power * _35power+int(two_25) * kartopu_power * _25power+int(two_15) * kartopu_power * _15power))
first_hp=10000
two_hp=10000
a=6;b=5
kazanan=""
while True:
if first_hp > 0 and two_hp > 0:
if a % 6 == 0:
time.sleep(x)
if two_hp <= firstpower:
#print("{} Oyuncusu {} vurdu rakip battı".format(first_name, two_hp))
two_hp=0
break
#print("{} Oyuncusu {} vurdu".format(first_name, firstpower))
two_hp-=int(firstpower)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(first_name, first_hp, two_name, two_hp))
time.sleep(x)
if b % 5 == 0:
if first_hp <= twopower:
#print("{} Oyuncusu {} vurdu rakip battı".format(two_name, first_hp))
first_hp=0
break
#print("{} Oyuncusu {} vurdu".format(two_name, twopower))
first_hp-=int(twopower)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(first_name, first_hp, two_name, two_hp))
time.sleep(xx)
a+=1
b+=1
else:
time.sleep(xx)
a+=1
b+=1
else:
break
if first_hp >= two_hp:
#print("Kazanan {} {} oyuncusunun gemisi battı".format(first_name, two_name))
kazanan=first_name
else:
#print("Kazanan {} {} oyuncusunun gemisi battı".format(two_name, first_name))
kazanan=two_name
return kazanan
def xpfight():
try:
loop=0
while True:
print(loop)
winner=fight()
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute("SELECT xp,sunk,money FROM players WHERE username='{}'".format(winner))
data=cursor.fetchall()
xp=int(data[0][0]) + random.randint(1000, 1400)
sunk=int(data[0][1]) + 1
money=data[0][2] + random.randint(4000, 8000)
xp=str(xp)
sunk=str(sunk)
cursor.execute(
"UPDATE players SET xp='{}',sunk='{}',money={} WHERE username='{}'".format(xp, sunk, money, winner))
connect.commit()
loop+=1
except KeyboardInterrupt:
print("you are not allowed to quit right now")
exit()
def GetMoney(a=0,b=0):
x=a;xx=b
loop=0
    while True:
        npc_list=server.Npc()  # fetch NPC data each round; without this the npc_* lookups below raise NameError (Event() below does the same)
        for i in range(len(server_list)):
print(loop)
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute("SELECT level,cannon1,cannon2,cannon3,username,xp,money,npcsunk FROM players WHERE id={}".format(server_list[i]))
data=cursor.fetchall()
level=(data[0][0])
cannon1=data[0][1]
cannon2=data[0][2]
cannon3=data[0][3]
playername=data[0][4]
playerxp=int(data[0][5])
money=int(data[0][6])
npcsunk=int(data[0][7])
playerhp=10000
power=((int(cannon1)*kartopu_power*_35power+int(cannon2)*kartopu_power*_25power+int(cannon3)*kartopu_power*_15power))
npc_name=npc_list[0][0]
npc_hp=int(npc_list[0][1])
npc_power=140
npc_prize=int(npc_list[0][2])
npc_xp=int(npc_list[0][3])
a=6
b=5
while True:
if playerhp > 0 and npc_hp > 0:
if a % 6 == 0:
time.sleep(x)
if npc_hp <= power:
#print("{} Oyuncusu {} vurdu rakip battı".format(playername, npc_hp))
npc_hp=0
break
#print("{} Oyuncusu {} vurdu".format(playername, power))
npc_hp-=int(power)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(x)
if b % 5 == 0:
if playerhp <= npc_power:
#print("{} Oyuncusu {} vurdu rakip battı".format(npc_name, playerhp))
playerhp=0
break
#print("{} Oyuncusu {} vurdu".format(npc_name, npc_power))
playerhp-=int(npc_power)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(xx)
a+=1
b+=1
else:
time.sleep(xx)
a+=1
b+=1
else:
break
if playerhp >= npc_hp:
playerxp+=npc_xp
money+=npc_prize
npcsunk+=1
#print("Kazanan {} {} oyuncusunun gemisi battı".format(playername, npc_name))
cursor.execute("UPDATE players SET money={},xp={},npcsunk={} WHERE username='{}'".format(money,playerxp,npcsunk,playername))
connect.commit()
else:
print("Kazanan {} {} oyuncusunun gemisi battı".format(npc_name, playername))
loop+=1
i=0
def Event(a=0,b=0):
x=a;xx=b
loop=0
try:
while True:
npc_list=server.Npc()
print(loop)
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute(
"SELECT level,cannon1,cannon2,cannon3,username,xp,money,npcsunk FROM players WHERE id={}".format(
random.choice(server_list)))
data=cursor.fetchall()
level=(data[0][0])
cannon1=data[0][1]
cannon2=data[0][2]
cannon3=data[0][3]
playername=data[0][4]
playerxp=int(data[0][5])
money=int(data[0][6])
npcsunk=int(data[0][7])
playerhp=10000
power=((int(cannon1) * kartopu_power * _35power + int(cannon2) * kartopu_power * _25power + int(
cannon3) * kartopu_power * _15power))
npc_name=npc_list[9][0]
npc_hp=int(npc_list[9][1])
npc_power=4200
npc_prize=int(npc_list[9][2])
npc_xp=int(npc_list[9][3])
a=6
b=5
while True:
if playerhp > 0 and npc_hp > 0:
if a % 6 == 0:
time.sleep(x)
if npc_hp <= power:
# print("{} Oyuncusu {} vurdu rakip battı".format(playername, npc_hp))
npc_hp=0
break
# print("{} Oyuncusu {} vurdu".format(playername, power))
npc_hp-=int(power)
# print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(x)
if b % 5 == 0:
if playerhp <= npc_power:
# print("{} Oyuncusu {} vurdu rakip battı".format(npc_name, playerhp))
playerhp=0
break
# print("{} Oyuncusu {} vurdu".format(npc_name, npc_power))
playerhp-=int(npc_power)
# print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(xx)
a+=1
b+=1
else:
time.sleep(xx)
a+=1
b+=1
else:
break
if playerhp >= npc_hp:
playerxp+=npc_xp
money+=npc_prize
npcsunk+=1
print("Etkinliği Kazanan {} {} gemisi battı.{} {} altın ve {} xp kazandı".format(playername, npc_name,
playername,
npc_prize, npc_xp))
cursor.execute(
"UPDATE players SET money={},xp={},npcsunk={} WHERE username='{}'".format(money, playerxp, npcsunk,
playername))
connect.commit()
quit()
else:
npc_hp=npc_hp
print("Kazanan {} {} oyuncusunun gemisi battı".format(npc_name, playername))
cursor.execute("UPDATE npc SET hp={} WHERE npc='{}'".format(npc_hp, npc_name))
connect.commit()
loop+=1
except KeyboardInterrupt:
quit()
| zeminkat/Game | savas.py | savas.py | py | 11,157 | python | en | code | 0 | github-code | 6 |
| 71174596349 |
from __future__ import unicode_literals
import re
import os
import io
import sys
PY3 = sys.version_info.major > 2
try:
from urllib.parse import quote # py3
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
except ImportError: # py2
from urllib import quote
from urllib2 import urlopen, HTTPError, URLError
import logging
from collections import namedtuple
from wx import GetTranslation as _
try:
from html import escape # py3
except ImportError:
from cgi import escape # py2
from abc_character_encoding import abc_text_to_unicode
if PY3:
unichr = chr
# this file contains many regular expression patterns
# for understanding these regular expressions:
# https://regex101.com/#python
# http://abcnotation.com/wiki/abc:standard:v2.1#information_field_definition
# keyword | name |file header | tune header | tune body | inline | type
abc_keywords = """\
A:|area |yes |yes |no |no |string
B:|book |yes |yes |no |no |string
C:|composer |yes |yes |no |no |string
D:|discography |yes |yes |no |no |string
F:|file url |yes |yes |no |no |string
G:|group |yes |yes |no |no |string
H:|history |yes |yes |no |no |string
I:|instruction |yes |yes |yes |yes |instruction
K:|key |no |last |yes |yes |instruction
L:|unit note length |yes |yes |yes |yes |instruction
M:|meter |yes |yes |yes |yes |instruction
m:|macro |yes |yes |yes |yes |instruction
N:|notes |yes |yes |yes |yes |string
O:|origin |yes |yes |no |no |string
P:|parts |no |yes |yes |yes |instruction
Q:|tempo |no |yes |yes |yes |instruction
R:|rhythm |yes |yes |yes |yes |string
r:|remark |yes |yes |yes |yes |string
S:|source |yes |yes |no |no |string
s:|symbol line |no |no |yes |no |instruction
T:|tune title |no |second |yes |no |string
U:|user defined |yes |yes |yes |yes |instruction
V:|voice |no |yes |yes |yes |instruction
W:|words (at the end) |no |yes |yes |no |string
w:|words (note aligned) |no |no |yes |no |string
X:|reference number |no |first |no |no |instruction
Z:|transcription |yes |yes |no |no |string
"""
clef_name_pattern = 'treble|bass3|bass|tenor|auto|baritone|soprano|mezzosoprano|alto2|alto1|alto|perc|none|C[1-5]|F[1-5]|G[1-5]'
simple_note_pattern = "[a-gA-G][',]*"
clef_pattern = ' *?(?P<clef>(?: (?P<clefprefix>(?:clef=)?)(?P<clefname>{1})(?P<stafftranspose>(?:[+^_-]8)?))?) *?(?P<octave>(?: octave=-?\d+)?) *?(?P<stafflines>(?: stafflines=\d+)?) *?(?P<playtranspose>(?: transpose=-?\d+)?) *?(?P<score>(?: score={0}{0})?) *?(?P<sound>(?: sound={0}{0})?) *?(?P<shift>(?: shift={0}{0})?) *?(?P<instrument>(?: instrument={0}(?:/{0})?)?)'.format(simple_note_pattern, clef_name_pattern)
key_ladder = 'Fb Cb Gb Db Ab Eb Bb F C G D A E B F# C# G# D# A# E# B#'.split(' ')
whitespace_chars = u' \r\n\t'
abc_inner_pattern = {
'K:': r' ?(?:(?P<tonic>(?:[A-G][b#]?|none)) ??(?P<mode>(?:[MmDdPpLl][A-Za-z]*)?)(?P<accidentals>(?: +(?P<accidental>_{1,2}|=|\^{1,2})(?P<note>[a-g]))*)'+clef_pattern+')?',
'Q:': r'(?P<pre_text>(?: ?"(?P<pre_name>(?:\\"|[^"])*)")?)(?P<metronome>(?: ?(?P<note1>\d+/\d+) ?(?P<note2>\d+/\d+)? ?(?P<note3>\d+/\d+)? ?(?P<note4>\d+/\d+)?=(?P<bpm>\d+))?)(?P<post_text>(?: ?"(?P<post_name>\w*)")?)',
'V:': r' ?(?P<name>\w+)' + clef_pattern
}
name_to_display_text = {
'staves' : _('Staff layout' ),
'area' : _('Area' ),
'book' : _('Book' ),
'composer' : _('Composer' ),
'discography' : _('Discography' ),
'file url' : _('File url' ),
'group' : _('Group' ),
'history' : _('History' ),
'instruction' : _('Instruction' ),
'key' : _('Key' ),
'unit note length' : _('Unit note length' ),
'meter' : _('Meter' ),
'macro' : _('Macro' ),
'notes' : _('Notes' ),
'origin' : _('Origin' ),
'parts' : _('Parts' ),
'tempo' : _('Tempo' ),
'rhythm' : _('Rhythm' ),
'remark' : _('Remark' ),
'source' : _('Source' ),
'symbol line' : _('Symbol line' ),
'tune title' : _('Tune title' ),
'user defined' : _('User defined' ),
'voice' : _('Voice' ),
'words (note aligned)' : _('Words (note aligned)'),
'words (at the end)' : _('Words (at the end)'),
'reference number' : _('Reference number' ),
'transcription' : _('Transcription' ),
}
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
if PY3:
return type('Enum', (), enums)
else:
return type(b'Enum', (), enums)
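# Illustrative use of the helper above: enum('Red', 'Green', Blue=10) returns a class whose
# attributes are Red == 0, Green == 1 and Blue == 10, giving cheap named constants on py2 and py3.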
TuneScope = enum('FullText', 'SelectedText', 'SelectedLines', 'TuneHeader', 'TuneBody', 'Tune', 'TuneUpToSelection', 'BodyUpToSelection', 'BodyAfterSelection', 'LineUpToSelection', 'FileHeader', 'PreviousLine', 'MatchText', 'InnerText', 'PreviousCharacter', 'NextCharacter')
TuneScopeInfo = namedtuple('TuneScopeInfo', 'text start stop encoded_text')
InnerMatch = namedtuple('InnerMatch', 'match offset')
class ValueDescription(object):
def __init__(self, value, description, common=True, show_value=False, alternate_values=None):
super(ValueDescription, self).__init__()
self.value = value
self.description = description
self.show_value = show_value
self.common = common
self.alternate_values = alternate_values or []
class CodeDescription(ValueDescription):
def __init__(self, value, description, common=True, alternate_values=None):
super(CodeDescription, self).__init__(value, description, common=common, show_value=True, alternate_values=alternate_values)
class ValueImageDescription(ValueDescription):
def __init__(self, value, image_name, description, common=True, show_value=False):
super(ValueImageDescription, self).__init__(value, description, common=common, show_value=show_value)
self.image_name = image_name
class CodeImageDescription(ValueImageDescription):
def __init__(self, value, image_name, description, common=True):
super(CodeImageDescription, self).__init__(value, image_name, description, common=common, show_value=True)
decoration_aliases = {
'!>!' : '!accent!',
'!^!' : '!marcato!',
'!emphasis!': '!accent!',
'!<(!' : '!crescendo(!',
'!<)!' : '!crescendo)!',
'!>(!' : '!diminuendo(!',
'!>)!' : '!diminuendo)!',
'!+!' : '!plus!',
}
decoration_to_description = {
'.' : _('staccato mark'),
'~' : _('Irish roll'),
'H' : _('fermata'),
'L' : _('accent or emphasis'),
'M' : _('lowermordent'),
'O' : _('coda'),
'P' : _('uppermordent'),
'S' : _('segno'),
'T' : _('trill'),
'u' : _('down-bow'),
'v' : _('up-bow'),
'!trill!' : _('trill'),
'!trill(!' : _('start of an extended trill'),
'!trill)!' : _('end of an extended trill'),
'!lowermordent!' : _('lower mordent'),
'!uppermordent!' : _('upper mordent'),
'!mordent!' : _('mordent'),
'!pralltriller!' : _('pralltriller'),
'!roll!' : _('Irish roll'),
'!turn!' : _('turn or gruppetto'),
'!turnx!' : _('a turn mark with a line through it'),
'!invertedturn!' : _('an inverted turn mark'),
'!invertedturnx!' : _('an inverted turn mark with a line through it'),
'!arpeggio!' : _('arpeggio'),
'!>!' : _('accent or emphasis'),
'!accent!' : _('accent or emphasis'),
'!emphasis!' : _('accent or emphasis'),
'!^!' : _('marcato'),
'!marcato!' : _('marcato'),
'!fermata!' : _('fermata or hold'),
'!invertedfermata!': _('upside down fermata'),
'!tenuto!' : _('tenuto'),
'!0!' : _('no finger'),
'!1!' : _('thumb'),
'!2!' : _('index finger'),
'!3!' : _('middle finger'),
'!4!' : _('ring finger'),
'!5!' : _('little finger'),
'!+!' : _('left-hand pizzicato'),
'!plus!' : _('left-hand pizzicato'),
'!snap!' : _('snap-pizzicato'),
'!slide!' : _('slide up to a note'),
'!wedge!' : _('staccatissimo or spiccato'),
'!upbow!' : _('up-bow'),
'!downbow!' : _('down-bow'),
'!open!' : _('open string or harmonic'),
'!thumb!' : _('cello thumb symbol'),
'!breath!' : _('breath mark'),
'!pppp!' : _('pianissimo possibile'),
'!ppp!' : _('pianississimo'),
'!pp!' : _('pianissimo'),
'!p!' : _('piano'),
'!mp!' : _('mezzopiano'),
'!mf!' : _('mezzoforte'),
'!f!' : _('forte'),
'!ff!' : _('fortissimo'),
'!fff!' : _('fortississimo'),
'!ffff!' : _('fortissimo possibile'),
'!sfz!' : _('sforzando'),
'!crescendo(!' : _('start of a < crescendo mark'),
'!<(!' : _('start of a < crescendo mark'),
'!crescendo)!' : _('end of a < crescendo mark'),
'!<)!' : _('end of a < crescendo mark'),
'!diminuendo(!' : _('start of a > diminuendo mark'),
'!>(!' : _('start of a > diminuendo mark'),
'!diminuendo)!' : _('end of a > diminuendo mark'),
'!>)!' : _('end of a > diminuendo mark'),
'!segno!' : _('segno'),
'!coda!' : _('coda'),
'!D.S.!' : _('the letters D.S. (=Da Segno)'),
'!D.C.!' : _('the letters D.C. (=either Da Coda or Da Capo)'),
'!dacoda!' : _('the word "Da" followed by a Coda sign'),
'!dacapo!' : _('the words "Da Capo"'),
'!D.C.alcoda!' : _('the words "D.C. al Coda"'),
'!D.C.alfine!' : _('the words "D.C. al Fine"'),
'!D.S.alcoda!' : _('the words "D.S. al Coda"'),
'!D.S.alfine!' : _('the words "D.S. al Fine"'),
'!fine!' : _('the word "fine"'),
'!shortphrase!' : _('vertical line on the upper part of the staff'),
'!mediumphrase!' : _('vertical line on the upper part of the staff, extending down to the centre line'),
'!longphrase!' : _('vertical line on the upper part of the staff, extending 3/4 of the way down'),
'!ped!' : _('sustain pedal down'),
'!ped-up!' : _('sustain pedal up'),
'!editorial!' : _('editorial accidental above note'),
'!courtesy!' : _('courtesy accidental between parentheses'),
}
ABC_TUNE_HEADER_NO = 0
ABC_TUNE_HEADER_FIRST = 1
ABC_TUNE_HEADER_SECOND = 2
ABC_TUNE_HEADER_YES = 3
ABC_TUNE_HEADER_LAST = 4
tune_header_lookup = {'no': ABC_TUNE_HEADER_NO, 'first': ABC_TUNE_HEADER_FIRST, 'second': ABC_TUNE_HEADER_SECOND, 'yes': ABC_TUNE_HEADER_YES, 'last': ABC_TUNE_HEADER_LAST}
AbcSection = enum('FileHeader', 'TuneHeader', 'TuneBody', 'OutsideTune')
ABC_SECTIONS = [
AbcSection.FileHeader,
AbcSection.TuneHeader,
AbcSection.TuneBody,
AbcSection.OutsideTune
]
chord_notes = {
'' : ( 0, 4, 7 ), # 'Major'
'm' : ( 0, 3, 7 ), # 'Minor'
'dim' : ( 0, 3, 6 ), # 'Diminished'
'+' : ( 0, 4, 8 ), # 'Augmented'
'sus' : ( 0, 5, 7 ), # 'Suspended'
'sus2' : ( 0, 2, 7 ), # 'Suspended (2nd)
'7' : ( 0, 4, 7, 10 ), # 'Seventh'
'M7' : ( 0, 4, 7, 11 ), # 'Major seventh'
'mM7' : ( 0, 3, 7, 11 ), # 'Minor-major seventh'
'm7' : ( 0, 3, 7, 10 ), # 'Minor seventh'
'augM7' : ( 0, 4, 8, 11 ), # 'Augmented-major seventh'
'aug7' : ( 0, 4, 8, 10 ), # 'Augmented seventh'
'6' : ( 0, 4, 7, 9 ), # 'Major sixth'
'm6' : ( 0, 3, 7, 9 ), # 'Minor sixth'
'm7b5' : ( 0, 3, 6, 10 ), # 'Half-diminished seventh'
'dim7' : ( 0, 3, 6, 9 ), # 'Diminished seventh'
'7b5' : ( 0, 4, 6, 10 ), # 'Seventh flat five'
'5' : ( 0, 7 ), # 'Power-chord (no third
'7sus' : ( 0, 5, 7, 10 ), # 'Seventh suspended'
'7sus2' : ( 0, 2, 7, 10 ), # 'Seventh suspended (2nd
'M9' : ( 0, 4, 7, 11, 14 ), # 'Major 9th'
'9' : ( 0, 4, 7, 10, 14 ), # 'Dominant 9th'
'mM9' : ( 0, 3, 7, 11, 14 ), # 'Minor Major 9th'
'm9' : ( 0, 3, 7, 10, 14 ), # 'Minor Dominant 9th'
'+M9' : ( 0, 4, 8, 11, 14 ), # 'Augmented Major 9th'
'+9' : ( 0, 4, 8, 10, 14 ), # 'Augmented Dominant 9th'
'o/9' : ( 0, 3, 6, 10, 14 ), # 'Half-Diminished 9th'
'o/9b' : ( 0, 3, 6, 10, 13 ), # 'Half-Diminished Minor 9th'
'dim9' : ( 0, 3, 6, 9, 14 ), # 'Diminished 9th'
'dim9b' : ( 0, 3, 6, 9, 13 ), # 'Diminished Minor 9th'
'11' : ( 0, 4, 7, 10, 14, 17 ), # 'Dominant 11th'
}
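# The tuples above are semitone offsets from the chord root; for example chord_notes['m7'] is
# (0, 3, 7, 10), which rooted at C yields C, Eb, G, Bb (a C minor seventh chord).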
def replace_text(text, replacements):
"""
:param text: text that requires replacements
:param replacements: A sequence of tuples in the form (compiled regular expression object, replacement value)
:return: the original text with all replacements applied
"""
for regex, replace_value in replacements:
text = regex.sub(replace_value, text)
return text
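# Illustrative use: replace_text('A%1c', [(re.compile('%1'), 'b')]) returns 'Abc'.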
def remove_named_groups(pattern):
"""
:param pattern: regular expression pattern
:return: regular expression pattern where named groups are removed
"""
return re.sub(r'(?<=\(\?)P<[^>]+>', ':', pattern)
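# For example r'(?P<note>[A-G])' becomes r'(?:[A-G])': the group still matches but no longer
# captures under a name, which avoids duplicate-group-name errors when patterns are combined.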
def replace_named_group(pattern, old_group, new_group=None):
"""
:param pattern: regular expression pattern (containing named groups)
:param old_group: original groupname
:param new_group: desired groupname
:return: regular expression pattern where named group old_group is replaced by new_group
"""
if new_group is None:
replace_value = ':'
else:
replace_value = 'P<{0}>'.format(new_group)
return re.sub(r'(?<=\(\?)P<{0}>'.format(old_group), replace_value, pattern)
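# For example replace_named_group(r'(?P<note>[A-G])', 'note', 'tonic') yields r'(?P<tonic>[A-G])';
# omitting new_group turns the group into a non-capturing (?:...) group instead.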
def get_html_from_url(url):
result = u''
try:
result = urlopen(url).read()
except HTTPError as ex:
pass
except URLError as ex:
pass
return result
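# Network failures are silently swallowed; the empty result makes the caller fall back to the
# locally generated description (see AbcElement.get_description_html below).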
class AbcElement(object):
"""
Base class for each element in abc-code where element is a piece of structured abc-code
"""
rest_of_line_pattern = r'(?P<inner>.*?)(?:(?<!\\)%.*)?$'
def __init__(self, name, keyword=None, display_name=None, description=None, validation_pattern=None):
self.name = name
self.keyword = keyword
if display_name is None:
self.__display_name = name_to_display_text.get(name, name[:1].upper() + name[1:])
else:
self.__display_name = display_name
self.description = description
self.mandatory = False
self.default = None
self.rest_of_line_pattern = AbcElement.rest_of_line_pattern
self._search_pattern = {}
self._search_re = {} # compiled regex
self.params = []
self.validation_pattern = validation_pattern
self.__validation_re = None
self.supported_values = None
self.tune_scope = TuneScope.SelectedLines
self.visible_match_group = None
self.removable_match_groups = {}
@staticmethod
def get_inline_pattern(keyword):
return r'\[' + re.escape(keyword) + r'([^\]\n\r]*)\]'
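    # e.g. get_inline_pattern('K:') matches inline fields such as [K:G] within a tune body line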
def freeze(self):
for section in ABC_SECTIONS:
pattern = self._search_pattern.get(section, None)
if pattern is not None:
self._search_re[section] = re.compile(pattern)
if self.validation_pattern is not None:
self.__validation_re = re.compile(self.validation_pattern)
@property
def valid_sections(self):
return [section for section in ABC_SECTIONS if self._search_pattern.get(section) is not None]
def matches(self, context):
regex = self._search_re.get(context.abc_section, None)
if regex is None:
return None
result = None
scope_info = context.get_scope_info(self.tune_scope)
encoded_text = scope_info.encoded_text
text = scope_info.text
p1, p2 = context.get_selection_within_scope(self.tune_scope)
if len(text) != len(encoded_text):
p1 = len(encoded_text[:p1].decode('utf-8'))
p2 = len(encoded_text[:p2].decode('utf-8'))
if p1 == p2 and 0 < p1 <= len(text) and text[p1 - 1] not in whitespace_chars:
p1 -= 1
for m in regex.finditer(text):
if m.start() <= p1 < m.end():
result = m
break
else:
# if p1 > len(text):
# print(u'Selection ({0}) past length ({1})'.format(p1, len(text)))
for m in regex.finditer(text):
if m.start() <= p1 <= p2 <= m.end():
result = m
break
return result
def get_regex_for_section(self, section):
return self._search_re.get(section, None)
def matches_text(self, context, text):
regex = self._search_re.get(context.abc_section, None)
if regex is not None:
return regex.search(text)
return None
def replace_text(self, context, text, replace_value):
return self._search_re[context.abc_section].sub(replace_value, text)
@property
def display_name(self):
return self.__display_name
def get_description_url(self, context):
return None
def get_header_text(self, context):
return self.__display_name
def get_description_text(self, context):
return self.description
def get_description_html(self, context):
result = None
url = self.get_description_url(context)
if url:
result = get_html_from_url(url)
if not result:
result = u'<h1>%s</h1>' % escape(self.get_header_text(context))
description = self.get_description_text(context)
if description:
result += u'{0}<br>'.format(escape(description))
if self.visible_match_group is not None:
# groups = context.current_match.groups()
# element_text = context.match_text
# if len(groups) == 1 and groups[0]:
# element_text = groups[0]
element_text = context.get_matchgroup(self.visible_match_group)
if element_text:
element_text = abc_text_to_unicode(element_text).strip()
if element_text:
result += u'<code>{0}</code><br>'.format(escape(element_text))
#for matchtext in context.current_match.groups():
# if matchtext:
# result += '<code>%s</code><br>' % escape(matchtext)
return result
def get_inner_element(self, context):
return self
class CompositeElement(AbcElement):
def __init__(self, name, keyword=None, display_name=None, description=None):
super(CompositeElement, self).__init__(name, keyword, display_name=display_name, description=description)
self._elements = {}
def add_element(self, element):
if element.keyword:
self._elements[element.keyword] = element
else:
raise Exception('Element has no keyword')
def get_element(self, keyword):
return self._elements.get(keyword)
def get_element_from_context(self, context):
inner_text = context.current_match.group(1)
if inner_text is None:
inner_text = context.current_match.group(2)
return self.get_element_from_inner_text(inner_text)
def get_element_from_inner_text(self, inner_text):
parts = inner_text.split(' ', 1)
keyword = parts[0]
result = self._elements.get(keyword)
if isinstance(result, CompositeElement) and len(parts) > 1:
subelement = result.get_element_from_inner_text(parts[1])
if subelement is not None:
result = subelement
return result
def get_header_text(self, context):
element = self.get_element_from_context(context)
if element:
return element.get_header_text(context)
return super(CompositeElement, self).get_header_text(context)
def get_description_text(self, context):
element = self.get_element_from_context(context)
if element:
return element.get_description_text(context)
return super(CompositeElement, self).get_description_text(context)
def get_inner_element(self, context):
return self.get_element_from_context(context) or self
class AbcUnknown(AbcElement):
pattern = ''
def __init__(self):
super(AbcUnknown, self).__init__('Unknown', display_name=_('Unknown'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcUnknown.pattern
class AbcInformationField(AbcElement):
def __init__(self, keyword, name, file_header, tune_header, tune_body, inline, inner_pattern=None):
super(AbcInformationField, self).__init__(name, keyword)
self.file_header = file_header
self.tune_header = tune_header
self.tune_body = tune_body
self.inline = inline
self.inner_pattern = inner_pattern
self.inner_re = None
self.visible_match_group = 1
if inner_pattern:
self.visible_match_group = 0
line_pattern = r'(?m)^' + re.escape(self.keyword) + self.rest_of_line_pattern
if file_header:
self._search_pattern[AbcSection.FileHeader] = line_pattern
if tune_header in [ABC_TUNE_HEADER_YES, ABC_TUNE_HEADER_FIRST, ABC_TUNE_HEADER_SECOND, ABC_TUNE_HEADER_LAST]:
self._search_pattern[AbcSection.TuneHeader] = line_pattern
if tune_body or inline:
pattern = line_pattern
if inline:
pattern += '|' + self.get_inline_pattern(keyword)
self._search_pattern[AbcSection.TuneBody] = pattern
def freeze(self):
super(AbcInformationField, self).freeze()
if self.inner_pattern:
self.inner_re = re.compile(self.inner_pattern)
def matches(self, context):
match = super(AbcInformationField, self).matches(context)
result = match
if self.inner_re and match is not None:
i = 1
inner_text = match.group(i)
if inner_text is None:
i += 1
inner_text = match.group(i)
m = self.inner_re.search(inner_text)
if m:
result = (match, InnerMatch(m, match.start(i)))
return result
class AbcDirective(CompositeElement):
def __init__(self):
super(AbcDirective, self).__init__('Stylesheet directive', display_name=_('Stylesheet directive'), description=_('A stylesheet directive is a line that starts with %%, followed by a directive that gives instructions to typesetting or player programs.'))
pattern = r'(?m)^(?:%%|I:)(?!%)' + self.rest_of_line_pattern + '|' + self.get_inline_pattern('I:')
for section in ABC_SECTIONS:
self._search_pattern[section] = pattern
class AbcStringField(AbcInformationField):
def __init__(self, keyword, name, file_header, tune_header, tune_body, inline):
super(AbcStringField, self).__init__(name, keyword, file_header, tune_header, tune_body, inline)
class AbcInstructionField(AbcInformationField):
def __init__(self, keyword, name, file_header, tune_header, tune_body, inline, inner_pattern=None):
super(AbcInstructionField, self).__init__(name, keyword, file_header, tune_header, tune_body, inline, inner_pattern)
class AbcMidiDirective(CompositeElement):
def __init__(self):
super(AbcMidiDirective, self).__init__('MIDI directive', 'MIDI', display_name=_('MIDI directive'), description=_('A directive that gives instructions to player programs.'))
class AbcMidiProgramDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI program(?P<channel>(?:\s+\d+(?=\s+\d))?)(?:(?P<instrument>\s*\d*))?' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiProgramDirective, self).__init__('MIDI_program', display_name=_('Instrument'), description=_('Sets the instrument for a MIDI channel.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiProgramDirective.pattern
class AbcMidiChordProgramDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI chordprog(?:(?P<instrument>\s*\d*))?' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiChordProgramDirective, self).__init__('MIDI_chordprog', display_name=_('Chord instrument'), description=_('Sets the instrument for playing chords.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiChordProgramDirective.pattern
class AbcMidiBaseProgramDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI bassprog(?:(?P<instrument>\s*\d*))?' + AbcElement.rest_of_line_pattern
def __init__(self):
        super(AbcMidiBaseProgramDirective, self).__init__('MIDI_bassprog', display_name=_('Bass instrument'), description=_('Sets the instrument for the bass.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiBaseProgramDirective.pattern
class AbcMidiChannelDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI channel(?P<channel>\s*\d*)' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiChannelDirective, self).__init__('MIDI_channel', display_name=_('Channel'), description=_('Sets the MIDI channel for the current voice.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiChannelDirective.pattern
class AbcMidiDrumMapDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)(?:MIDI drummap|percmap)\s+(?P<note>[_^]*\w[,']*)\s+(?P<druminstrument>\d+)" + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiDrumMapDirective, self).__init__('MIDI_drummap', display_name=_('Drum mapping'), description=_('Maps a note to an instrument.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiDrumMapDirective.pattern
class AbcMidiVolumeDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)MIDI (?:control 7|chordvol|bassvol)\s+(?P<volume>\d*)" + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiVolumeDirective, self).__init__('MIDI_volume', display_name=_('Volume'), description=_('Volume for current voice.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiVolumeDirective.pattern
class AbcMidiGuitarChordDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)MIDI gchord (?P<pattern>\w*)" + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiGuitarChordDirective, self).__init__('MIDI_gchord', display_name=_('Guitar chords'), description=_('Play guitar chords'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiGuitarChordDirective.pattern
class ScoreDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)(?:score|staves)\b"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(ScoreDirective, self).__init__('score', display_name=_('Score layout'), description=_('Defines which staves are displayed.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = ScoreDirective.pattern
class MeasureNumberDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)(?:measurenb|barnumbers) (?P<interval>-?\d*)"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(MeasureNumberDirective, self).__init__('measurenb', display_name=_('Measure numbering'), description=_('Defines if and how measures are numbered.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = MeasureNumberDirective.pattern
class HideFieldsDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)writefields\s+(?P<fields>[A-Za-z_]+)\s+(?:0|false)"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(HideFieldsDirective, self).__init__('hide_fields', display_name=_('Hide fields'), description=_('Defines which fields should be hidden.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = HideFieldsDirective.pattern
class ShowFieldsDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)writefields\s+(?P<fields>[A-Za-z]+)"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(ShowFieldsDirective, self).__init__('show_fields', display_name=_('Show fields'), description=_('Defines which fields should be shown.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = ShowFieldsDirective.pattern
class Abcm2psDirective(AbcElement):
""" Elements defined by abcm2ps """
anchor_replacement = (re.compile('<a (?:href|name)="[^"]*">|</a>', re.IGNORECASE), '')
table_replacement = (re.compile('<table>.*?</table>', re.IGNORECASE | re.DOTALL), '')
def __init__(self, keyword, name, description=None):
super(Abcm2psDirective, self).__init__(keyword, name, description=description)
self.html_replacements = [
Abcm2psDirective.anchor_replacement,
Abcm2psDirective.table_replacement
]
def get_description_url(self, context):
return 'http://moinejf.free.fr/abcm2ps-doc/%s.xhtml' % quote(self.name)
def get_html_from_url(self, url):
result = get_html_from_url(url)
result = replace_text(result, self.html_replacements)
return result
class AbcVersionDirective(AbcElement):
pattern = r'^%abc-(?P<version>[\d\.]+)'
def __init__(self):
super(AbcVersionDirective, self).__init__('abcversion', display_name=_('ABC version'), description=_('It starts with the version of the ABC specification this file conforms to.'))
self._search_pattern[AbcSection.FileHeader] = AbcVersionDirective.pattern
class AbcComment(AbcElement):
#pattern = r'(?<!\\|^)%\s*(.*)|^%(?!%)\s*(.*)$'
pattern = r'(?<!\\)%\s*(.*)$'
def __init__(self):
super(AbcComment, self).__init__('Comment', '%', display_name=_('Comment'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcComment.pattern
self.visible_match_group = 1
def get_header_text(self, context):
if context.match_text and context.match_text.startswith('%%'):
return _('Stylesheet directive')
else:
return super(AbcComment, self).get_header_text(context)
def get_description_text(self, context):
if context.match_text and context.match_text.startswith('%%'):
return _('A stylesheet directive is a line that starts with %%, followed by a directive that gives instructions to typesetting or player programs.')
else:
return super(AbcComment, self).get_description_text(context)
def remove_comments(self, abc):
return self._search_re[AbcSection.TuneBody].sub('', abc)
class AbcBeam(AbcElement):
pattern = r'`+'
def __init__(self):
super(AbcBeam, self).__init__('Beam', '`', display_name=_('Beam'), description=_('Back quotes ` may be used freely between notes to be beamed, to increase legibility.'))
self._search_pattern[AbcSection.TuneBody] = AbcBeam.pattern
class AbcEmptyDocument(AbcElement):
pattern = r'^$'
def __init__(self):
super(AbcEmptyDocument, self).__init__('empty_document', display_name=_('Welcome to EasyABC'),
description=_('Creating an abc-file from scratch can be difficult. This assist panel tries to help by providing hints and actions. But remember, typing is usually faster.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcEmptyLine.pattern
self.tune_scope = TuneScope.FullText
def matches(self, context):
if context.contains_text:
return None
else:
regex = self._search_re.get(context.abc_section, None)
return regex.match('')
class AbcEmptyLine(AbcElement):
pattern = r'^\s*$'
def __init__(self):
super(AbcEmptyLine, self).__init__('empty_line', display_name=_('Empty line'), description=_('An empty line separates tunes.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcEmptyLine.pattern
class AbcEmptyLineWithinTuneHeader(AbcElement):
def __init__(self):
super(AbcEmptyLineWithinTuneHeader, self).__init__('empty_line_header', display_name=_('Empty line in header'), description=_('More directives can be added here in the tune header. After K: the music code begins.'))
self._search_pattern[AbcSection.TuneHeader] = AbcEmptyLine.pattern
class AbcEmptyLineWithinTuneBody(AbcElement):
def __init__(self):
super(AbcEmptyLineWithinTuneBody, self).__init__('empty_line_tune', display_name=_('Empty line'), description=_('Notes, rests, or directives can be added.'))
self._search_pattern[AbcSection.TuneBody] = AbcEmptyLine.pattern
class AbcEmptyLineWithinFileHeader(AbcElement):
def __init__(self):
super(AbcEmptyLineWithinFileHeader, self).__init__('empty_line_file_header', display_name=_('File header'), description=_('Everything above the first X: is the file header. The directives here apply to all the tunes that follow.'))
self._search_pattern[AbcSection.FileHeader] = AbcEmptyLine.pattern
class AbcBodyElement(AbcElement):
def __init__(self, name, pattern, display_name=None, description=None):
super(AbcBodyElement, self).__init__(name, display_name=display_name, description=description)
self._search_pattern[AbcSection.TuneBody] = pattern
self.pattern = pattern
class AbcSpace(AbcBodyElement):
pattern = r'\s+'
def __init__(self):
super(AbcSpace, self).__init__('Whitespace', AbcSpace.pattern, display_name=_('Whitespace'), description=_('Space is used to improve legibility and to prevent notes from sharing the same beam.'))
class AbcAnnotation(AbcBodyElement):
pattern = r'(?P<annotation>"(?P<pos>[\^_<>@])(?P<text>(?:\\"|[^"])*)")'
def __init__(self):
super(AbcAnnotation, self).__init__('Annotation', AbcAnnotation.pattern, display_name=_('Annotation'))
self.visible_match_group = 'text'
class AbcChordOrAnnotation(AbcBodyElement):
pattern = r'"(?P<pos>[\^_<>@])?(?P<text>(?:\\"|[^"])*)"'
def __init__(self):
super(AbcChordOrAnnotation, self).__init__('Chord or annotation', AbcChordOrAnnotation.pattern, display_name=_('Chord symbol or annotation'))
class AbcSlur(AbcBodyElement):
pattern = r'(?P<dash>\.?)\((?!\d)|\)'
def __init__(self):
super(AbcSlur, self).__init__('Slur', AbcSlur.pattern, display_name=_('Slur'))
class TypesettingSpace(AbcBodyElement):
pattern = 'y'
def __init__(self):
super(TypesettingSpace, self).__init__('Typesetting extra space', TypesettingSpace.pattern, display_name=_('Typesetting extra space'), description=_('y can be used to add extra space between the surrounding notes; moreover, chord symbols and decorations can be attached to it, to separate them from notes.'))
class RedefinableSymbol(AbcBodyElement):
pattern = '[H-Wh-w~]'
def __init__(self):
super(RedefinableSymbol, self).__init__('Redefinable symbol', RedefinableSymbol.pattern, display_name=_('Redefinable symbol'), description=_('The letters H-W and h-w and the symbol ~ can be assigned with the U: field to provide a shortcut for the !symbol! syntax. For example, to assign the letter T to represent the trill, you can write: U: T = !trill!'))
class AbcDecoration(AbcBodyElement):
pattern = r"!([^!]+)!|\+([^!]+)\+|\."
values = decoration_to_description
def __init__(self, name=None, subset=None, display_name=None):
if name is None:
name = 'Decoration'
if subset is None:
pattern = AbcDecoration.pattern
else:
with_exclamation = '|'.join(re.escape(value[1:-1]) for value in subset if value[0] == '!')
without_exclamation = '|'.join(re.escape(value) for value in subset if value[0] != '!')
if without_exclamation:
without_exclamation = '|' + without_exclamation
pattern = r'(?P<decoration>(?P<decomark>\+|!)(?P<deconame>{0})(?P=decomark){1})'.format(with_exclamation, without_exclamation)
super(AbcDecoration, self).__init__(name, pattern, display_name=display_name)
def get_description_html(self, context):
html = super(AbcDecoration, self).get_description_html(context)
html += '<br>'
symbol = context.match_text
if symbol and symbol[0] == symbol[-1] == '+': # convert old notation to new
symbol = '!%s!' % symbol[1:-1]
html += escape(decoration_to_description.get(symbol, _('Unknown symbol')))
html += '<br>'
return html
class AbcDynamicsDecoration(AbcDecoration):
values = [
'!ffff!', '!fff!', '!ff!', '!f!', '!mf!', '!mp!', '!p!', '!pp!', '!ppp!', '!pppp!', '!sfz!',
'!crescendo(!', '!<(!',
'!crescendo)!', '!<)!',
'!diminuendo(!', '!>(!',
'!diminuendo)!', '!>)!'
]
def __init__(self):
super(AbcDynamicsDecoration, self).__init__('Dynamics', AbcDynamicsDecoration.values, display_name=_('Dynamics'))
class AbcFingeringDecoration(AbcDecoration):
values = ['!0!', '!1!', '!2!', '!3!', '!4!', '!5!']
def __init__(self):
super(AbcFingeringDecoration, self).__init__('Fingering', AbcFingeringDecoration.values, display_name=_('Fingering'))
class AbcOrnamentDecoration(AbcDecoration):
values = [
'!trill!',
'!trill(!',
'!trill)!',
'!mordent!', #'!lowermordent!',
'!pralltriller!', #'!uppermordent!',
'!roll!',
'!turn!',
'!turnx!',
'!invertedturn!',
'!invertedturnx!',
'!arpeggio!'
]
def __init__(self):
super(AbcOrnamentDecoration, self).__init__('Ornament', AbcOrnamentDecoration.values, display_name=_('Ornament'))
class AbcDirectionDecoration(AbcDecoration):
values = [
'!segno!',
'!coda!',
'!D.S.!',
'!D.C.!',
'!dacoda!',
'!dacapo!',
'!D.C.alcoda!',
'!D.C.alfine!',
'!D.S.alcoda!',
'!D.S.alfine!',
'!fine!'
]
def __init__(self):
super(AbcDirectionDecoration, self).__init__('Direction', AbcDirectionDecoration.values, display_name=_('Direction'))
class AbcArticulationDecoration(AbcDecoration):
values = [
'.',
'!tenuto!',
'!accent!', '!>!', '!emphasis!',
'!marcato!', '!^!',
'!wedge!',
'!invertedfermata!',
'!fermata!',
'!plus!', '!+!',
'!snap!',
'!slide!',
'!upbow!',
'!downbow!',
'!open!',
'!thumb!',
'!breath!',
'!ped!',
'!ped-up!',
]
def __init__(self):
super(AbcArticulationDecoration, self).__init__('Articulation', AbcArticulationDecoration.values, display_name=_('Articulation'))
class AbcBrokenRhythm(AbcBodyElement):
pattern = r'\<+|\>+'
def __init__(self):
super(AbcBrokenRhythm, self).__init__('Broken rhythm', AbcBrokenRhythm.pattern)
def get_description_html(self, context):
html = super(AbcBrokenRhythm, self).get_description_html(context)
if '>' in context.match_text:
html += 'The previous note is dotted, the next note halved'
else: # if '<' in context.match_text:
html += 'The previous note is halved, the next dotted'
return html
class AbcTuplet(AbcBodyElement):
pattern = r"\([1-9](?:\:[1-9]?)?(?:\:[1-9]?)?"
def __init__(self):
super(AbcTuplet, self).__init__('Tuplet', AbcTuplet.pattern, display_name=_('Tuplet'), description=_('Duplets, triplets, quadruplets, etc.'))
class AbcBar(AbcBodyElement):
pattern = r"(?:\.?\|\||:*\|\]|\[\|:*|::|:+\||\|:+|\.?\||\[\|\])[1-9]?"
def __init__(self):
super(AbcBar, self).__init__('Bar', AbcBar.pattern, display_name=_('Bar'), description=_('Separates measures.'))
class AbcVariantEnding(AbcBodyElement):
pattern = r'\[[1-9](?:[,-][1-9])*|\|[1-9]'
def __init__(self):
super(AbcVariantEnding, self).__init__('Variant ending', AbcVariantEnding.pattern, display_name=_('Variant ending'), description=_('To play a different ending each time'))
class AbcVoiceOverlay(AbcBodyElement):
pattern = '&'
def __init__(self):
super(AbcVoiceOverlay, self).__init__('Voice overlay', AbcVoiceOverlay.pattern, display_name=_('Voice overlay'), description=_("The & operator may be used to temporarily overlay several voices within one measure. Each & operator sets the time point of the music back by one bar line, and the notes which follow it form a temporary voice in parallel with the preceding one. This may only be used to add one complete bar's worth of music for each &. "))
class AbcInvalidCharacter(AbcBodyElement):
pattern = r'[^\d\w\s%s]' % re.escape('!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~')
def __init__(self):
super(AbcInvalidCharacter, self).__init__('Invalid character', AbcInvalidCharacter.pattern, display_name=_('Invalid character'), description=_("This character is not allowed within the body of an abc tune."))
class AbcChordSymbol(AbcBodyElement):
basic_pattern = r'(?P<chordsymbol>"(?P<chordname>[^\^_<>@"\\](?:[^"\\]|\\.)*)")'
#pattern = ur'(?P<chordsymbol>"(?P<chordnote>[A-G][b#\u266D\u266E\u266F]?)(?P<quality>[^/\d]*)(?P<th>2|4|5|6|7|9|11|13)?(?P<sus>sus[2|4|9]?)?(?P<additional>.*?)(?P<bassnote>(?:/[A-Ga-g][b#\u266D\u266E\u266F]?)?)")'
pattern = r'"(?P<chordsymbol>(?P<chordnote>(?:[A-G][b#\u266D\u266E\u266F]?)?)(?P<chordname>.*?)(?P<bassnote>(?:/[A-Ga-g][b#\u266D\u266E\u266F]?)?))"'
def __init__(self):
super(AbcChordSymbol, self).__init__('Chord symbol', AbcChordSymbol.pattern, display_name=_('Chord symbol'))
class AbcBaseNote(AbcBodyElement):
accidental_pattern = r'(?P<accidental>(?:[_^](?:3/2?|1?/2?)|\^{1,2}|_{1,2}|=)?)?'
length_pattern = r'(?P<length>\d{0,3}(?:/\d{0,3})*)'
octave_pattern = r"(?P<octave>[',]*)"
pair_pattern = r'(?P<pair>(?:\s*>+|\s*<+)?)'
tie_pattern = r'(?P<tie>-?)'
basic_note_pattern_without_len = r'{0}(?P<note>[A-Ga-g]){1}'.format(accidental_pattern, octave_pattern)
basic_note_pattern = basic_note_pattern_without_len + length_pattern
basic_rest_pattern_without_len = '(?P<rest>[zx])'
basic_rest_pattern = basic_rest_pattern_without_len + length_pattern
basic_note_or_rest_pattern = '(?:{0}|{1})'.format(basic_note_pattern_without_len, basic_rest_pattern_without_len) + length_pattern
basic_measure_rest_pattern = '(?P<rest>[ZX])(?P<length>(?:[1-9][0-9]*)?)'
def __init__(self, name, pattern, display_name=None, description=None):
super(AbcBaseNote, self).__init__(name, pattern, display_name=display_name, description=description)
class AbcGraceNotes(AbcBaseNote):
pattern = r'(?P<grace>{(?P<acciaccatura>/?)(?P<gracenote>[^}]*)})'
def __init__(self):
super(AbcBaseNote, self).__init__('Grace notes', AbcGraceNotes.pattern, display_name=_('Grace notes'))
self.visible_match_group = 'gracenote'
class AbcNoteGroup(AbcBaseNote):
note_group_pattern_prefix = r'(?P<gracenotes>{0}?)(?P<chordsymbols>{1}?)(?P<decoanno>(?P<decorations>{2})|(?P<annotations>{3})*)'.format(
AbcGraceNotes.pattern, AbcChordSymbol.basic_pattern, AbcDecoration.pattern, AbcAnnotation.pattern)
note_group_pattern_postfix = AbcBaseNote.pair_pattern + AbcBaseNote.tie_pattern
note_pattern = note_group_pattern_prefix + AbcBaseNote.basic_note_pattern + note_group_pattern_postfix
normal_rest_pattern = note_group_pattern_prefix + AbcBaseNote.basic_rest_pattern + AbcBaseNote.pair_pattern
note_or_rest_pattern = note_group_pattern_prefix + AbcBaseNote.basic_note_or_rest_pattern
chord_pattern = r'(?P<chord>{0}\[(?:{1}\s*)*\])'.format(note_group_pattern_prefix, remove_named_groups(note_or_rest_pattern)) + AbcBaseNote.length_pattern + note_group_pattern_postfix
note_or_chord_pattern = r'({0}|{1})'.format(remove_named_groups(note_or_rest_pattern), remove_named_groups(chord_pattern)) + note_group_pattern_postfix
def __init__(self):
super(AbcNoteGroup, self).__init__('Note group', AbcNoteGroup.note_or_chord_pattern, display_name=_('Note group')) # '^{0}$'.format(AbcNoteGroup.pattern))
#self.exact_match_required = True
self.visible_match_group = 1
class AbcNoteOrChord(AbcBaseNote):
pattern = AbcNoteGroup.note_or_chord_pattern
def __init__(self):
super(AbcBaseNote, self).__init__('Note or chord', AbcNoteOrChord.pattern, display_name=_('Note or chord'))
class AbcChord(AbcBaseNote):
pattern = AbcNoteGroup.chord_pattern
def __init__(self):
super(AbcBaseNote, self).__init__('Chord', AbcChord.pattern, display_name=_('Chord'))
self.visible_match_group = 'chord'
class AbcNote(AbcBaseNote):
pattern = AbcNoteGroup.note_pattern
def __init__(self):
super(AbcNote, self).__init__('Note', '({0})'.format(AbcNote.pattern), display_name=_('Note'))
self.removable_match_groups = {
'grace': _('Grace notes'),
'chordsymbol': _('Chord symbol'),
'annotations': _('Annotation')
}
self.visible_match_group = 1
class AbcNormalRest(AbcBaseNote):
pattern = AbcNoteGroup.normal_rest_pattern
def __init__(self):
super(AbcNormalRest, self).__init__('Rest', AbcNormalRest.pattern, display_name=_('Rest'))
self.visible_match_group = 0
class AbcMeasureRest(AbcBaseNote):
pattern = AbcBaseNote.basic_measure_rest_pattern
def __init__(self):
super(AbcMeasureRest, self).__init__('Measure rest', AbcMeasureRest.pattern, display_name=_('Measure rest')) # _('This rest spans one or more measures.')
self.visible_match_group = 0
class AbcMultipleNotesAndChords(AbcBaseNote):
pattern = '(?:' + AbcNoteGroup.note_or_chord_pattern + '[ `]*){2,}'
def __init__(self):
super(AbcMultipleNotesAndChords, self).__init__('Multiple notes/chords', '^{0}$'.format(AbcMultipleNotesAndChords.pattern), display_name=_('Multiple notes/chords'))
self.tune_scope = TuneScope.SelectedText # a line always contains multiple notes so limit to selected text
class AbcMultipleNotes(AbcBaseNote):
pattern = '(?:' + AbcNoteGroup.note_or_rest_pattern + '[ `]*){2,}'
def __init__(self):
super(AbcMultipleNotes, self).__init__('Multiple notes', '^{0}$'.format(AbcMultipleNotes.pattern), display_name=_('Multiple notes'))
self.tune_scope = TuneScope.SelectedText # a line always contains multiple notes so limit to selected text
class AbcBackslash(AbcBodyElement):
pattern = r'\\[ \t]*$'
def __init__(self):
super(AbcBackslash, self).__init__('Backslash', AbcBackslash.pattern, display_name=_('Backslash'), description=_('In abc music code, by default, line-breaks in the code generate line-breaks in the typeset score and these can be suppressed by using a backslash.'))
class AbcStructure(object):
# static variables
replace_regexes = None
valid_directive_re = None
from_to_directive_re = None
abc_field_re = None
@staticmethod
def get_sections(cwd):
# [1.3.6.2 [JWDJ] bugfix This fixes 'str>ng' in Fields and Command Reference
reference_content = io.open(os.path.join(cwd, 'reference.txt'), 'rU', encoding='latin-1').read()
if AbcStructure.replace_regexes is None:
AbcStructure.replace_regexes = [
(re.compile(r'\bh((?:bass/chord|length|logical|string|int|fl-?\n?oat\s?|command|str|text|vol|h|n|char|clef|bass|chord)\d*\s?(?: (?:string|int|float)\d*?)*)i\b'), r'<\1>'), # enclose types with < and >
(re.compile(r'\[((?:bass/chord|length|logical|string|int|float|command|str|text|vol)\d*)\]'), r'<\1>'), # replace types enclosed [ and ] with < and >
(re.compile(r'(?m)\b(?<![- ])1\d\d[\s\n]+[A-Z]+[A-Z\s\.&]+$'), ''), # strip left page header
(re.compile(r'\bA\.\d+\.[\s\n]+[A-Z &]*1\d\d\b'), ''), # strip right page header
(re.compile(r'[\.,;]\s[\w\n\s]+Section\s(\d\.|[\d\w\s&:])*\.'), '.'), # removes references to sections
(re.compile(r' as was seen in Section \d+(\.\d+)*\.'), '.'), # removes references to sections
(re.compile(r'(?m)^(\w:)\s+((?:[a-z]+\s(?:in|of)\s)?(?:header(?:,\s?body)?|body))\s+(.*)$'), r'\1 \3 (\2)'), # places where-field at the end of description
                (re.compile(r'\bh(\d+-\d+)i\b'), r'(\1)') # fix midi numbers (0-127)
]
AbcStructure.valid_directive_re = re.compile(r'^%%\w+(\s[^:\n]*|\.\.\.[^:\n]*)?:') # 1.3.6.2 [JWDJ] 2015-03 fixes false positives
AbcStructure.from_to_directive_re = re.compile(r'(%%\w+)\.\.\.(%%\w+)')
AbcStructure.abc_field_re = re.compile(r'[A-Za-z]:')
reference_content = reference_content.replace(unichr(150), '-')
reference_content = replace_text(reference_content, AbcStructure.replace_regexes)
lines = reference_content.splitlines()
for i in range(len(lines)):
lines[i] = lines[i].replace('hinti', '<int>')
lines[i] = lines[i].replace('%%MIDI drumoff turns', '%%MIDI drumoff: turns')
lines[i] = lines[i].replace('%%MIDI drumon turns', '%%MIDI drumon: turns')
sections = []
cur_section = []
abc_fields_done = False
for line in lines:
line = line.rstrip()
if line.startswith('A.'):
title = line.split(' ', 1)[1]
cur_section = []
sections.append((title, cur_section))
elif AbcStructure.valid_directive_re.search(line): # 1.3.6.2 [JWDJ] 2015-03 fixes false positives
abc_fields_done = True
cur_section.append(line)
elif not abc_fields_done and AbcStructure.abc_field_re.match(line):
cur_section.append(line)
elif cur_section: # join two lines
if cur_section[-1].endswith('-'):
cur_section[-1] = cur_section[-1][:-1] + line
else:
cur_section[-1] = cur_section[-1] + ' ' + line
for i in range(len(sections)):
section_name, lines = sections[i]
tuples = []
for line in lines:
if AbcStructure.abc_field_re.match(line):
name, desc = line.split(' ', 1)
tuples.append((name, desc))
elif len(line.split(': ', 1)) == 2:
name, desc = tuple(line.split(': ', 1))
m = AbcStructure.from_to_directive_re.match(name)
if m:
tuples.append((m.group(1), desc))
tuples.append((m.group(2), desc))
else:
tuples.append((name, desc))
sections[i] = section_name, tuples
return sections
@staticmethod
def generate_abc_elements(cwd):
directive = AbcDirective()
midi_directive = AbcMidiDirective()
directive.add_element(midi_directive)
# [JWDJ] the order of elements in result is very important, because they get evaluated first to last
result = [
AbcEmptyDocument(),
AbcEmptyLineWithinTuneHeader(),
AbcEmptyLineWithinTuneBody(),
AbcEmptyLineWithinFileHeader(),
AbcEmptyLine(),
AbcVersionDirective(),
AbcMidiProgramDirective(),
AbcMidiChordProgramDirective(),
AbcMidiBaseProgramDirective(),
AbcMidiChannelDirective(),
AbcMidiDrumMapDirective(),
AbcMidiVolumeDirective(),
AbcMidiGuitarChordDirective(),
ScoreDirective(),
MeasureNumberDirective(),
HideFieldsDirective(),
ShowFieldsDirective(),
directive,
AbcComment(),
AbcBeam(),
AbcBackslash(),
]
elements_by_keyword = {}
lines = abc_keywords.splitlines()
for line in lines:
parts = line.split('|')
keyword = parts[0].strip()
name = parts[1].strip()
file_header = parts[2].strip() == 'yes'
tune_header = tune_header_lookup[parts[3].strip()]
tune_body = parts[4].strip() == 'yes'
inline = parts[5].strip() == 'yes'
abc_type = parts[6].strip()
if abc_type == 'instruction':
element = AbcInstructionField(name, keyword, file_header, tune_header, tune_body, inline, abc_inner_pattern.get(keyword, '.*'))
elif abc_type == 'string':
element = AbcStringField(name, keyword, file_header, tune_header, tune_body, inline)
else:
raise Exception('Unknown abc-type')
result.append(element)
elements_by_keyword[element.keyword] = element
for (title, fields) in AbcStructure.get_sections(cwd):
for (field_name, description) in fields:
parts = field_name.split('<', 1)
keyword = parts[0].rstrip()
name = keyword
element_holder = None
if name.startswith('%%'):
name = name[2:]
if name[0:4] == 'MIDI':
element_holder = midi_directive
name = name[5:]
keyword = name
else:
element_holder = directive
if element_holder:
existing_element = element_holder.get_element(keyword)
else:
existing_element = elements_by_keyword.get(keyword)
if existing_element is not None:
                    element = existing_element
                    element.description = description
else:
if element_holder:
if element_holder == midi_directive:
element = AbcElement(field_name, name, description=description)
midi_directive.add_element(element)
else:
element = Abcm2psDirective(field_name, name, description=description)
directive.add_element(element)
else:
if len(name) == 2 and name[-1] == ':':
element = AbcElement(field_name, name, description=description)
elements_by_keyword[keyword] = element
result.append(element)
for part in parts[1:]:
param = part.strip()
if param[-1] == '>':
param = param[:-1]
element.params.append(param)
# elements = sorted(elements, key=lambda element: -len(element.keyword)) # longest match first
symbol_line = [element for element in result if element.keyword == 's:'][0]
result = [element for element in result if element.keyword != 's:']
# [JWDJ] the order of elements in result is very important, because they get evaluated first to last
result += [
AbcAnnotation(),
AbcChordSymbol(),
AbcChordOrAnnotation(),
AbcTuplet(),
AbcVariantEnding(),
AbcBar(),
AbcDynamicsDecoration(),
AbcFingeringDecoration(),
AbcOrnamentDecoration(),
AbcDirectionDecoration(),
AbcArticulationDecoration(),
AbcDecoration(),
symbol_line,
AbcGraceNotes(),
AbcSlur(),
AbcMultipleNotes(),
AbcMultipleNotesAndChords(),
AbcChord(),
AbcNote(),
AbcNormalRest(),
AbcMeasureRest(),
AbcVoiceOverlay(),
AbcBrokenRhythm(),
AbcInvalidCharacter(),
TypesettingSpace(),
RedefinableSymbol(),
AbcSpace(),
AbcUnknown()
]
elements_by_keyword['V:'].visible_match_group = 'name'
for element in result:
try:
element.freeze()
except Exception as ex:
print('Exception in element {0}: {1}'.format(element.name, ex))
logging.exception(ex)
return result
|
jwdj/EasyABC
|
tune_elements.py
|
tune_elements.py
|
py
| 58,593 |
python
|
en
|
code
| 67 |
github-code
|
6
|
23748260373
|
import os
import sys
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from toolBar import ToolBar
from Canvas.canvas import Canvas
import cv2
import numpy as np
from grab_cut import Grab_cut
from choiceDiaGen import ChoiceDiaGen
from choiceDiaStyle import ChoiceDiaStyle
from zoomWidget import ZoomWidget
from birdDialog import BirdDialog
from generator import Generator
from styleChanger import StyleChanger
__appname__ = 'grab_cut'
class ResizesQWidget(QWidget):
def sizeHint(self):
return QSize(100, 150)
class struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# Menu bar and toolbar helpers
class WindowMixin(object):
    # Create a menu from a title and a list of actions, e.g. 'File', [new, edit]
    def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName('{}ToolBar'.format(title))
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
addActions(toolbar, actions)
        self.addToolBar(Qt.LeftToolBarArea, toolbar)  # dock the toolbar on the left side of the layout
return toolbar
# Create a new QAction with optional icon, shortcut, tooltip, slot and checkable state
def newAction(parent, text, slot=None, shortcut=None,
tip=None, icon=None, checkable=False,
enable=True):
a = QAction(text, parent)
if icon is not None:
a.setIcon(QIcon(icon))
if shortcut is not None:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setChecked(True)
a.setEnabled(enable)
return a
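# Illustrative use: newAction(window, 'Open', slot=window.openFile, shortcut='Ctrl+O', tip='Open a file')
# returns a QAction wired to the slot, with the shortcut and tooltip set.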
# Add actions to a parent widget (a toolbar or a menu); None inserts a separator
def addActions(widget, actions):
    for action in actions:
        if action is None:
            widget.addSeparator()
        else:
            widget.addAction(action)  # widget is a toolbar or a menu
# Main application window
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None):
super().__init__()
        self.dirty = True  # whether there are unsaved changes
        self.mImgList = []  # list of image paths
        self.dirname = None  # current directory name
        self._beginner = True
        self.image_out_np = None  # extraction result (NumPy image)
        self.default_save_dir = None  # default save directory
        self.filePath = None  # path of the currently loaded image
        self.mattingFile = None
        # vertical layout
        listLayout = QVBoxLayout()
        listLayout.setContentsMargins(0, 0, 0, 0)
        # --- label 'pic' that shows the extraction result
        matResultShow = ResizesQWidget()  # plain QWidget with a custom size hint
matResultShow.resize(150, 150)
self.pic = QLabel(matResultShow)
self.pic.resize(150, 150)
self.setGeometry(50, 20, 150, 150)
matResultShow.setLayout(listLayout)
        # dock widget that holds the result image label
self.resultdock = QDockWidget('输出结果', self)
self.resultdock.setObjectName('result')
self.resultdock.setWidget(matResultShow)
self.resultdock.resize(150, 150)
# self.resultdock.setFeatures(QDockWidget.DockWidgetFloatable)
        # dock widget that holds the imported file list
        self.fileListWidget = QListWidget()  # list widget
self.fileListWidget.itemDoubleClicked.connect(
self.fileItemDoubleClicked)
fileListLayout = QVBoxLayout()
fileListLayout.setContentsMargins(0, 0, 0, 0)
fileListLayout.addWidget(self.fileListWidget)
fileListContainer = QWidget()
fileListContainer.setLayout(fileListLayout)
self.filedock = QDockWidget('导入文件列表', self)
self.filedock.setObjectName('Files')
self.filedock.setWidget(fileListContainer)
self.zoomWidget = ZoomWidget()
self.canvas = Canvas(parent=self)
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.resultdock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
# self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
self.resultdock.setFeatures(
self.resultdock.features() ^ self.dockFeatures)
# Actions
action = partial(newAction, self)
open_file = action('导入图片', self.openFile, 'Ctrl+O', '导入图片')
open_dir = action('导入文件夹', self.openDir,
'Ctrl+D', '导入文件夹中的所有图片到列表')
change_save_dir = action('&更改预设的保存路径', self.changeSavedirDialog)
# open_next_img = action('&Next Image', self.openNextImg,
# 'Ctrl+N', 'Open next image')
# open_pre_img = action('&Previous Image', self.openPreImg,
# 'Ctrl+M', 'Open previous image')
        save = action('保存结果', self.saveFile, 'Ctrl+S', '保存输出结果图')
create = action('指定区域', self.createShape,
'w', '框选ROI')
mark = action('标记微调', self.markDown, None, '左键白色,标记前景;右键黑色,标记后景')
matting = action('迭代一次', self.grabcutMatting,
'e', '用当前标记迭代一次获取前景算法')
        # generate an image with a pretrained model
        generate = action('生成图片', self.generate, None, '输入文字,生成图片素材')
        # style transfer with a pretrained model
        style = action('风格转换', self.styleChange, None, '选择一个风格,进行图像风格转换')
        # each zoom mode maps to a function that returns a scale factor
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
# store actions for further handling
self.actions = struct(save=save, open_file=open_file,
open_dir=open_dir, change_save_dir=change_save_dir,
# open_next_img=open_next_img, open_pre_img=open_pre_img,
create=create, mark=mark, matting=matting, generate=generate, style=style)
# Auto saving: enable auto saving if pressing next
# self.autoSaving = QAction('Auto Saving', self)
# self.autoSaving.setCheckable(True)
# self.autoSaving.setChecked()
# set toolbar
self.tools = self.toolbar('Tools')
self.actions.all = (open_file, open_dir,
change_save_dir, create,
# open_pre_img, open_next_img,
mark, matting, generate, style, save)
addActions(self.tools, self.actions.all)
# set status
self.statusBar().showMessage('{} 已就绪.'.format(__appname__))
def okToContinue(self):
if self.dirty:
reply = QMessageBox.question(self, "Attention",
"you have unsaved changes, proceed anyway?",
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return False
            elif reply == QMessageBox.Yes:
                self.saveFile()
                return True
return True
def resetState(self):
self.canvas.resetState()
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def beginner(self):
return self._beginner
def advanced(self):
return not self.beginner()
def openFile(self, _value=False):
path = os.path.dirname(self.filePath) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower()
for fmt in QImageReader.supportedImageFormats()]
filters = "Image (%s)" % ' '.join(formats)
filename = QFileDialog.getOpenFileName(
self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
def openDir(self, dirpath=None):
defaultOpenDirPath = dirpath if dirpath else '.'
targetDirPath = QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
self.importDirImages(targetDirPath)
    # Show the imported images in the file list panel
def importDirImages(self, dirpath):
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
# self.openNextImg()
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
    # Scan all files under the given folder and return the list of image paths
def scanAllImages(self, folderPath):
extensions = ['.%s' % fmt.data().decode("ascii").lower()
for fmt in QImageReader.supportedImageFormats()]
imageList = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(root, file)
path = os.path.abspath(relativePath)
imageList.append(path)
imageList.sort(key=lambda x: x.lower())
return imageList
def fileItemDoubleClicked(self, item=None):
        currIndex = self.mImgList.index(item.text())  # index of the clicked item in the image list
if currIndex < len(self.mImgList):
filename = self.mImgList[currIndex]
if filename:
                self.loadFile(filename)  # load the selected image
    # Load an image file into the canvas
def loadFile(self, filePath=None):
        self.resetState()  # clear the canvas
self.canvas.setEnabled(False)
        # highlight the selected item in the file list
if filePath and self.fileListWidget.count() > 0:
index = self.mImgList.index(filePath)
fileWidgetItem = self.fileListWidget.item(index)
fileWidgetItem.setSelected(True)
if filePath and os.path.exists(filePath):
# load image
self.ImageData = read(filePath, None)
else:
return
image = QImage.fromData(self.ImageData)
        # the image data could not be decoded
if image.isNull():
self.errorMessage(u'Error opening file',
u'<p>Make sure <i>%s</i> is a valid image file.' % filePath)
self.status('Error reading %s' % filePath)
return False
self.status('Loaded %s' % os.path.basename(filePath))
        self.image = image  # QImage
        self.filePath = filePath  # path of the currently loaded file
        self.canvas.loadPixmap(QPixmap.fromImage(image))  # show the image on the canvas
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
    # Show a message in the status bar
def status(self, message, delay=5000):
self.statusBar().showMessage(message, delay)
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2
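    # When the pixmap's aspect ratio (a2) is at least the viewport's (a1), width is the limiting
    # dimension and the scale is w1 / w2; otherwise height limits it and the scale is h1 / h2.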
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def paintCanvas(self):
assert not self.image.isNull(), "cannot paint null image"
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.adjustSize()
self.canvas.update()
def createShape(self):
assert self.beginner()
self.canvas.setEditing(False)
self.actions.create.setEnabled(False)
    # Start marking: switch the canvas mode to editing
def markDown(self):
self.canvas.setEditing(True)
def toggleDrawMode(self, edit=True):
self.canvas.setEditing(edit)
self.actions.createMode.setEnabled(edit)
self.actions.editMode.setEnabled(not edit)
    # Generate an image from text
    def generate(self):
        # category selection dialog
choiceDia = ChoiceDiaGen()
choiceDia.show()
choiceDia.hide()
        # the Generator class drives the generation dialog;
        # pass in the selected type and attribute list
gen = Generator(choiceDia.type,choiceDia.attrList)
gen.generate()
        # load the generated image into the main view
self.loadFile("StackGAN/resultImg/latest.png")
def styleChange(self):
        # style selection dialog
choiceDia = ChoiceDiaStyle()
choiceDia.show()
choiceDia.hide()
print(choiceDia.type)
changer = StyleChanger(choiceDia.type, self.filePath)
changer.changeStyle()
# self.loadFile("CycleGAN/targetImg/latest.png")
result = cv2.imread("CycleGAN/targetImg/latest.png")
        # convert to four channels (add an alpha channel)
result = self.addAchannel(result)
self.showResultImg(result)
self.image_out_np = result
    # Takes an image in OpenCV (BGR) format and adds an alpha channel
def addAchannel(self, x):
b_channel, g_channel, r_channel = cv2.split(x)
alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
result_BGAR = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
# result[np.all(result==[0,0,0,255],axis=2)]=[0,0,0,0]
result_BGAR[np.all(result_BGAR == [0, 0, 0, 255], axis=2)] = [0, 0, 0, 0]
return result_BGAR
    # Foreground extraction with GrabCut
def grabcutMatting(self):
if self.mattingFile is None:
self.mattingFile = Grab_cut()
def format_shape(s):
return dict(line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points],
backMark=self.canvas.getBackMark(),
whiteMark=self.canvas.getForMark())
        # four points (for a rectangle) plus the fill and outline colours
shape = format_shape(self.canvas.shapes[-1])
self.image_out_np = self.mattingFile.image_matting(self.filePath,
shape, iteration=10)
self.showResultImg(self.image_out_np)
self.actions.save.setEnabled(True)
    # Takes an OpenCV image matrix and shows it in the result panel
def showResultImg(self, image_np):
# resize to pic
# factor = min(self.pic.width() /
# image_np.shape[1], self.pic.height() / image_np.shape[0])
# image_np = cv2.resize(image_np, None, fx=factor,
# fy=factor, interpolation=cv2.INTER_AREA)
# image_np = cv2.resize((self.pic.height(), self.pic.width()))
image = QImage(image_np, image_np.shape[1],
image_np.shape[0], QImage.Format_ARGB32)
matImg = QPixmap(image)
self.pic.setFixedSize(matImg.size())
self.pic.setPixmap(matImg)
def saveFile(self):
self._saveFile(self.saveFileDialog())
def _saveFile(self, saved_path):
print(saved_path)
if saved_path:
Grab_cut.resultSave(saved_path, self.image_out_np)
self.setClean()
self.statusBar().showMessage('Saved to %s' % saved_path)
self.statusBar().show()
def saveFileDialog(self):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % 'png'
if self.default_save_dir is not None and len(self.default_save_dir):
openDialogPath = self.default_save_dir
else:
openDialogPath = self.currentPath()
print(openDialogPath)
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix('png')
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
return dlg.selectedFiles()[0]
return ''
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def changeSavedirDialog(self, _value=False):
if self.default_save_dir is not None:
path = self.default_save_dir
else:
path = '.'
dirpath = QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path,
QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks)
if dirpath is not None and len(dirpath) > 1:
self.default_save_dir = dirpath
self.statusBar().showMessage('%s . Annotation will be saved to %s' %
('Change saved folder', self.default_save_dir))
self.statusBar().show()
def setClean(self):
self.dirty = False
self.actions.save.setEnabled(False)
self.actions.create.setEnabled(True)
def openNextImg(self):
pass
def openPreImg(self):
pass
def scrollRequest(self, delta, orientation):
units = - delta / (8 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
    # Read an image file in binary mode and return its bytes
def read(filename, default=None):
try:
with open(filename, 'rb') as f:
return f.read()
except Exception:
return default
def resetState(self):
self.canvas.resetState()
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = MainWindow()
ex.show()
sys.exit(app.exec_())
|
kisstherain8677/Image_generate
|
app.py
|
app.py
|
py
| 19,181 |
python
|
en
|
code
| 3 |
github-code
|
6
|
20463208050
|
from collections import defaultdict
d = defaultdict(int)
n = int(input())
for _ in range(n):
d[input()] += 1
allwords = list(d)
allwords_str = d.values()
listofx = []
for x in allwords_str:
listofx.append(str(x))
print(len(allwords))
print(" ".join(listofx))
# The block above is equivalent to: print(len(d)) followed by print(*d.values())
|
Ronen-EDH/Code-exercises
|
Python/Hackerrank/Hackrank_wordorder.py
|
Hackrank_wordorder.py
|
py
| 361 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16098965612
|
from django.urls import path
from card import views
urlpatterns = [
path('create/', views.CreateFlashCardView.as_view(), name="create-flash-card"),
path('update/<id>/', views.UpdateFlashCardView.as_view(), name="update-flash-card"),
    path('delete/<id>/', views.DeleteFlashCardView.as_view(), name="delete-flash-card"),
path('list/<user_id>/', views.ListFlashCardView.as_view(), name="list-user-flash-card"),
]
|
leonardo0231/flash-card
|
card/urls.py
|
urls.py
|
py
| 428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70398650747
|
"""utilities for generation of CTRMs
Author: Keisuke Okumura
Affiliation: TokyoTech & OSX
"""
from __future__ import annotations
import numpy as np
from numba import f8, jit
from ..environment import Instance
from ..roadmap import TimedNode, TimedRoadmap
from ..roadmap.utils import valid_move
@jit(f8[:](f8[:, :], f8[:]), nopython=True)
def get_dist_arr(cands_pos: np.ndarray, loc: np.ndarray) -> np.ndarray:
return np.sum((cands_pos - loc) ** 2, axis=1)
def merge_samples(
loc: np.ndarray,
t: int,
agent: int,
trm: TimedRoadmap,
ins: Instance,
merge_distance: float = 0.01,
) -> np.ndarray:
"""find compatible sample, otherwise return loc
Args:
loc (np.ndarray): location
t (int): timestep
agent (int): target agent
trm (TimedRoadmap): target timed roadmap
ins (Instance): instance
merge_distance (:obj:`float`, optional):
distance regarding as spatially close enough
Returns:
np.ndarray: location of compatible sample if found, otherwise loc
Todo:
use efficient set operation
"""
rad = ins.rads[agent]
max_speed = ins.max_speeds[agent]
goal = ins.goals[agent]
# get necessary distance
cands_pos_arr = [u.pos for u in trm.V[t - 1]] # parents
if t + 1 <= len(trm.V) - 1:
cands_pos_arr += [u.pos for u in trm.V[t + 1]] # children
if len(trm.V) > t:
cands_pos_arr += [u.pos for u in trm.V[t]] # merge
dist_arr = get_dist_arr(np.array(cands_pos_arr), loc)
# compute parents
offset = len(trm.V[t - 1])
parents_cands_index = np.where(dist_arr[:offset] <= max_speed ** 2)[0]
parents = [
i
for i in parents_cands_index
if not ins.objs.collide_continuous_sphere(
trm.V[t - 1][i].pos, loc, rad
)
]
set_loc_parents = set(parents)
# compute children
if t + 1 <= len(trm.V) - 1:
children_cands_index = np.where(
dist_arr[offset : offset + len(trm.V[t + 1])] <= max_speed ** 2
)[0]
children = [
i
for i in children_cands_index
if not ins.objs.collide_continuous_sphere(
trm.V[t + 1][i].pos, loc, rad
)
]
else:
children = []
set_loc_children = set(children)
if len(trm.V) > t:
merge_cands_idx = np.where(
dist_arr[-len(trm.V[t]) :] <= merge_distance ** 2
)[0]
# get heuristics
h_loc = sum((loc - goal) ** 2)
for u_ind in merge_cands_idx:
u = trm.V[t][u_ind]
u_parents = trm.get_parents(u)
u_children = trm.E[t][u.index]
set_u_parents = set(u_parents)
set_u_children = set(u_children)
if (
set_u_parents == set_loc_parents
and set_u_children == set_loc_children
):
# merge to better one
h_u = sum((u.pos - goal) ** 2)
if h_loc < h_u:
# replace u by loc
trm.V[t][u.index] = TimedNode(t, u.index, loc)
return loc
else:
# abandon loc
return u.pos
if (
set_u_parents >= set_loc_parents
and set_u_children >= set_loc_children
):
# abandon loc
return u.pos
if (
set_u_parents <= set_loc_parents
and set_u_children <= set_loc_children
):
# replace u by loc
trm.V[t][u.index] = TimedNode(t, u.index, loc)
# append additional edge, children
trm.E[t][u.index] += list(set_loc_children - set_u_children)
# append parents
for p in set_loc_parents - set_u_parents:
trm.E[t - 1][p].append(u.index)
return loc
# append new sample
trm.append_sample(loc=loc, t=t, parents=parents, children=children)
return loc
def format_trms(ins: Instance, trms: list[TimedRoadmap]) -> None:
"""align length of timed roadmaps
Args:
ins (Instance): instance
trms (list[TimedRoadmap]): timed roadmaps
"""
T = max([len(trm.V) for trm in trms]) - 1
for i, trm in enumerate(trms):
def valid_edge(pos1: np.ndarray, pos2: np.ndarray) -> bool:
return valid_move(
pos1, pos2, ins.max_speeds[i], ins.rads[i], ins.objs
)
# technical point, add one additional layer
trm.extend_until(T + 1, valid_edge)
def append_goals(ins: Instance, trms: list[TimedRoadmap]) -> None:
"""append goals to timed roadmaps
Args:
ins (Instance): instance
trms (list[TimedRoadmap]): timed roadmaps
"""
for i, (trm, goal) in enumerate(zip(trms, ins.goals)):
def valid_edge(pos1: np.ndarray, pos2: np.ndarray) -> bool:
return valid_move(
pos1, pos2, ins.max_speeds[i], ins.rads[i], ins.objs
)
for t in range(1, len(trm.V)):
trm.append_sample(goal, t, valid_edge)
|
omron-sinicx/ctrm
|
src/ctrm/roadmap_learned/utils.py
|
utils.py
|
py
| 5,210 |
python
|
en
|
code
| 21 |
github-code
|
6
|
29821357591
|
import docker
class MicroDockerClient:
def __init__(self, micro_configuration):
self.client = docker.from_env()
self.config = micro_configuration
def pull(self):
self.client.images.pull(self.config.image_name)
def run(self):
self.client.containers.run(
self.config.image_name,
ports={F'{self.config.container_port}/tcp':str(self.config.exposed_port)},
name=self.config.name,
detach=True)
def delete(self):
try:
ctr = self.client.containers.list(filters={'name':self.config.name})[0]
ctr.kill()
ctr.remove()
except Exception :
print("No ctr to delete")
|
alichamouda/micro-cd
|
micro_docker_client.py
|
micro_docker_client.py
|
py
| 713 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72474543549
|
import Accelerometer
import ButtonControl
import RollPitch
import ParserSettings
# a parser for just one wii data
class oneWii :
def __init__(self):
self.accelerometer = Accelerometer.Accelerometer()
self.buttons = ButtonControl.ButtonControl()
self.rollPitch = RollPitch.RollPitch()
def __call__(self,ts,wid,xyz=None,buttons=None):
if xyz is not None :
self.accelerometer.testEvent( ts, wid, *xyz )
self.rollPitch.testEvent( ts, wid, *xyz )
if buttons is not None :
self.buttons.testEvent( ts, wid, buttons )
|
cloew/WiiCanDoIt-Framework
|
src/ProtocolGame/wiis/onewii.py
|
onewii.py
|
py
| 535 |
python
|
en
|
code
| 2 |
github-code
|
6
|
28663549378
|
# Please develop your ingestion service in Python. You may select the delivery format (e.g., Jupyter
# Notebook, containerized microservice). For this exercise, you may assume that a scheduling service
# to regularly invoke your ingestion is provided.
# Where and how you process the data is at your discretion.
import os
import requests
# import psycopg2
import pandas as pd
import geopandas as gpd
from zipfile import ZipFile
from shapely.geometry import Point
from urllib.request import urlretrieve
from requests.exceptions import RequestException
from zipfile import BadZipFile
from psycopg2 import OperationalError
from mappings import event_root_codes, event_base_codes, event_codes, map_fips_to_iso2
def main():
""" Main controller function
"""
try:
# add folders because git won't push empty folders
try:
os.mkdir('files')
os.mkdir('extracted')
except Exception:
print('Folders already exist, no problem! Continuing...')
extracted_file_path, zip_file_path = retrieve_event_data()
geo_data = retrieve_geo_data()
cleaned_data = clean_data(extracted_file_path)
filtered_event_data = filter_data(cleaned_data, geo_data)
load_db(filtered_event_data, event_root_codes, event_base_codes, event_codes, map_fips_to_iso2)
cleanup(extracted_file_path, zip_file_path)
except RequestException as e:
print(f"Error while retrieving data: {e}")
except BadZipFile as e:
print(f"Error while extracting the zip file: {e}")
except OperationalError as e:
print(f"Database connection error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
def retrieve_event_data() -> str:
""" Gets event data from external source.
I would improve this by looking into the GDELT API.
"""
# Retrieve data from the source site
data_files = requests.get('http://data.gdeltproject.org/gdeltv2/lastupdate.txt').content.decode()
# Selecting the first entry with “export” in it will
# give you the latest 15 min worth of data
file_download_location = data_files.replace("\n", " ").split(" ")[2]
# get just the file name out of the url
file_name = file_download_location.split("/")[-1]
file_path = 'files/' + file_name
# downloading the file to files/
urlretrieve(file_download_location, file_path)
# unzip and extract file to extracted/
with ZipFile(file_path, 'r') as zip:
zip.extractall('extracted/')
# remove .zip suffix
extracted_file_path = 'extracted/' + file_name[0:-4]
print('File downloaded')
return extracted_file_path, file_path
def clean_data(extracted_file_path):
""" Perform some foundational data prep and quality assurance
"""
try:
# load event data into pandas df
event_df = pd.read_csv(extracted_file_path, sep='\t')
# name cols so df is easier to use
event_df.columns = ['col_' + str(i) for i in range(61)]
# there are many things I could do here if I had more time
# for now I will drop duplicates and remove columns that aren't needed
# To make this more robust, I would clean and standardize the text
# and convert dates and floats to the appropriate formats/types
# I would also do ifnull checks and add in logic to fill in null values as needed
# Select cols needed in final output defined in assignment
event_df = event_df[['col_0', 'col_1', 'col_26', 'col_27', 'col_28', 'col_52', 'col_53', 'col_56', 'col_57', 'col_59', 'col_60']]
# name the columns according to doc
event_df.columns = ['GLOBALEVENTID', 'SQLDATE', 'EventCode', 'EventBaseCode', 'EventRootCode', 'ActionGeo_FullName', 'ActionGeo_CountryCode', 'ActionGeo_Lat', 'ActionGeo_Long', 'DATEADDED', 'SOURCEURL']
# Drop duplicates
event_df = event_df.drop_duplicates()
return event_df
except pd.errors.EmptyDataError as e:
raise pd.errors.EmptyDataError(f"Empty data error: {e}")
except pd.errors.ParserError as e:
raise pd.errors.ParserError(f"Parser error: {e}")
except Exception as e:
raise Exception(f"An unexpected error occurred during data cleaning: {e}")
def retrieve_geo_data():
""" In addition to the above source data, geometric location data for US counties may be located at:
https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json
"""
print('Retrieving geo data')
return requests.get('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json').content.decode()
def filter_data(event_df, geo_data):
""" Please filter the event data to those events located within the US
    based on their lat/lon coordinates (lat: ActionGeo_Lat, lon: ActionGeo_Long)
"""
# Load choropleth data using geopandas
choropleth_df = gpd.read_file(geo_data)
# Convert the event dataframe to a GeoDataFrame using "ActionGeo_Lat" and "ActionGeo_Long" columns
event_df['geometry'] = event_df.apply(lambda row: Point(row['ActionGeo_Long'], row['ActionGeo_Lat']), axis=1)
# Specify the CRS for the event data
event_gdf = gpd.GeoDataFrame(event_df, geometry='geometry', crs="EPSG:4326")
# Ensure that both datasets have the same CRS
event_gdf = event_gdf.to_crs(choropleth_df.crs)
# Perform the spatial join to filter events in the U.S.
us_events = gpd.sjoin(event_gdf, choropleth_df, how='inner', predicate='intersects')
print('Data filtered - might add in specifics using variables here')
return us_events
def load_db(filtered_event_data, event_root_codes, event_base_codes, event_codes, map_fips_to_iso2):
""" Please use Postgres/GIS as your target database. You should demonstrate how you might make
and manage the database connection, as well as the execution of needed transactions. You do not
need to configure and run the actual database except as it is helpful to you to do so.
"""
# This is just example code
# # Define the database connection parameters
# database_uri = "<insert your uri connection string here>"
# # Establish a connection to the database
# connection = psycopg2.connect(database_uri)
# # Create a cursor for executing SQL commands
# cursor = connection.cursor()
# create_table_sql = """
# CREATE TABLE events (
# GLOBALEVENTID SERIAL PRIMARY KEY,
# SQLDATE DATE,
# EventCode VARCHAR,
# EventBaseCode VARCHAR,
# EventRootCode VARCHAR,
# ActionGeo_FullName VARCHAR,
# ActionGeo_CountryCode VARCHAR,
# ActionGeo_Lat FLOAT,
# ActionGeo_Long FLOAT,
# DATEADDED DATE,
# SOURCEURL TEXT
# )
# """
# I would also add the JSON mappings into the database as dimension tables
# By creating the tables and inserting the given values into them
# # Execute the SQL command to create the table
# cursor.execute(create_table_sql)
# connection.commit()
# us_events.to_sql("events", connection, if_exists="replace", index=False)
# connection.commit()
# cursor.close()
# connection.close()
print('DB fictionally loaded: fictionally variable number of rows inserted')
def cleanup(extracted_file_path, zip_file_path):
""" Removes downloaded and extracted files at end
"""
print('Removing files')
os.remove(extracted_file_path)
os.remove(zip_file_path)
if __name__ == "__main__":
main()
|
madelinepet/take_home_assignment
|
assignment.py
|
assignment.py
|
py
| 7,586 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2988023971
|
import spoonacular as sp
api = sp.API("eaf1205e8c26404a8cda30c46c86f1cd")
def find_from_ingredients(ingredient_list):
#ranking = 2 means minimizing missing ingredients
recipe_list = api.search_recipes_by_ingredients(ingredients=ingredient_list, number=1, ranking=2)
return recipe_list
def get_recipe_nutrition(ingredient_list, serving_num):
recipe_list = api.visualize_recipe_nutrition(ingredientList=ingredient_list, servings=serving_num)
return recipe_list
ingredients = input("Enter ingredients you would like to cook with: ")
nutrition = get_recipe_nutrition(ingredients, 1)
for recipe in nutrition:
print(recipe)
|
emilyhua/hintofyum
|
nutrition.py
|
nutrition.py
|
py
| 626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41798087631
|
# Task 1 For a given integer n calculate the value which is equal to a:
# squared number, if its value is strictly positive;
# modulus of a number, if its value is strictly negative;
# zero, if the integer n is zero.
# Example: n=4 result= 16; n=-5 result= 5; n=0 result=0
n = int(input("Please, enter n for calculation:"))
if n > 0:
    print(n ** 2)
elif n < 0:
    print(abs(n))
else:
    print(0)
print('------------ TASK2')
# Task 2.
# Find the maximum integer, that can be obtained by permutation of numbers of an arbitrary three-digit
# positive integer n (100<=n<=999). Example: n =165 result = 651
n = input("Please, enter n for finding the maximum:")
n = (sorted(n, reverse=True))
print([int(x) for x in n])
print('------------ TASK3')
# Task 3.
# For a positive integer n calculate the result value, which is equal to the sum of the odd numbers of n.
# Example: n = 1234 result = 4, n = 246 result = 0
n = input("Please, enter n for calculation the sum of odd numbers:")
count_odd = 0
for i in n:
if int(i) % 2 != 0:
count_odd+=1
print('The sum of odd numbers:', count_odd)
print('------------ TASK4')
# Task 4. For a positive integer n calculate the result value, which is equal to the sum of the “1” in the
# binary representation of n. Example: n = 1410 = 11102 result = 3, n = 12810 = 1000 00002 result = 1
n = input("Please, enter n for calculation of '1s': ")
n = int(n)
n = (bin(n) [2:])
print(n)
count_1_in_bin_format = 0
for i in n:
if int(i) == 1:
count_1_in_bin_format+=1
print('The count of "1" on binary format is:', count_1_in_bin_format)
print('------------ TASK5')
# Task 5. Create function Fibonacci for a positive integer n, calculate the result value, which is equal to the sum
# of the first n Fibonacci numbers. Note. Fibonacci numbers are a series of numbers in which each next number is equal
# to the sum of the two preceding ones: 0, 1, 1, 2, 3, 5, 8, 13... (F0=0, F1=F2=1, then F(n)=F(n-1)+F(n-2) for n>2)
# Example: n=8 result=33, n = 11 result = 143
def fibinocci(n):
if n == 1 or n == 2:
return 1
return fibinocci(n - 1) + fibinocci(n - 2)
def sum_of_fibonacci(n):
    # The sum of the first n Fibonacci numbers (F0..F(n-1)) equals F(n+1) - 1.
    return fibinocci(n + 1) - 1
x = sum_of_fibonacci(8)
print('sum_of_n_Fibonacci', x)
print('------------ TASK6')
|
natshabat/Nataliya_Shabat
|
epam_hw2_ShabatN.py
|
epam_hw2_ShabatN.py
|
py
| 2,221 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12483812629
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import degree
from FallingCat import FallingCat
JI = 0.25
alpha = 30*degree
plt.figure(figsize=(5,7))
c = FallingCat(JI, alpha)
t = c.theta/degree
psi = c.lean()/degree
gamma = c.bend()/degree
phi = c.twist()/degree
print(phi[-1])
print((c.alpha + c.beta)/degree)
print((c.beta - c.alpha)/degree)
plt.subplot(3,1,1)
plt.plot(t, psi)
plt.ylabel(r'$\psi$ / deg')
plt.subplot(3,1,2)
plt.plot(t, gamma)
plt.ylabel(r'$\gamma$ / deg')
plt.subplot(3,1,3)
plt.plot(t, phi)
plt.ylabel(r'$\phi$ / deg')
plt.xlabel(r'$\theta$ / deg')
plt.tight_layout()
plt.savefig('fig2.eps')
plt.show()
|
tt-nakamura/cat
|
fig2.py
|
fig2.py
|
py
| 660 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73652393469
|
# Given a string s, reverse only the vowels in the string and return the result.
# Vowels are 'a', 'e', 'i', 'o', 'u' and may appear in upper or lower case.
class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
Vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
s = list(s)
i = 0
j = len(s) - 1
while(i < j):
while(i < len(s) and s[i] not in Vowels):
i += 1
while(j >= 0 and s[j] not in Vowels):
j -= 1
if i >= j:
break
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
return ''.join(s)
s = ",."
a = Solution()
print(a.reverseVowels(s))
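# Additional expected results for reference: reverseVowels("hello") -> "holle",
# reverseVowels("leetcode") -> "leotcede"; strings without vowels are returned unchanged.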
|
xxxxlc/leetcode
|
array/reverseVowels.py
|
reverseVowels.py
|
py
| 834 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
26664284885
|
import json
import logging
import os
from http.client import HTTPConnection
from pathlib import Path
from typing import Dict, Any
from mmcc_framework import DictCallback, Framework
from mmcc_framework.nlu_adapters import NluAdapter
from tuning.mmcc_config.callbacks import my_callbacks
from tuning.types import Pipeline, PipelineCallback
# Load the process description and kb from file.
with open(Path(__file__).parent / 'mmcc_config' / 'process_desc.json', "r") as process_file:
proc = json.loads(process_file.read())
logging.getLogger(__name__).info('Reading process_desc file')
with open(Path(__file__).parent / 'mmcc_config' / 'process_kb.json', "r") as process_file:
kb = json.loads(process_file.read())
logging.getLogger(__name__).info('Reading process_kb file')
def get_framework(pipeline: Pipeline, result: str, start_work: PipelineCallback) -> Framework:
"""Creates a new framework object, remember to call `handle_data_input({})` to get the first sentence.
The framework will have no NLU and the kb will not be saved at the end of execution.
The context will contain the dataset and the pipeline.
:param pipeline: the pipeline used in the last analysis
:param result: base64 string representation of the previous analysis result
:param start_work: callback that takes the pipeline and starts the execution in another thread
"""
return Framework(process=proc,
kb=kb,
initial_context={'pipeline': pipeline, 'result': result, 'start_work': start_work},
callback_getter=DictCallback(callbacks=my_callbacks),
nlu=MyRasaNlu(),
on_save=lambda *args: None)
class MyRasaNlu(NluAdapter):
""" This adapter uses Rasa, to use this adapter it is necessary to first setup and train the interpreter.
The instructions on how to use Rasa are available on Rasa's website, and consist basically in the following steps:
- Install Rasa and its dependencies;
- Run `rasa init` in your folder of choice;
- Edit the `data/nlu` file with the utterances used for training;
- Run `rasa train nlu` to produce a model;
- Start rasa on port 5005 and pass the location of the model:
for example `rasa run --enable-api -m models/nlu-20201228-183937.tar.gz`
Example:
Suppose that the nlu is trained with, among the others, the intent "insert_name" with a entity "name".
Initialize the adapter: `my_adapter = RasaNlu()`
Suppose that it is time to insert the name. If it is necessary to insert it as text use:
`my_framework.handle_text_input("Mark")`. The callback corresponding to the current activity will receive
(if the intent is recognized): `{"intent": "insert_name", "name": "Mark"}`.
If it is necessary to insert the name as data use:
`my_framework.handle_data_input(RasaNlu.dict("insert_name", {"name": "Mark"}))`, which will pass to the callback
the same structure as above.
:ivar interpreter: the instance of the rasa interpreter used by this adapter
"""
def __init__(self):
self.host = os.getenv("RASA_IP", "localhost") # TODO(giubots): fix here (host.docker.internal)
self.port = int(os.getenv("RASA_PORT", "5005"))
def parse(self, utterance: str) -> Dict[str, Any]:
""" Runs the interpreter to parse the given utterance and returns a dictionary containing the parsed data.
If no intent can be extracted from the provided utterance, this returns an empty dictionary.
:param utterance: the text input from the user
:return: a dictionary containing the detected intent and corresponding entities if any exists.
"""
connection = HTTPConnection(host=self.host, port=self.port)
connection.request("POST", "/model/parse", json.dumps({"text": utterance}))
response = json.loads(connection.getresponse().read())
if response["intent"]["name"] is None:
return {"intent": ""}
res = self.dict(response["intent"]["name"], {item['entity']: item["value"] for item in response["entities"]})
logging.getLogger(__name__).info('Detected intent: %s', res)
return res
@staticmethod
def dict(intent: str, values: Dict[str, Any] = None) -> Dict[str, Any]:
""" Helper method that can be used to produce a dictionary equivalent to the one of the parse method.
Use this method with framework.handle_data_input.
:param intent: the intent corresponding to this input
:param values: an optional dictionary containing pairs of entity-value
:return: a dictionary equivalent to the one produced by the parse method
"""
if values is None:
values = {}
return {"intent": intent, **values}
|
DEIB-GECO/DSBot
|
DSBot/tuning/mmcc_integration.py
|
mmcc_integration.py
|
py
| 4,842 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8918699138
|
import sys
if __name__ == '__main__':
if len(sys.argv) == 1:
print("Usage: python {} [rom.ch8] >> [output.csv]".format(sys.argv[0]))
exit()
rom = sys.argv[-1]
bytes = bytearray()
with open(rom, 'rb') as r:
byte = r.read(1)
while byte != "":
bytes.append(byte)
byte = r.read(1)
disassemble = []
byteindex = 0
while byteindex < len(bytes):
disassemble.append(bytes[byteindex] << 8 | bytes[byteindex + 1])
byteindex += 2
pc = 0x200
print("address,instruction,opcode,x,y,n,kk,nnn,description")
for ip in range(0x00, len(disassemble)):
mem_addr = '0x{0:0{1}X}'.format(pc, 4)
instr = '0x{0:0{1}X}'.format(disassemble[ip], 4)
opcode = '0x{0:0{1}X}'.format(disassemble[ip] & 0xF000, 4)
x = '0x{0:0{1}X}'.format((disassemble[ip] >> 8) & 0x000F, 4)
y = '0x{0:0{1}X}'.format((disassemble[ip] >> 4) & 0x000F, 4)
n = '0x{0:0{1}X}'.format(disassemble[ip] & 0x000F, 4)
kk = '0x{0:0{1}X}'.format(disassemble[ip] & 0x0FF, 4)
nnn = '0x{0:0{1}X}'.format(disassemble[ip] & 0x0FFF, 4)
desc = ""
if int(opcode, 16) == 0x0000:
if int(kk, 16) == 0x00E0:
desc = "CLEAR_SCREEN"
elif int(kk, 16) == 0x00EE:
desc = "SUBROUTINE_RETURN"
else:
desc = "UNKNOWN_0x00_INSTRUCTION"
elif int(opcode, 16) == 0x1000:
desc = "JUMP_{}".format(nnn)
elif int(opcode, 16) == 0x2000:
desc = "SUBROUTINE_CALL_{}".format(nnn)
elif int(opcode, 16) == 0x3000:
desc = "SKIP_NEXT_INSTRUCTION_IF_V[{}]_EQUALS_{}".format(x, kk)
elif int(opcode, 16) == 0x4000:
desc = "SKIP_NEXT_INSTRUCTION_IF_V[{}]_NOT_EQUALS_{}".format(x, kk)
elif int(opcode, 16) == 0x5000:
desc = "SKIP_NEXT_INSTRUCTION_IF_V[{}]_EQUALS_V[{}]".format(x, y)
elif int(opcode, 16) == 0x6000:
desc = "SET_V[{}]_EQUALSTO_{}".format(x, kk)
elif int(opcode, 16) == 0x7000:
desc = "SET_V[{}]_EQUALSTO_V[{}]+{}".format(x, x, kk)
elif int(opcode, 16) == 0x8000:
if int(n, 16) == 0x0000:
desc = "SET_V[{}]_EQUALSTO_V[{}]".format(x, y)
elif int(n, 16) == 0x0001:
desc = "SET_V[{}]_EQUALSTO_V[{}]_OR_V[{}]".format(x, x, y)
elif int(n, 16) == 0x0002:
desc = "SET_V[{}]_EQUALSTO_V[{}]_AND_V[{}]".format(x, x, y)
elif int(n, 16) == 0x0003:
desc = "SET_V[{}]_EQUALSTO_V[{}]_XOR_V[{}]".format(x, x, y)
elif int(n, 16) == 0x0004:
desc = "SET_V[{}]_EQUALSTO_V[{}]_PLUS_V[{}]".format(x, x, y)
elif int(n, 16) == 0x0005:
desc = "SET_V[{}]_EQUALSTO_V[{}]_MINUS_V[{}]".format(x, x, y)
elif int(n, 16) == 0x0006:
desc = "SET_V[{}]_EQUALSTO_V[{}] >> 1".format(x, x)
elif int(n, 16) == 0x0007:
desc = "SET_V[{}]_EQUALSTO_V[{}]_MINUS_V[{}]".format(x, y, x)
elif int(n, 16) == 0x000E:
desc = "SET_V[{}]_EQUALSTO_V[{}] << 1".format(x, x)
else:
desc = "UNKNOWN_0x8000_INSTRUCTION"
elif int(opcode, 16) == 0x9000:
desc = "SKIP_NEXT_INSTRUCTION_IF_V[{}]_NOT_EQUALS_V[{}]".format(x, y)
elif int(opcode, 16) == 0xA000:
desc = "SET_I_EQUALSTO_{}".format(nnn)
elif int(opcode, 16) == 0xB000:
desc = "JUMP_V0_PLUS_{}".format(nnn)
elif int(opcode, 16) == 0xC000:
desc = "SET_V[{}]_EQUALSTO_RANDOM".format(x)
elif int(opcode, 16) == 0xD000:
desc = "DRAW_SPRITE_AT_COORDINATES_V[{}]V[{}]_WIDTH_8_HEIGHT_{}".format(x, y, n)
elif int(opcode, 16) == 0xE000:
if int(kk, 16) == 0x009E:
desc = "SKIP_NEXT_INSTRUCTION_IF_V[{}]_PRESSED".format(x)
elif int(kk, 16) == 0x00A1:
desc = "SKIP_NEXT_INSTRUTION_IF_V[{}]_NOTPRESSED".format(x)
else:
desc = "UNKNOWN_0xE000_INSTRUCTION"
elif int(opcode, 16) == 0xF000:
if int(kk, 16) == 0x0007:
desc = "SET_V[{}]_EQUALSTO_DELAYTIMER".format(x)
elif int(kk, 16) == 0x00A:
desc = "KEYPRESS_WAIT_AND_STOREKEY_IN_V[{}]".format(x)
elif int(kk, 16) == 0x0015:
desc = "SET_DELAYTIMER_EQUALSTO_V[{}]".format(x)
elif int(kk, 16) == 0x0018:
desc = "SET_SOUNDTIMER_EQUALSTO_V[{}]".format(x)
elif int(kk, 16) == 0x001E:
desc = "SET_I_EQUALSTO_I_PLUS_V[{}]".format(x)
elif int(kk, 16) == 0x0029:
desc = "SET_I_EQUALSTO_SPRITE_ADDRESS_OF_CHAR_IN_V[{}]".format(x)
elif int(kk, 16) == 0x0033:
desc = "SET_BCD_V[{}]".format(x)
elif int(kk, 16) == 0x0055:
desc = "STORE_V[0-{}]_STARTINGAT_MEM[I]".format(x)
elif int(kk, 16) == 0x0065:
desc = "LOAD_MEM[I - I+{}]_IN_V[0-{}]".format(x, x)
else:
desc = "UNKNOWN_0xF000_INSTRUCTION"
else:
desc = "UNKNOWN_{}_OPCODE".format(opcode)
pc += 2
print("{},{},{},{},{},{},{},{},{}".format(mem_addr, instr, opcode, x, y, n, kk, nnn, desc))
|
FrancescoTerrosi/chip8emu
|
CHIP8/disassembler/disassemble.py
|
disassemble.py
|
py
| 5,468 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41675665840
|
# Finding areas
import sys
sys.setrecursionlimit(100000000)
input = sys.stdin.readline
m, n, k = map(int, input().split())
# The paper grid
paper = [[0 for _ in range(n)] for _ in range(m)]
# Visited markers
visited = [[False for _ in range(n)] for _ in range(m)]
res = []
# Paint the rectangles onto the paper
for _ in range(k):
x1, y1, x2, y2 = map(int, input().split())
for y in range(y1, y2):
for x in range(x1, x2):
paper[y][x] = 1
def dfs(y, x):
res = 0
def _dfs(y, x):
nonlocal res
res += 1
visited[y][x] = True
direction = [(1, 0), (0, -1), (-1, 0), (0, 1)]
for d in direction:
next_step = (y + d[0], x + d[1])
if (
0 <= next_step[0] < m
and 0 <= next_step[1] < n
and paper[next_step[0]][next_step[1]] == 0
and not visited[next_step[0]][next_step[1]]
):
_dfs(next_step[0], next_step[1])
_dfs(y, x)
return res
# Traverse the whole paper and call dfs on every unpainted, unvisited cell
for y in range(m):
for x in range(n):
if paper[y][x] == 0 and not visited[y][x]:
cnt = dfs(y, x)
res.append(cnt)
print(len(res))
for num in sorted(res):
print(num, end=" ")
|
jisupark123/Python-Coding-Test
|
알쓰/week1/2583.py
|
2583.py
|
py
| 1,313 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34248836732
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.style.use("bmh")
def exact(r1, r2, w):
return 2 * np.sqrt(w/np.pi) * np.exp(- w * (r1 * r1 + r2 * r2))
def fmt(x, pos):
a, b = '{:.1e}'.format(x).split('e')
b = int(b)
return r'${} \times 10^{{{}}}$'.format(a, b)
if __name__ == "__main__":
N = 1000
radius = 3
r = np.linspace(-radius, radius, N)
data = np.zeros((N, N))
for i in range(N):
for j in range(N):
data[i, j] = exact(r[i], r[j], 1)
#data /= np.sum(data)
size = 28
size_ticks = 20
label_size = {"size":str(size)}
plt.rcParams["font.family"] = "Serif"
plt.rcParams.update({'figure.autolayout': True})
fig, ax = plt.subplots(figsize=(8,6))
img = ax.imshow(data, cmap=plt.cm.jet, extent=[-radius,radius,-radius,radius])
cbar = fig.colorbar(img, fraction=0.046, pad=0.04) #, format=ticker.FuncFormatter(fmt))
cbar.set_label(r'$\rho(r_i,r_j)$', rotation=90, labelpad=10, y=0.5, **label_size)
cbar.ax.tick_params(labelsize=size_ticks)
plt.tight_layout()
ax.set_xlabel("$r_j$", **label_size)
ax.set_ylabel("$r_i$", **label_size)
ax.tick_params(labelsize=size_ticks)
tick = [-3, -2, -1, 0, 1, 2, 3]
ax.set_xticks(tick)
ax.set_yticks(tick)
plt.grid()
plt.show()
|
evenmn/Master-thesis
|
scripts/plot_exact_tb.py
|
plot_exact_tb.py
|
py
| 1,391 |
python
|
en
|
code
| 4 |
github-code
|
6
|
1904177195
|
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import json, os
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get('/contents/{page_id}/{content_id}')
async def content(
page_id: str,
content_id: str):
json_path = f"./data/json/{page_id}/{content_id}.json"
if not os.path.exists(json_path):
raise HTTPException(status_code=404, detail="Page not found")
with open(json_path, 'r', encoding='utf-8') as j:
json_load = json.load(j)
return json_load
@app.get('/contents/{page_id}')
async def contentslist(
page_id: str,):
json_path = f"./data/pagelist/{page_id}.json"
if not os.path.exists(json_path):
raise HTTPException(status_code=404, detail="Page not found")
with open(json_path, 'r', encoding='utf-8') as j:
json_load = json.load(j)
return json_load
@app.get('/pagelist')
async def pagelist():
json_path = "./data/pagelist/all.json"
with open(json_path, 'r', encoding='utf-8') as j:
json_load = json.load(j)
return json_load
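# Minimal sketch for exercising these routes with FastAPI's TestClient
# (assumes the JSON files under ./data/ actually exist):
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   print(client.get('/pagelist').json())
#   print(client.get('/contents/some_page_id').status_code)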
|
tetla/knowledge-reader
|
backend/offdemy-api.py
|
offdemy-api.py
|
py
| 1,203 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33381013184
|
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from django.contrib.auth import views as auth_views
from polls.views import (
RegistrationView,
CreateBoardView,
BoardDetailView,
BoardDeleteView,
CreateListView,
# ListDetailView,
ListEditView,
ListDeleteView,
CreateCardView,
CardEditView,
CardDeleteView,
CardMoveView,
)
urlpatterns = [
path('admin/', admin.site.urls),
path("accounts/", include("django.contrib.auth.urls")),
path("accounts/register/", RegistrationView.as_view()),
path("", CreateBoardView.as_view(), name="board"),
path("board/detail/<id>/", BoardDetailView.as_view(), name="board_detail"),
path("board/delete<id>/", BoardDeleteView.as_view(), name="board_delete"),
path("list/<id>", CreateListView.as_view(), name="list_create"),
# path("list/detail/<id>/", ListDetailView.as_view(), name="list_detail"),
path("list/edit/<id>/", ListEditView.as_view(), name="list_edit"),
path("list/delete/<id>/", ListDeleteView.as_view(), name="list_delete"),
path("card/<id>/", CreateCardView.as_view(), name="card_create"),
path("card/edit/<id>/", CardEditView.as_view(), name="card_edit"),
path("card/delete/<id>/", CardDeleteView.as_view(), name="card_delete"),
path("card/<id>/move/", CardMoveView.as_view(), name="card_move"),
]
urlpatterns += staticfiles_urlpatterns()
|
destinymalone/projectmanagement-capstone
|
mysite/urls.py
|
urls.py
|
py
| 1,486 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2665829226
|
from heatSink import HeatSink
from waterPipes import WaterPipes
from solarPanel import SolarPanel
from system import System
import matplotlib.pyplot as plt
flow_rates = [0.00025, 0.0005, 0.001, 0.002, 0.003, 0.005]
panel_temp = []
no_pipes = []
inlet_temp = 30
for f in flow_rates:
temps = []
pipes = []
for p in [1, 2, 3, 4, 5]:
heat_sink = HeatSink()
solar_panel = SolarPanel()
water_pipes = WaterPipes(no_pipes=p)
final_temp = 0
for i in range(0, 40):
system = System(heat_sink=heat_sink,
solar_panel=solar_panel,
water_pipes=water_pipes,
ambient_temp=30,
flow_rate=f,
flow_temp=inlet_temp)
system.update()
inlet_temp = system.outletTemp
final_temp = system.T_2
temps.append(final_temp)
pipes.append(p)
panel_temp.append(temps)
no_pipes.append(pipes)
for i in range(0, len(flow_rates)):
plt.plot(no_pipes[i], panel_temp[i], 'o-', label='Flow rate: ' + str(flow_rates[i]) + ' m3/s')
plt.legend()
plt.xlabel('Number of Pipes')
plt.ylabel('Panel Surface Temperature (°C)')
plt.show()
|
southwelljake/HeatSinkModelling
|
src/comparePipes.py
|
comparePipes.py
|
py
| 1,260 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73400221629
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "Polaris' NoteBook"
copyright = '2023, PolarisXQ'
author = 'PolarisXQ'
release = '0.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'sphinx_markdown_tables',
# 'sphinxemoji.sphinxemoji',
'sphinx.ext.githubpages',
'sphinx_copybutton',
'sphinx.ext.mathjax',
# 'pallets_sphinx_themes'
'myst_parser'
]
myst_enable_extensions = [
"amsmath",
"attrs_inline",
"colon_fence",
"deflist",
"dollarmath",
"fieldlist",
"html_admonition",
"html_image",
"linkify",
"replacements",
"smartquotes",
"strikethrough",
"substitution",
"tasklist",
]
templates_path = ['_templates']
exclude_patterns = []
language = 'zh_CN'
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'press'
html_static_path = ['_static']
html_sidebars = {
'***': ['util/searchbox.html', 'util/sidetoc.html'],
}
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
html_logo = '_static/madcat_mini.png'
html_favicon='_static/madcat_mini.png'
html_theme_options = {
"external_links": [
("Github", "https://github.com/PolarisXQ"),
# ("Other", "https://bla.com")
]
}
|
PolarisXQ/Polaris-NoteBook
|
source/conf.py
|
conf.py
|
py
| 1,924 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40070373372
|
import boto3
import json
from tqdm import tqdm
dynamodb = boto3.resource('dynamodb',region_name='us-east-2')
table = dynamodb.Table('FSBP_tree')
print(table.creation_date_time)
'''
with open('/hdd/c3s/data/aws_data/breach_compilation-pw_tree_1000000.json') as f:
data = json.load(f)
with table.batch_writer() as batch:
for item in data:
batch.put_item(
Item={
'NodeId': item,
'Info': data[item]
}
)
'''
f = open('/hdd/c3s/data/aws_data/splits/intr_tree_lucy_0.txt','r')
t = 0
bar= tqdm(f)
with table.batch_writer() as batch:
for line in bar:
item = line.split('\t')
batch.put_item(
Item={
'NodeId': item[0],
'Info': item[1]
}
)
|
lucy7li/compromised-credential-checking
|
perfomance_simulations/fsbp/save_amazon.py
|
save_amazon.py
|
py
| 793 |
python
|
en
|
code
| 6 |
github-code
|
6
|
25814131906
|
import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
class ModifyView(MethodView):
def error(self, item, error):
return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
def response(self, name):
return redirect_next_referrer('bepasty.display', name=name)
def get_params(self):
return {
FILENAME: request.form.get('filename'),
TYPE: request.form.get('contenttype'),
}
def post(self, name):
if not may(CREATE):
raise Forbidden()
try:
with current_app.storage.openwrite(name) as item:
if not item.meta[COMPLETE] and not may(ADMIN):
error = 'Upload incomplete. Try again later.'
return self.error(item, error)
if item.meta[LOCKED] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
params = self.get_params()
if params[FILENAME]:
item.meta[FILENAME] = Upload.filter_filename(
params[FILENAME], name, params[TYPE], item.meta[TYPE]
)
if params[TYPE]:
item.meta[TYPE], _ = Upload.filter_type(
params[TYPE], item.meta[TYPE]
)
return self.response(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
|
bepasty/bepasty-server
|
src/bepasty/views/modify.py
|
modify.py
|
py
| 1,929 |
python
|
en
|
code
| 162 |
github-code
|
6
|
8778305417
|
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required, current_identity
from models.player import PlayerModel
from models.team import TeamModel
class Player(Resource):
parser = reqparse.RequestParser()
parser.add_argument(
'back_number',
type=int,
required=True,
help="This field must be integer"
)
parser.add_argument(
'team_name',
type=str,
required=True,
help="This field must be integer"
)
parser.add_argument(
'new_back_number',
type=str,
required=False,
help="This field must be integer"
)
def get(self, name, division=None, team_name=None):
if team_name is None and division is None:
players = PlayerModel.find_by_name(name)
if players.count() > 0:
return {"players": [player.json() for player in players]}
elif team_name is None:
print (division)
else:
team = TeamModel.find_by_name_division(team_name, division)
if team:
players = PlayerModel.find_by_team_id(team.id)
if players.count() > 0:
return {"players": [player.json() for player in players]}
return {"message": "Player does not exists"}
def post(self, name):
data = Player.parser.parse_args()
team = TeamModel.find_by_name(data['team_name'])
if team is None:
return {"message" : "Team does not exist"}
if PlayerModel.find_by_back_number_in_team(data['back_number'], team.id):
return {"message" : "Back number is already taken."}
player = PlayerModel(name, data['back_number'], team.id)
player.save_to_db()
return player.json()
@jwt_required()
def delete(self, name):
data = Player.parser.parse_args()
team = TeamModel.find_by_name(data['team_name'])
if team is None:
return {"message" : "Team does not exist"}
player = PlayerModel.find_player_in_team(name, team.id, data['back_number'])
if player:
player.delete_from_db()
return {"message":"Player deleted"}
def put(self, name):
data = Player.parser.parse_args()
team = TeamModel.find_by_name(data['team_name'])
if team is None:
return {"message" : "Team does not exist"}
if PlayerModel.find_by_back_number_in_team(data['new_back_number'], team.id):
return {"message" : "Back number is already taken."}
player = PlayerModel.find_player_in_team(name, team.id, data['back_number'])
if player is None:
return {"message" : "Player doesn not exist"}
else:
if data['new_back_number'] is None:
return {"message" : "New Back Number is missing"}
player.back_number = data['new_back_number']
player.save_to_db()
return player.json()
class PlayerList(Resource):
def get(self, team_name=None, division=None):
if team_name is None and division is None:
players = PlayerModel.query.all()
else:
team = TeamModel.find_by_name(team_name)
if team:
players = PlayerModel.find_by_team_id(team.id)
else:
players = PlayerModel.find_by_division(division)
return {"players" : [player.json() for player in players]}
|
baehs1989/flask-RESTful-project
|
resources/player.py
|
player.py
|
py
| 3,467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29186498876
|
import numpy
import multiprocessing as mp
import scipy.fftpack as fft
import scipy.signal as signal
import h5py
from .utilities import working_dir
from .stationbandpass import lofar_station_subband_bandpass
def fir_filter_coefficients(num_chan, num_taps, cal_factor=1./50.0):
'''
Compute FIR filter coefficients for channel separation.
**Parameters**
num_chan : int
Required number of channels in PPF output.
num_taps : int
Number of PPF taps.
**Returns**
A num_taps x num_chan numpy.array of float32.
**Example**
>>> fir_filter_coefficients(num_chan=4, num_taps=8)
array([[-0.00337621, 0.01111862, -0.01466139, 0.00781696],
[ 0.00988741, -0.02981976, 0.03694931, -0.01888615],
[-0.0233096 , 0.06982564, -0.08770566, 0.0466728 ],
[ 0.06241577, -0.21720791, 0.36907339, -0.46305624],
[ 0.46305624, -0.36907339, 0.21720791, -0.06241577],
[-0.0466728 , 0.08770566, -0.06982564, 0.0233096 ],
[ 0.01888615, -0.03694931, 0.02981976, -0.00988741],
[-0.00781696, 0.01466139, -0.01111862, 0.00337621]], dtype=float32)
'''
raw_coefficients = signal.firwin((num_taps)*num_chan, 1/(num_chan),
width=0.5/(num_chan))
auto_fftshift = raw_coefficients*(-1)**numpy.arange(num_taps*num_chan)
coefficients = numpy.array(auto_fftshift*(num_chan**0.5),
dtype=numpy.float32)
coefficients *= cal_factor
return coefficients.reshape((num_taps, num_chan))
def channelize_ppf(timeseries_taps, fir_coefficients):
'''
Make a polyphase-filtered spectrum of a timeseries.
**Parameters**
timeseries_taps : 2D numpy.array of complex64
A `num_taps x num_chan` array containing the timeseries data,
where `timeseries_taps.ravel()` should yield the input (single
channel) timeseries data.
fir_coefficients : 2D numpy.array of float32
A `num_taps x num_chan` array containing the FIR coefficients,
where `fir_coefficients.ravel()` should yield the FIR filter to
multiply with the original (single channel) timeseries data.
**Returns**
A 1D numpy.array of complex64 with length num_chan containing the
PPF output.
**Example**
>>> fir = fir_filter_coefficients(num_chan=4, num_taps=2, cal_factor=1)
>>> fir.dtype
dtype('float32')
>>> timeseries = numpy.array(numpy.exp(2j*numpy.pi*2.8*numpy.arange(8)),
... dtype=numpy.complex64)
>>> timeseries
array([ 1.000000 +0.00000000e+00j, 0.309017 -9.51056540e-01j,
-0.809017 -5.87785244e-01j, -0.809017 +5.87785244e-01j,
0.309017 +9.51056540e-01j, 1.000000 -3.42901108e-15j,
0.309017 -9.51056540e-01j, -0.809017 -5.87785244e-01j], dtype=complex64)
>>> spectrum = channelize_ppf(timeseries.reshape(fir.shape), fir)
>>> spectrum
array([-0.03263591-0.01060404j, -0.00383157+0.00195229j,
-0.00848089+0.02610143j, 0.78864020+1.54779351j], dtype=complex64)
'''
return (fft.fft((timeseries_taps*fir_coefficients).sum(axis=0)))
def channelize_ppf_multi_ts(timeseries_taps, fir_coefficients):
'''FIR coefficients are num_taps x num_chan, blocks are num_timeslots x num_taps x num_chan arrays'''
return (fft.fft((timeseries_taps*fir_coefficients[numpy.newaxis,:,:]).sum(axis=1),
axis=1))
def channelize_ppf_contiguous_block(timeseries_taps, fir_coefficients):
num_taps, num_chan = fir_coefficients.shape
num_ts_blocks = timeseries_taps.shape[0]
num_spectra = num_ts_blocks -(num_taps-1)
output_spectra = numpy.zeros((num_spectra, num_chan),
dtype=numpy.complex64)
for sp in range(num_spectra):
output_spectra[sp,:] += channelize_ppf(timeseries_taps[sp:sp+num_taps,:],
fir_coefficients)
return output_spectra
def samples_per_block(block_length_s, sample_duration_s, num_chan, num_taps):
r'''
Calculate the number of samples per correlator intergration time,
as well as the number of samples that must be read. The latter is
larger because a certain number of samples before and after the
actual interval must be read to properly fill the PPF.
**Parameters**
block_length_s : float
Number of seconds per correlator interval.
sample_duration_s : float
Number of seconds per sample in the time series data.
num_chan : int
Number of channels for the PPF.
num_taps : int
Number of taps in the PPF
**Returns**
Tuple (block_length samples, samples_to_read_per_block). Both
integers.
**Examples**
>>> block_length_samples, samples_to_read = samples_per_block(0.1, 1024/200e6, num_chan=256, num_taps=16)
>>> block_length_samples, block_length_samples/256, samples_to_read/256
(19456, 76.0, 91.0)
>>> print(block_length_samples*1024/200e6, ' seconds')
0.09961472 seconds
'''
num_spectra = int(round(block_length_s/sample_duration_s/num_chan))
block_length_samples = num_spectra*num_chan
samples_to_read_per_block = (num_spectra+(num_taps-1))*num_chan
return block_length_samples, samples_to_read_per_block
def read_and_process_antenna_worker(h5_names, sap_id,
num_sb, fir_coefficients,
connection):
r'''
    Worker process that reads complex time series data from four HDF5
    groups containing X_re, X_im, Y_re, and Y_im, respectively, and
    channelizes them with the polyphase filter bank. The worker waits on
    `connection` for (first_timeslot, num_timeslots) requests, or for the
    string 'done' to terminate.
    **Parameters**
    h5_names : sequence of strings
        The HDF5 file names of X_re, X_im, Y_re, and Y_im.
    sap_id : int
        The sub array pointing ID to read.
    num_sb : int
        The number of sub bands expected in the data.
    fir_coefficients : 2D numpy.array of float32
        A `num_taps x num_chan` array containing the FIR coefficients,
        where `fir_coefficients.ravel()` should yield the FIR filter to
        multiply with the original (single channel) timeseries data.
    connection : multiprocessing.Connection
        Worker end of a pipe over which requests are received and the
        channelized x and y arrays (sb, time, channel) are sent back.
    **Returns**
    None. Results are sent back over `connection`.
    **Example**
    >>> None
    None
'''
sap_fmt = 'SUB_ARRAY_POINTING_%03d/BEAM_000/STOKES_%d'
num_pol = len(h5_names)
num_taps, num_chan = fir_coefficients.shape
bandpass = lofar_station_subband_bandpass(num_chan)
# with working_dir(dir_name):
h5_files = [h5py.File(file_name, mode='r') for file_name in h5_names]
h5_groups = [h5_file[sap_fmt % (sap_id, pol)]
for pol, h5_file in enumerate(h5_files)]
while True:
message = connection.recv()
if message == 'done':
connection.close()
[h5_file.close() for h5_file in h5_files]
break
first_timeslot, num_timeslots = message
time_series_real = numpy.zeros((4, num_timeslots, num_sb), dtype=numpy.float32)
[h5_groups[pol].read_direct(time_series_real,
numpy.s_[first_timeslot:first_timeslot+num_timeslots,:],
numpy.s_[pol, :, :])
for pol in range(num_pol)]
time_series_complex_x = time_series_real[0,:,:] + 1j*time_series_real[1,:,:]
time_series_complex_y = time_series_real[2,:,:] + 1j*time_series_real[3,:,:]
result_x = numpy.array([channelize_ppf_contiguous_block(
time_series_complex_x[:, sb].reshape((-1, num_chan)),
fir_coefficients)/bandpass[numpy.newaxis,:]
for sb in range(num_sb)],
dtype=numpy.complex64)
result_y = numpy.array([channelize_ppf_contiguous_block(
time_series_complex_y[:, sb].reshape((-1, num_chan)),
fir_coefficients)/bandpass[numpy.newaxis,:]
for sb in range(num_sb)],
dtype=numpy.complex64)
connection.send(['x', result_x.shape, result_x.dtype])
connection.send_bytes(result_x.tobytes())
connection.send(['y', result_y.shape, result_y.dtype])
connection.send_bytes(result_y.tobytes())
def time_and_freq_axes(h5_filename, sap_id=0):
r'''
'''
coordinate_fmt = 'SUB_ARRAY_POINTING_%03d/BEAM_000/COORDINATES/COORDINATE_%d'
h5_file = h5py.File(h5_filename, mode='r')
time_axis, freq_axis = [
dict([item
for item in h5_file[coordinate_fmt %
(sap_id, axis_id)].attrs.items()])
for axis_id in [0, 1]]
h5_file.close()
return time_axis, freq_axis
def read_and_process_antenna_block_mp(dir_name, sas_id_string, sap_ids,
fir_coefficients, interval_s=None,
interval_samples=None, num_samples=256*16,
max_duration_s=None):
sap_fmt = 'SUB_ARRAY_POINTING_%03d/BEAM_000/STOKES_%d'
with working_dir(dir_name):
sap_names = [[('%s_SAP%03d_B000_S%d_P000_bf.h5' % (sas_id_string, sap_id, pol))
for pol in [0, 1, 2, 3]]
for sap_id in sap_ids]
first_file = h5py.File(sap_names[0][0], mode='r')
timeslots_per_file = first_file[sap_fmt % (0, 0)].shape[0]
first_file.close()
time_axis, freq_axis = time_and_freq_axes(sap_names[0][0], sap_id=0)
num_sb = len(freq_axis['AXIS_VALUES_WORLD'])
sample_duration_s = time_axis['INCREMENT']
if interval_samples is None:
samples_per_interval = int(numpy.floor(interval_s/sample_duration_s))
else:
samples_per_interval = interval_samples
first_timeslot = 0
pipes = [mp.Pipe() for sap_id in sap_ids]
manager_ends = [pipe[0] for pipe in pipes]
worker_ends = [pipe[1] for pipe in pipes]
processes = [mp.Process(target=read_and_process_antenna_worker,
args=(h5_names, sap_id, num_sb, fir_coefficients, connection))
for h5_names, sap_id, connection in zip(sap_names, sap_ids, worker_ends)]
[process.start() for process in processes]
while first_timeslot < timeslots_per_file - samples_per_interval - num_samples:
time_axis['REFERENCE_VALUE'] = (first_timeslot + num_samples/2)*sample_duration_s
if max_duration_s is not None and (first_timeslot +num_samples)*sample_duration_s > max_duration_s:
break
[pipe.send([first_timeslot, num_samples]) for pipe in manager_ends]
x_metadata = [pipe.recv() for pipe in manager_ends]
x_data = [numpy.frombuffer(pipe.recv_bytes(), dtype=x_meta[2]).reshape(x_meta[1])
for x_meta, pipe in zip(x_metadata, manager_ends)]
y_metadata = [pipe.recv() for pipe in manager_ends]
y_data = [numpy.frombuffer(pipe.recv_bytes(), dtype=y_meta[2]).reshape(y_meta[1])
for y_meta, pipe in zip(y_metadata, manager_ends)]
first_timeslot += samples_per_interval
# Return X[sap, sb, time, chan], Y[sap, sb, time, chan], time, freq
yield (numpy.array(x_data, dtype=numpy.complex64),
numpy.array(y_data, dtype=numpy.complex64), time_axis, freq_axis)
[pipe.send('done') for pipe in manager_ends]
[pipe.close() for pipe in manager_ends]
[process.join() for process in processes]
return None
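# Usage sketch (hypothetical data directory and SAS id; consumes the generator
# defined above, channelizing with a 16-channel, 16-tap PPF):
#
#   fir = fir_filter_coefficients(num_chan=16, num_taps=16)
#   for x, y, time_axis, freq_axis in read_and_process_antenna_block_mp(
#           '/data/obs', 'L123456', sap_ids=[0, 1],
#           fir_coefficients=fir, interval_s=0.1):
#       print(x.shape, y.shape)  # X[sap, sb, time, chan], Y[sap, sb, time, chan]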
|
brentjens/software-correlator
|
softwarecorrelator/stationprocessing.py
|
stationprocessing.py
|
py
| 11,844 |
python
|
en
|
code
| 4 |
github-code
|
6
|
25816673887
|
# Write a program to convert decimal number to equivalent binary, octal, and hexadecimal numbers.
def binary(num):
b=''
while num:
r=num%2
b+=str(r)
num=num// 2
return b[::-1]
def octal(num):
x=''
while num:
r=num%8
x=x+str(r)
num=num//8
return x[::-1]
def hexdec(num):
x=''
while num:
r=num%16
        if r >= 10:
x+=chr(r+55)
else:
x=x+str(r)
num=num//16
return x[::-1]
num=int(input("Enter any decimal number : "))
print("The binary of the ",num," are : ",binary(num))
print("The octal of the ",num," are : ",octal(num))
print("The hexadecimal of the ",num," are : ",hexdec(num))
|
asteekgoswami/5th-sem-python
|
assignment-4/Q4.py
|
Q4.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24168209609
|
#!/usr/bin/env python
'''
summarise slurm job details
Usage: summarise.py --files slurm-*.log > summary.csv
Time is in hours.
Memory is in GB.
'''
#(venv_somatic_2) spartan-login1 18:48:20 msi-evaluation$ sacct -j 18860471 --format="JobName,CPUTime,MaxRSS,Elapsed,MaxVMSize,Timelimit"
# JobName CPUTime MaxRSS Elapsed MaxVMSize Timelimit
#---------- ---------- ---------- ---------- ---------- ----------
# mantis 17:37:48 02:56:18 08:00:00
# batch 17:37:48 733264K 02:56:18 47907692K
# extern 17:37:48 1212K 02:56:18 144788K
import argparse
import logging
import subprocess
import sys
def to_hours(v):
# d-hh:mm:ss or hh:mm:ss
if '-' in v:
d = float(v.split('-')[0])
return 24 * d + to_hours(v.split('-')[1])
else:
h, m, s = [int(x) for x in v.split(':')]
return h + m / 60 + s / 3600
def to_g(v):
if v.endswith('K'):
return float(v[:-1]) / 1024 / 1024
elif v.endswith('M'):
return float(v[:-1]) / 1024
elif v.endswith('Mn'):
return float(v[:-2]) / 1024
elif v.endswith('Gn'):
return float(v[:-2])
else:
logging.warn('tricky memory value: %s', v)
return float(v)
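# Quick reference for the two converters above (example values are assumptions
# for illustration, following sacct's d-hh:mm:ss and K/M/Gn formats):
#   to_hours('1-02:30:00') -> 26.5
#   to_hours('00:45:00')   -> 0.75
#   to_g('733264K')        -> ~0.70 (GB)
#   to_g('4Gn')            -> 4.0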
def main(files, filter_name):
logging.info('starting...')
sys.stdout.write('ID,Name,TimeRequested,TimeUsed,MemoryRequested,MemoryUsed,TimeDiff,MemoryDiff\n')
for f in files:
logging.info('%s...', f)
i = f.split('/')[-1].split('.')[0].split('-')[-1]
output = subprocess.check_output("sacct -j {} -p --format JobName,Elapsed,MaxRSS,ReqMem,TimeLimit".format(i), shell=True).decode()
lines = output.split('\n')
jobname = lines[1].split('|')[0]
time_requested = to_hours(lines[1].split('|')[4])
time_used = to_hours(lines[2].split('|')[1])
memory_used = to_g(lines[2].split('|')[2])
memory_requested = to_g(lines[2].split('|')[3])
if filter_name == 'snakemake':
jobname = '-'.join(jobname.split('-')[-2:-1])
logging.debug('new jobname is %s', jobname)
sys.stdout.write('{},{},{:.1f},{:.1f},{:.1f},{:.1f},{:.1f},{:.1f}\n'.format(i, jobname, time_requested, time_used, memory_requested, memory_used, time_requested - time_used, memory_requested - memory_used))
logging.info('done')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Slurm summariser')
parser.add_argument('--files', required=True, nargs='+', help='files containing slurm ids')
parser.add_argument('--filter_name', required=False, help='filter names in snakemake format *-name-*')
parser.add_argument('--verbose', action='store_true', help='more logging')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
main(args.files, args.filter_name)
|
supernifty/slurm_util
|
summarise.py
|
summarise.py
|
py
| 2,901 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22682272557
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 04:34:12 2021
@author: Zakaria
"""
import pandas as pd
data = pd.read_csv('prediction_de_fraud_2.csv')
caracteristiques = data.drop('isFraud', axis=1).values
cible = data['isFraud'].values
from sklearn.preprocessing import LabelEncoder
LabEncdr_X = LabelEncoder()
caracteristiques[:, 1] = LabEncdr_X.fit_transform(caracteristiques[:, 1])
caracteristiques[:, 3] = LabEncdr_X.fit_transform(caracteristiques[:, 3])
caracteristiques[:, 6] = LabEncdr_X.fit_transform(caracteristiques[:, 6])
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(caracteristiques, cible, test_size=.3, random_state=50)
from sklearn.ensemble import RandomForestClassifier
Random_frst_cls = RandomForestClassifier(random_state=50)
Random_frst_cls.fit(x_train, y_train)
Random_frst_cls.score(x_test, y_test) ## ==> 0.9550561797752809
|
Baxx95/6-10-Programmes-Data-Science-SL-Random_Forest_Classifier
|
Random_Forest_Classifier.py
|
Random_Forest_Classifier.py
|
py
| 961 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42549531170
|
### This file has been adopted from
### https://github.com/openlawlibrary/pygls/blob/master/examples/json-extension/server/server.py
import asyncio
from bisect import bisect
from cromwell_tools import api as cromwell_api
from cromwell_tools.cromwell_auth import CromwellAuth
from cromwell_tools.utilities import download
from functools import wraps
from pygls.features import (
CODE_ACTION,
DEFINITION,
REFERENCES,
TEXT_DOCUMENT_DID_OPEN,
TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_SAVE,
TEXT_DOCUMENT_WILL_SAVE,
WORKSPACE_DID_CHANGE_CONFIGURATION,
WORKSPACE_DID_CHANGE_WATCHED_FILES,
)
from pygls.server import LanguageServer
from pygls.types import (
CodeActionParams,
ConfigurationItem,
ConfigurationParams,
Diagnostic,
DiagnosticSeverity,
DidChangeConfigurationParams,
DidOpenTextDocumentParams,
DidChangeTextDocumentParams,
DidSaveTextDocumentParams,
WillSaveTextDocumentParams,
TextDocumentPositionParams,
DidChangeWatchedFiles,
FileChangeType,
MessageType,
Location,
Position,
Range,
)
from os import environ, name as platform, pathsep
from pathlib import Path
import re, sys
from requests import HTTPError
from threading import Timer
from time import sleep
from typing import Callable, Dict, Iterable, List, Set, Tuple, Union
from urllib.parse import urlparse
import WDL
from WDL import SourceNode, SourcePosition, Lint
PARSE_DELAY_SEC = 0.5 # delay parsing of WDL until no more keystrokes are sent
class Server(LanguageServer):
SERVER_NAME = 'wdl'
CONFIG_SECTION = SERVER_NAME
CMD_RUN_WDL = SERVER_NAME + '.run'
def __init__(self):
super().__init__()
self.wdl_paths: Dict[str, Set[str]] = dict()
self.wdl_types: Dict[str, Dict[str, SourcePosition]] = dict()
self.wdl_defs: Dict[str, Dict[SourcePosition, SourcePosition]] = dict()
self.wdl_refs: Dict[str, Dict[SourcePosition, List[SourcePosition]]] = dict()
self.wdl_symbols: Dict[str, List[SourcePosition]] = dict()
self.aborting_workflows: Set[str] = set()
def catch_error(self, log = False):
def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if log:
self.show_message_log(str(e), MessageType.Error)
else:
self.show_message(str(e), MessageType.Error)
return wrapper
return decorator
server = Server()
def _get_client_config(ls: Server):
config = ls.get_configuration(ConfigurationParams([
ConfigurationItem(section=Server.CONFIG_SECTION)
])).result()
return config[0]
# https://gist.github.com/walkermatt/2871026
def debounce(delay_sec: float, id_arg: Union[int, str]):
""" Decorator that will postpone a functions
execution until after wait seconds
have elapsed since the last time it was invoked. """
def decorator(func: Callable):
@wraps(func)
def debounced(*args, **kwargs):
if not hasattr(debounced, 'timers'):
debounced.timers: Dict[str, Timer] = dict()
id = args[id_arg] if isinstance(id_arg, int) else kwargs[id_arg]
if id in debounced.timers:
debounced.timers[id].cancel()
timer = Timer(delay_sec, lambda: func(*args, **kwargs))
debounced.timers[id] = timer
timer.start()
return debounced
return decorator
@debounce(PARSE_DELAY_SEC, 1)
def parse_wdl(ls: Server, uri: str):
ls.show_message_log('Validating ' + uri, MessageType.Info)
diagnostics, wdl = _parse_wdl(ls, uri)
ls.publish_diagnostics(uri, diagnostics)
ls.show_message_log(
'{} {}'.format('Valid' if wdl else 'Invalid', uri),
MessageType.Info if wdl else MessageType.Warning
)
def _parse_wdl(ls: Server, uri: str):
try:
paths = _get_wdl_paths(ls, uri)
doc = asyncio.run(
WDL.load_async(uri, path=paths, read_source=_read_source(ls))
)
types = _get_types(doc.children, dict())
ls.wdl_types[uri] = types
ls.wdl_defs[uri], ls.wdl_refs[uri] = _get_links(doc.children, types, dict(), dict())
ls.wdl_symbols[uri] = sorted(_get_symbols(doc.children, []))
return list(_lint_wdl(ls, doc)), doc
except WDL.Error.MultipleValidationErrors as errs:
return [_diagnostic_err(e) for e in errs.exceptions], None
except WDLError as e:
return [_diagnostic_err(e)], None
except Exception as e:
ls.show_message_log(str(e), MessageType.Error)
return [], None
def _read_source(ls: Server):
async def read_source(uri: str, path, importer):
uri = await WDL.resolve_file_import(uri, path, importer)
if uri.startswith('/'):
uri = 'file://' + uri
source = ls.workspace.get_document(uri).source
return WDL.ReadSourceResult(source_text=source, abspath=uri)
return read_source
def _get_symbols(nodes: Iterable[SourceNode], symbols: List[SourcePosition]):
for node in nodes:
symbols.append(node.pos)
_get_symbols(node.children, symbols)
return symbols
# find SourcePosition as the minimum bounding box for cursor Position
def _find_symbol(ls: Server, uri: str, p: Position):
if uri not in ls.wdl_symbols:
return
symbols = ls.wdl_symbols[uri]
best_score = (sys.maxsize, sys.maxsize)
best_sym: SourcePosition = None
line = p.line + 1
col = p.character + 1
min_pos = SourcePosition(uri, uri, line, 0, line, 0)
i = bisect(symbols, min_pos)
while i < len(symbols):
sym = symbols[i]
if sym.line > line or (sym.line == line and sym.column > col):
break
elif sym.end_line > line or (sym.end_line == line and sym.end_column >= col):
score = (sym.end_line - sym.line, sym.end_column - sym.column)
if score <= best_score:
best_score = score
best_sym = sym
i += 1
return best_sym
def _get_types(nodes: Iterable[SourceNode], types: Dict[str, SourcePosition]):
for node in nodes:
if isinstance(node, WDL.StructTypeDef):
types[node.type_id] = node.pos
_get_types(node.children, types)
return types
def _get_links(
nodes: Iterable[SourceNode],
types: Dict[str, SourcePosition],
defs: Dict[SourcePosition, SourcePosition],
refs: Dict[SourcePosition, List[SourcePosition]],
):
for node in nodes:
source: SourcePosition = None
if isinstance(node, WDL.Call):
source = node.callee.pos
elif isinstance(node, WDL.Decl) and isinstance(node.type, WDL.Type.StructInstance):
source = types[node.type.type_id]
elif isinstance(node, WDL.Expr.Ident):
ref = node.referee
if isinstance(ref, WDL.Tree.Gather):
source = ref.final_referee.pos
else:
source = ref.pos
if source is not None:
defs[node.pos] = source
refs.setdefault(source, []).append(node.pos)
_get_links(node.children, types, defs, refs)
return defs, refs
SourceLinks = Union[SourcePosition, List[SourcePosition]]
def _find_links(ls: Server, uri: str, pos: Position, links: Dict[str, Dict[SourcePosition, SourceLinks]]):
symbol = _find_symbol(ls, uri, pos)
if (symbol is None) or (uri not in links):
return
symbols = links[uri]
if symbol in symbols:
return symbols[symbol]
def _find_def(ls: Server, uri: str, pos: Position):
link = _find_links(ls, uri, pos, ls.wdl_defs)
if link is not None:
return Location(link.abspath, _get_range(link))
def _find_refs(ls: Server, uri: str, pos: Position):
links = _find_links(ls, uri, pos, ls.wdl_refs)
if links is not None:
return [Location(link.abspath, _get_range(link)) for link in links]
def _lint_wdl(ls: Server, doc: WDL.Document):
_check_linter_path()
warnings = Lint.collect(Lint.lint(doc, descend_imports=False))
_check_linter_available(ls)
for pos, _, msg, _ in warnings:
yield _diagnostic(msg, pos, DiagnosticSeverity.Warning)
def _check_linter_path():
if getattr(_check_linter_path, 'skip', False):
return
LOCAL_BIN = '/usr/local/bin'
PATH = environ['PATH'].split(pathsep)
if platform == 'posix' and LOCAL_BIN not in PATH:
environ['PATH'] = pathsep.join([LOCAL_BIN] + PATH)
_check_linter_path.skip = True
def _check_linter_available(ls: Server):
if getattr(_check_linter_available, 'skip', False):
return
if not Lint._shellcheck_available:
ls.show_message('''
WDL task command linter is not available on the system PATH.
Please install ShellCheck and/or add it to the PATH:
https://github.com/koalaman/shellcheck#installing
''', MessageType.Warning)
_check_linter_available.skip = True
def _get_wdl_paths(ls: Server, wdl_uri: str, reuse_paths = True) -> List[str]:
ws = ls.workspace
if ws.folders:
ws_uris = [f for f in ws.folders if wdl_uri.startswith(f)]
elif ws.root_uri:
ws_uris = [ws.root_uri]
else:
ws_uris = []
wdl_paths: Set[str] = set()
for ws_uri in ws_uris:
if reuse_paths and (ws_uri in ls.wdl_paths):
ws_paths = ls.wdl_paths[ws_uri]
else:
ws_paths: Set[str] = set()
ws_root = Path(urlparse(ws_uri).path)
for p in ws_root.rglob('*.wdl'):
ws_paths.add(str(p.parent))
ls.wdl_paths[ws_uri] = ws_paths
wdl_paths.update(ws_paths)
return list(wdl_paths)
WDLError = (WDL.Error.ImportError, WDL.Error.SyntaxError, WDL.Error.ValidationError)
def _diagnostic(msg: str, pos: SourcePosition = None, severity = DiagnosticSeverity.Error):
return Diagnostic(_get_range(pos), msg, severity=severity)
def _get_range(p: SourcePosition = None):
if p is None:
return Range(
Position(),
Position(0, sys.maxsize),
)
else:
return Range(
Position(p.line - 1, p.column - 1),
Position(p.end_line - 1, p.end_column - 1),
)
def _diagnostic_err(e: WDLError):
cause = ': {}'.format(e.__cause__) if e.__cause__ else ''
msg = str(e) + cause
return _diagnostic(msg, e.pos)
@server.thread()
@server.feature(TEXT_DOCUMENT_DID_OPEN)
@server.catch_error()
def did_open(ls: Server, params: DidOpenTextDocumentParams):
parse_wdl(ls, params.textDocument.uri)
@server.thread()
@server.feature(TEXT_DOCUMENT_DID_CHANGE)
@server.catch_error()
def did_change(ls: Server, params: DidChangeTextDocumentParams):
parse_wdl(ls, params.textDocument.uri)
@server.thread()
@server.feature(TEXT_DOCUMENT_DID_SAVE)
@server.catch_error()
def did_save(ls: Server, params: DidSaveTextDocumentParams):
pass
@server.thread()
@server.feature(TEXT_DOCUMENT_WILL_SAVE)
@server.catch_error()
def will_save(ls: Server, params: WillSaveTextDocumentParams):
pass
@server.feature(WORKSPACE_DID_CHANGE_CONFIGURATION)
def did_change_configuration(ls: Server, params: DidChangeConfigurationParams):
pass
@server.thread()
@server.feature(WORKSPACE_DID_CHANGE_WATCHED_FILES)
@server.catch_error()
def did_change_watched_files(ls: Server, params: DidChangeWatchedFiles):
for change in params.changes:
if change.type in [FileChangeType.Created, FileChangeType.Deleted] and \
change.uri.endswith('.wdl'):
_get_wdl_paths(ls, change.uri, reuse_paths=False)
@server.thread()
@server.feature(DEFINITION)
@server.catch_error()
def goto_definition(ls: Server, params: TextDocumentPositionParams):
return _find_def(ls, params.textDocument.uri, params.position)
@server.thread()
@server.feature(REFERENCES)
@server.catch_error()
def find_references(ls: Server, params: TextDocumentPositionParams):
return _find_refs(ls, params.textDocument.uri, params.position)
class RunWDLParams:
def __init__(self, wdl_uri: str):
self.wdl_uri = wdl_uri
@server.feature(CODE_ACTION)
@server.catch_error()
def code_action(ls: Server, params: CodeActionParams):
return [{
'title': 'Run WDL',
'kind': Server.CMD_RUN_WDL,
'command': {
'command': Server.CMD_RUN_WDL,
'arguments': [RunWDLParams(params.textDocument.uri)],
},
}]
@server.thread()
@server.command(Server.CMD_RUN_WDL)
@server.catch_error()
def run_wdl(ls: Server, params: Tuple[RunWDLParams]):
wdl_uri = params[0].wdl_uri
wdl_path = urlparse(wdl_uri).path
_, wdl = _parse_wdl(ls, wdl_uri)
if not wdl:
return ls.show_message('Unable to submit: WDL contains error(s)', MessageType.Error)
config = _get_client_config(ls)
auth = CromwellAuth.from_no_authentication(config.cromwell.url)
workflow = cromwell_api.submit(
auth, wdl_path, raise_for_status=True,
).json()
id = workflow['id']
title = 'Workflow {} for {}'.format(id, wdl_path)
_progress(ls, 'start', {
'id': id,
'title': title,
'cancellable': True,
'message': workflow['status'],
})
status: str = ''
while True:
if status != workflow['status']:
status = workflow['status']
if status == 'Succeeded':
message_type = MessageType.Info
elif status in ('Aborting', 'Aborted'):
message_type = MessageType.Warning
elif status == 'Failed':
message_type = MessageType.Error
else:
_progress(ls, 'report', {
'id': id,
'message': status,
})
continue
_progress(ls, 'done', {
'id': id,
})
message = '{}: {}'.format(title, status)
ls.show_message(message, message_type)
diagnostics = _parse_failures(wdl, id, auth)
return ls.publish_diagnostics(wdl_uri, diagnostics)
sleep(config.cromwell.pollSec)
if id in ls.aborting_workflows:
workflow = cromwell_api.abort(
id, auth, raise_for_status=True,
).json()
ls.aborting_workflows.remove(id)
continue
try:
workflow = cromwell_api.status(
id, auth, raise_for_status=True,
).json()
except HTTPError as e:
ls.show_message_log(str(e), MessageType.Error)
def _progress(ls: Server, action: str, params):
ls.send_notification('window/progress/' + action, params)
@server.feature('window/progress/cancel')
def abort_workflow(ls: Server, params):
ls.aborting_workflows.add(params.id)
def _parse_failures(wdl: WDL.Document, id: str, auth: CromwellAuth):
workflow = cromwell_api.metadata(
id, auth,
includeKey=['status', 'executionStatus', 'failures', 'stderr'],
expandSubWorkflows=True,
raise_for_status=True,
).json()
if workflow['status'] != 'Failed':
return
calls = workflow['calls']
if calls:
diagnostics: List[Diagnostic] = []
elements = wdl.workflow.elements
for call, attempts in calls.items():
for attempt in attempts:
if attempt['executionStatus'] == 'Failed':
pos = _find_call(wdl.workflow.elements, wdl.workflow.name, call)
failures = _collect_failures(attempt['failures'], [])
stderr = _download(attempt['stderr'])
if stderr is not None:
failures.append(stderr)
msg = '\n\n'.join(failures)
diagnostics.append(_diagnostic(msg, pos))
return diagnostics
else:
failures = _collect_failures(workflow['failures'], [])
msg = '\n\n'.join(failures)
return [_diagnostic(msg)]
class CausedBy:
def __init__(self, causedBy: List['CausedBy'], message: str):
self.causedBy = causedBy
self.message = message
def _collect_failures(causedBy: List[CausedBy], failures: List[str]):
for failure in causedBy:
if failure['causedBy']:
_collect_failures(failure['causedBy'], failures)
failures.append(failure['message'])
return failures
WorkflowElements = List[Union[WDL.Decl, WDL.Call, WDL.Scatter, WDL.Conditional]]
def _find_call(elements: WorkflowElements, wf_name: str, call_name: str):
found: SourcePosition = None
for el in elements:
if found:
break
elif isinstance(el, WDL.Call) and '{}.{}'.format(wf_name, el.name) == call_name:
found = el.pos
elif isinstance(el, WDL.Conditional) or isinstance(el, WDL.Scatter):
found = _find_call(el.elements, wf_name, call_name)
return found
@server.catch_error(log=True)
def _download(url: str):
return str(download(url), 'utf-8')
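# Minimal sketch of how this module's `server` object is typically started (an assumption: the real
# entry point lives elsewhere in the package; pygls LanguageServer exposes start_io/start_tcp):
# if __name__ == '__main__':
#     server.start_io()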
|
broadinstitute/wdl-ide
|
server/wdl_lsp/server.py
|
server.py
|
py
| 17,170 |
python
|
en
|
code
| 38 |
github-code
|
6
|
7796988085
|
import xml.dom.minidom
import string;
import logging;
def LoadSession(system, FileName):
Logger = logging.getLogger("PPLT");
Logger.debug("Try to load Session from %s"%FileName);
doc = xml.dom.minidom.parse(FileName);
dev_tag = doc.getElementsByTagName("Devices")[0];
sym_tag = doc.getElementsByTagName("SymbolTree")[0];
srv_tag = doc.getElementsByTagName("Servers")[0];
LoadDevices(system, dev_tag);
LoadSymTree(system, sym_tag.firstChild);
LoadServers(system, srv_tag);
def LoadDevices(system, Tag):
devlst = Tag.getElementsByTagName("Device");
for dev in devlst:
Para = xmlFetchParameters(dev);
Alias = dev.getAttribute("alias");
FQDN = dev.getAttribute("fqdn");
system.LoadDevice(FQDN, Alias, Para);
def LoadServers(system, Tag):
srvlst = Tag.getElementsByTagName("Server");
for srv in srvlst:
Para = xmlFetchParameters(srv);
Alias = srv.getAttribute("alias");
FQSN = srv.getAttribute("fqsn");
DefUser = srv.getAttribute("user");
Root = srv.getAttribute("root");
if not Root:
Root = "/";
system.LoadServer(FQSN, Alias, DefUser, Para,Root);
def LoadSymTree(system, Tag, PathList=[]):
if not Tag:
return(None);
if Tag.nodeType != Tag.ELEMENT_NODE:
return(LoadSymTree(system, Tag.nextSibling, PathList));
if Tag.localName == "Symbol":
Name = Tag.getAttribute("name");
Slot = Tag.getAttribute("slot");
Refresh = Tag.getAttribute("refresh");
Group = Tag.getAttribute("group");
Owner = Tag.getAttribute("owner");
Modus = str(Tag.getAttribute("modus"));
Path = PathList2Str(PathList+[Name]);
system.CreateSymbol(Path, Slot, Refresh, Modus, Owner, Group);
if Tag.localName == "Folder":
Name = Tag.getAttribute("name");
Group = Tag.getAttribute("group");
Owner = Tag.getAttribute("owner");
Modus = Tag.getAttribute("modus");
Path = PathList2Str(PathList+[Name]);
system.CreateFolder(Path, Modus, Owner, Group);
if Tag.hasChildNodes():
LoadSymTree(system,Tag.firstChild,PathList+[Name]);
return(LoadSymTree(system,Tag.nextSibling,PathList));
def xmlFetchParameters(Node):
parameter = {};
parlst = Node.getElementsByTagName("Parameter");
for par in parlst:
name = par.getAttribute("name");
value = xmlFetchText(par.firstChild);
parameter.update( {name:value} );
return(parameter);
def xmlFetchText(Node,txt=""):
if not Node:
return(txt);
if Node.nodeType == Node.TEXT_NODE:
txt += Node.data.strip();  # str method form works on both Python 2 and 3
return(xmlFetchText(Node.nextSibling,txt));
def PathList2Str(PathLst):
p = "";
if len(PathLst) == 0:
return("/");
for item in PathLst:
p += "/"+item;
return(p);
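# Hedged sketch of the XML layout this loader expects (element and attribute names are taken from the
# code above; the root element name and the sample values are assumptions):
# <PPLT>
#   <Devices>
#     <Device alias="dev1" fqdn="..."><Parameter name="port">/dev/ttyS0</Parameter></Device>
#   </Devices>
#   <SymbolTree>
#     <Folder name="plant" modus="755" owner="root" group="users">
#       <Symbol name="temp" slot="..." refresh="1" modus="644" owner="root" group="users"/>
#     </Folder>
#   </SymbolTree>
#   <Servers>
#     <Server alias="srv1" fqsn="..." user="guest" root="/"><Parameter name="port">8080</Parameter></Server>
#   </Servers>
# </PPLT>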
|
BackupTheBerlios/pplt-svn
|
PPLT/PPLT/LoadSession.py
|
LoadSession.py
|
py
| 2,939 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34038204278
|
from aio_proxy.response.formatters.elus import format_elus
from aio_proxy.response.unite_legale_model import CollectiviteTerritoriale
def format_collectivite_territoriale(
colter_code=None,
colter_code_insee=None,
colter_elus=None,
colter_niveau=None,
):
if colter_code is None:
return None
else:
return CollectiviteTerritoriale(
code=colter_code,
code_insee=colter_code_insee,
elus=format_elus(colter_elus), # Format elus if provided
niveau=colter_niveau,
).dict()
|
etalab/annuaire-entreprises-search-api
|
aio/aio-proxy/aio_proxy/response/formatters/collectivite_territoriale.py
|
collectivite_territoriale.py
|
py
| 565 |
python
|
it
|
code
| 13 |
github-code
|
6
|
20927340765
|
import tensorflow as tf
from architecture import eda_net
MOVING_AVERAGE_DECAY = 0.995
IGNORE_LABEL = 255
def model_fn(features, labels, mode, params):
"""
This is a function for creating a computational tensorflow graph.
The function is in format required by tf.estimator.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
images = features
logits = eda_net(
images, is_training, k=params['k'],
num_classes=params['num_labels']
)
predictions = {
'probabilities': tf.nn.softmax(logits, axis=3),
'labels': tf.argmax(logits, axis=3, output_type=tf.int32)
}
if mode == tf.estimator.ModeKeys.PREDICT:
export_outputs = tf.estimator.export.PredictOutput({
name: tf.identity(tensor, name)
for name, tensor in predictions.items()
})
return tf.estimator.EstimatorSpec(
mode, predictions=predictions,
export_outputs={'outputs': export_outputs}
)
# add l2 regularization
with tf.name_scope('weight_decay'):
add_weight_decay(params['weight_decay'])
regularization_loss = tf.losses.get_regularization_loss()
tf.summary.scalar('regularization_loss', regularization_loss)
with tf.name_scope('losses'):
class_weights = tf.constant(params['class_weights'], tf.float32)
# it has shape [num_labels]
shape = tf.shape(logits)
batch_size, height, width, num_labels = tf.unstack(shape, axis=0)
labels_flat = tf.reshape(labels, [-1])
logits = tf.reshape(logits, [batch_size * height * width, num_labels])
not_ignore = tf.not_equal(labels_flat, IGNORE_LABEL)  # labels_flat is defined above; mask out ignored pixels
labels_flat = tf.boolean_mask(labels_flat, not_ignore)
logits = tf.boolean_mask(logits, not_ignore)
weights = tf.gather(class_weights, labels_flat)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_flat, logits=logits)
cross_entropy = tf.reduce_mean(losses * weights, axis=0)
tf.losses.add_loss(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy)
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
with tf.name_scope('eval_metrics'):
# this is a stupid metric actually
accuracy = tf.reduce_mean(tf.to_float(tf.equal(predictions['labels'], labels)), axis=[0, 1, 2])
# this is better
mean_iou = compute_iou(predictions['labels'], labels, params['num_labels'])
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'eval_accuracy': tf.metrics.mean(accuracy),
'eval_mean_iou': tf.metrics.mean(mean_iou)
}
return tf.estimator.EstimatorSpec(
mode, loss=total_loss,
eval_metric_ops=eval_metric_ops
)
assert mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('learning_rate'):
global_step = tf.train.get_global_step()
learning_rate = tf.train.polynomial_decay(
params['initial_learning_rate'], global_step,
params['decay_steps'], params['end_learning_rate'],
power=1.0 # linear decay
)
tf.summary.scalar('learning_rate', learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops), tf.variable_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(total_loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
for g, v in grads_and_vars:
tf.summary.histogram(v.name[:-2] + '_hist', v)
tf.summary.histogram(v.name[:-2] + '_grad_hist', g)
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
train_op = ema.apply(tf.trainable_variables())
tf.summary.scalar('train_accuracy', accuracy)
tf.summary.scalar('train_mean_iou', mean_iou)
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
def add_weight_decay(weight_decay):
"""Add L2 regularization to all (or some) trainable kernel weights."""
kernels = [v for v in tf.trainable_variables() if 'weights' in v.name]
for K in kernels:
x = tf.multiply(weight_decay, tf.nn.l2_loss(K))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, x)
class RestoreMovingAverageHook(tf.train.SessionRunHook):
def __init__(self, model_dir):
super(RestoreMovingAverageHook, self).__init__()
self.model_dir = model_dir
def begin(self):
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY)
variables_to_restore = ema.variables_to_restore()
self.load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(self.model_dir), variables_to_restore
)
def after_create_session(self, sess, coord):
tf.logging.info('Loading EMA weights...')
self.load_ema(sess)
def compute_iou(x, y, num_labels):
"""
Arguments:
x, y: int tensors with shape [b, h, w],
possible values that they can contain
are {0, 1, ..., num_labels - 1, IGNORE_LABEL}.
Note that ignore label is ignored here.
num_labels: an integer.
Returns:
a float tensor with shape [].
"""
unique_labels = tf.range(num_labels, dtype=tf.int32)
x = tf.equal(tf.expand_dims(x, 3), unique_labels)
y = tf.equal(tf.expand_dims(y, 3), unique_labels)
intersection = tf.to_float(tf.logical_and(x, y))
union = tf.to_float(tf.logical_or(x, y))
# they all have shape [b, h, w, num_labels]
intersection = tf.reduce_sum(intersection, axis=[1, 2])
union = tf.reduce_sum(union, axis=[1, 2])
union = tf.maximum(union, 1.0)
# they have shape [b, num_labels]
return tf.reduce_mean(intersection/union, axis=[0, 1])
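# Hedged usage sketch (the hyper-parameter values below are placeholders, not taken from the repo;
# the keys are exactly the ones model_fn reads from `params`):
# estimator = tf.estimator.Estimator(
#     model_fn, model_dir='models/edanet',
#     params={'k': 40, 'num_labels': 19, 'weight_decay': 2e-4, 'class_weights': [1.0] * 19,
#             'initial_learning_rate': 5e-4, 'decay_steps': 90000, 'end_learning_rate': 1e-6})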
|
TropComplique/EDANet
|
model.py
|
model.py
|
py
| 6,023 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3019297917
|
def getBestCand(candidates,visited):
bestIndex = -1
cost = float('inf')  # start from a true infinity so unreachable vertices are never selected
for i in range(len(visited)):
if not visited[i] and candidates[i] < cost:
cost = candidates[i]
bestIndex = i
return bestIndex,cost
def bfs(g):
# despite its name, this routine is Prim's minimum-spanning-tree algorithm over an adjacency list
n = len(g)
visited = [False] * n
candidates = [float('inf')] * n
sol = 0
roads = []
init = 0
ant = 0
visited[init] = True
for st,end,w in g[init]:
candidates[end] = w
for i in range(n-1):
bestCand, cost = getBestCand(candidates,visited)
if cost < float('inf'):
sol += cost
visited[bestCand] = True
roads.append(cost)
for st,end,w in g[bestCand]:
if w < candidates[end]:
candidates[end] = w
return sol,roads
if __name__ == '__main__':
N,M = map(int,input().strip().split())
g = [[] for _ in range(N)]
solRoads = [[] for i in range(N)]
for _ in range(M):
a,b,w = map(int,input().strip().split())
g[a].append((a,b,w))
g[b].append((b,a,w))
sol,costs = bfs(g)
print(sol)
print(costs)
C = int(input().strip())
for _ in range(C):
N = int(input().strip())
print(N)
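# Worked sketch of the expected behaviour on a small graph (input format as read above: "N M",
# then M lines "a b w", then the trailing "C"/"N" block; the sample numbers are assumptions):
# 3 3
# 0 1 4
# 1 2 1
# 0 2 3
# 1
# 7
# -> prints 4 (total MST cost), [3, 1] (edge costs in pick order), then echoes 7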
|
medranoGG/AlgorithmsPython
|
test01/bisbal.py
|
bisbal.py
|
py
| 1,252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19511071794
|
import random
def magic_ball() -> str:
"""
Ask this crabby Magic 8 Ball a question out loud
and run to see the answer.
:return: `str`, a random answer.
"""
# Possible responses that the Magic 8 Ball can give
responses = ["Don't ask me!", "Next question, loser.",
"Go away.", "I've had enough of your questions.",
"Maybe you should ask your Mom that question.",
"Does that topic really concern me?", "You talk a lot, so no.",
"What? Sorry, I wasn't listening.",
"The answer is: You should see a therapist.",
"Wow, you've got more problems than a math book!",
"Hmmn...bring me cookies and I might answer.",
"Yes! I mean no! I can't decide...", "Nope.",
"Sure, I guess.", "You woke me up to ask that?!",
"I should ask you the same question.", "I don't think so.",
"How should I know?", "Maybe yes, maybe no.",
"Look inside yourself.", "That is a definite yes...I think.",
"I'm tired, ask again when I care.", "Ugh...this question again?",
"My sixth sense says....no.", "Magically yes!",
"I'm in a good mood so I'll say yes.",
"What is this, twenty questions?"]
answer = random.choice(responses)
return answer
print(magic_ball())
|
CrochetGamer/small-projects
|
magic_8_ball.py
|
magic_8_ball.py
|
py
| 1,440 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40310342893
|
import time
import random
import pandas as pd
import multiprocessing as mp
import numpy as np
import os
import torch
import torch.nn.functional as F
import copy
from .utils import strToListF
from .models import makeDataSet_Vec
from .utils import strToListF, colorizar, getSTime
# models
from .drlearning import Agent_DQL, ExperienceReplay, ICM as ICM_DQL
class VecDataEnvironment:
''' If this environment return done=True, reset it or some errors may apears'''
VERY_BAD_REWARD = -1.
def __init__(self, data_path, eval_path=None, max_backpack_size=200, vname='vecs', lname='is_humor', frmethod='acc', rdata_weval=False):
self.data = pd.read_csv(data_path)
self.data_eval = None
self.max_backpack_size = max_backpack_size
self.vec_size = len(self.data.loc[0,vname].split())
self.vname = vname
self.lname = lname
self.done = False
self.backpack = []
self.backpack_l = []
self.pos_gone = None
self.iterator = [i for i in range(len(self.data))]
self.iter_modulo = len(self.data)
self.iter_pos = None
self.current_vector = None
self.final_reward = None
self.frmethod = frmethod
if eval_path is not None:
self.data_eval = pd.read_csv(eval_path)
if rdata_weval:
self.resetIterator(True)
def mulIterModulo(self, mul=1.0):
tmp = int(self.iter_modulo * mul)
self.iter_modulo = min(len(self.data), tmp)
self.iter_pos = None
def resetIterator(self, use_reduced=False, porsion=1.):
if not use_reduced:
self.iterator = [i for i in range(len(self.data))]
self.iter_modulo = int(len(self.data) * porsion)
self.iter_pos = 0
else:
print ('# Reducing Data trick')
file_path = os.path.join('data', 'itEnvRed.npy')
if os.path.isfile(file_path):
rel = np.load(file_path)
self.iterator = rel.tolist()
self.iter_modulo = len(self.iterator)
del rel
ides = dict([(i,1) for i in self.iterator])
for i in range(len(self.data)):
if i not in ides:
self.iterator.append(i)
del ides
print (' Taken from', colorizar(file_path))
else:
cnt = mp.cpu_count()
pool = mp.Pool(cnt)
dx = int(len(self.data_eval) / cnt )
dx = [(i*dx, i*dx + dx + (0 if i != cnt-1 else len(self.data_eval) % cnt)) for i in range(cnt)]
label_list = pool.map(self.reduceData, dx)
del pool
del cnt
del dx
ides = {}
for li in label_list:
for v in li:
ides.update({v:1})
del label_list
self.iterator = [ v for v in ides ]
self.iter_modulo = len(self.iterator)
save = np.array(self.iterator, dtype=np.int64)
np.save(file_path, save)
del save
for i in range(len(self.data)):
if i not in ides:
self.iterator.append(i)
del ides
def reduceData(self, ini_fin):
sol = []
for k in range(ini_fin[0],ini_fin[1]):
vec = np.array(strToListF(self.data_eval.loc[k, self.vname]), dtype=np.float32)
lab = int(self.data_eval.loc[k, self.lname])
ide, dist = None, None
for i in range(len(self.data)):
curr_vec = np.array(strToListF(self.data.loc[i, self.vname]), dtype=np.float32)
curr_lab = int(self.data.loc[i, self.lname])
if lab != curr_lab: continue
distance = np.sqrt(((curr_vec - vec) ** 2).sum()).item()
if dist is None or dist > distance:
dist = distance
ide = i
sol.append(ide)
del self.data_eval
del self.data
return sol
def __next(self):
if self.iter_pos is None:
self.iter_pos = 0
selection_part = self.iterator[:self.iter_modulo]
other_part = self.iterator[self.iter_modulo:]
random.shuffle(selection_part) # RANDOMIZE
random.shuffle(other_part)
self.iterator = selection_part + other_part
self.iter_pos += 1
if (self.iter_pos >= len(self.iterator)) or ((self.iter_pos % self.iter_modulo == 0) and self.iter_pos > 0):
self.done = True
self.__calculate_final_R()
return None, None
i = self.iterator[self.iter_pos]
cad = strToListF(self.data.loc[i, self.vname])
lab = int(self.data.loc[i, self.lname])
return cad, lab
def export_prototypes(self, file_list, label_list, silense=False):
''' export to a .npy the vectors in the backpack\n
filelist: [f1:Str, f2:str, ... , fn:str] \n
label_list: [l1:int, l2:int, ..., ln:int] \n
the vectors with li label will be placed in fi file for all i'''
for file_, label_ in zip(file_list, label_list):
if not silense:
print ('# Exporting prototypes to', colorizar(os.path.basename(file_)))
expo = []
for v,l in zip(self.backpack, self.backpack_l):
if l != label_: continue
expo.append(v.reshape(1,-1))
expo = np.concatenate(expo, axis=0)
np.save(file_+'.npy', expo)
def proto_cmp_data_csv(self, ini_fin):
''' function used with parallelism to calculate the labels of the data with the prototypes.\n
ini_fin:pair (ini:int, fin:int) the initial and final position of data, accesed by data.loc[i, vname] for i in [ini,fin) '''
sol = []
for i in range(ini_fin[0], ini_fin[1]):
# lab = int(data.loc[i, lname])
vec = None
if self.data_eval is not None:
vec = np.array(strToListF(self.data_eval.loc[i, self.vname]), dtype=np.float32)
else:
vec = np.array(strToListF(self.data.loc[i, self.vname]), dtype=np.float32)
min_val, l_min = None, None
for v, l in zip(self.backpack, self.backpack_l):
if l is None : continue
# euclidean distance between the candidate prototype and the data vector
current_value = np.sqrt(((v - vec) ** 2).sum())
if min_val is None or min_val > current_value:
min_val = current_value
l_min = l
if l_min is None:
break
sol.append(l_min)
del self.data
if self.data_eval is not None:
del self.data_eval
return np.array(sol, np.int32) # NOTE: revisit the int32 dtype later
def __calculate_final_R(self):
''' Inside this, self.iterator is seted to None, be aware of future errors '''
cnt = mp.cpu_count()
pool = mp.Pool(cnt)
if self.data_eval is not None:
dx = int(len(self.data_eval) / cnt )
dx = [(i*dx, i*dx + dx + (0 if i != cnt-1 else len(self.data_eval) % cnt)) for i in range(cnt)]
else:
dx = int(len(self.data) / cnt )
dx = [(i*dx, i*dx + dx + (0 if i != cnt-1 else len(self.data) % cnt)) for i in range(cnt)]
label_list = pool.map(self.proto_cmp_data_csv, dx)
del pool
label_list = np.concatenate(label_list, axis=0)
if label_list.shape[0] <= 0:
# The backpack is empty !
self.final_reward = self.VERY_BAD_REWARD
return
if self.data_eval is not None:
original_label = np.array(self.data_eval[self.lname].tolist(), dtype=np.int32)
else:
original_label = np.array(self.data[self.lname].tolist(), dtype=np.int32)
if self.frmethod == 'acc':
self.final_reward = ((label_list == original_label).sum() / original_label.shape[0]).item()
del label_list
del original_label
def __reset_backpack(self):
if len(self.backpack) <= 0:
for _ in range(self.max_backpack_size):
self.backpack.append(np.array([0 for _ in range(self.vec_size)], dtype=np.float32))
self.backpack_l.append(None)
else:
for k in range(self.max_backpack_size):
self.backpack[k] = np.array([0 for _ in range(self.vec_size)], dtype=np.float32)
self.backpack_l[k] = None
def __makeState(self):
self.current_vector = self.__next()
backP = np.stack(self.backpack, axis=0)
if self.done:
vecI = np.zeros(self.vec_size, dtype=np.float32)
else:
vecI = np.array(self.current_vector[0], dtype=np.float32)
return (backP, vecI)
def reset(self):
''' Return the pair: a np.array of shape (max_backpack_size, vec_size) and a np.array of shape (vec_size).
They are: (backpack state, incoming vector from data). '''
self.done = False
self.final_reward = None
if (self.iter_pos is not None) and (self.iter_pos >= len(self.iterator)):
self.iter_pos = None
self.__reset_backpack()
s,v = self.__makeState()
return s,v
def step(self, action:int):
''' Return four objects: \n
\t BackPack State, Incoming Vector from data, reward, done \n
\t types: np.array(max_backpack_size, vec_size) , np.array (vec_size), float, bool '''
if action < 0 or action > self.max_backpack_size:
raise ValueError('ERROR in action input variable, action: {} not in [0,{}]'.format(action,self.max_backpack_size))
self.pos_gone = action if action < self.max_backpack_size else None
reward = 0.
if action < self.max_backpack_size:
self.backpack[action] = np.array(self.current_vector[0], dtype=np.float32)
self.backpack_l[action] = int(self.current_vector[1])
s,v = self.__makeState()
if self.final_reward is not None: reward += self.final_reward
return s, v, reward, self.done
def prepareBackpackState(blist, vec):
state = np.concatenate([blist,vec.reshape(1,-1)], axis=0)
state = torch.from_numpy(state)
return state
def __policy_dql(qvalues, nactions=12,eps=None):
with torch.no_grad():
if eps is not None:
if torch.rand(1) < eps:
return torch.randint(low=0,high=nactions, size=(1,))
else:
return torch.argmax(qvalues)
else:
return torch.multinomial(F.softmax(F.normalize(qvalues), dim=0), num_samples=1)
def __minibatch_train_dql(Qmodel, Qtarget, qloss, replay, params, DEVICE, icm=None):
state1_batch, action_batch, reward_batch, state2_batch = replay.get_batch()
action_batch = action_batch.view(action_batch.shape[0],1).to(device=DEVICE)
reward_batch = reward_batch.view(reward_batch.shape[0],1).to(device=DEVICE)
state1_batch = state1_batch.to(device=DEVICE)
state2_batch = state2_batch.to(device=DEVICE)
forward_pred_err , inverse_pred_err = 0., 0.
reward = reward_batch
if icm is not None:
forward_pred_err , inverse_pred_err = icm(state1_batch, action_batch, state2_batch)
i_reward = (1. / float(params['eta'])) * forward_pred_err
reward += i_reward.detach()
# qvals = Qmodel(state2_batch) # recordar usar target net later
qvals = Qtarget(state2_batch)
reward += float(params['gamma']) * torch.max(qvals)
reward_pred = Qmodel(state1_batch)
reward_target = reward_pred.clone()
indices = torch.stack((torch.arange(action_batch.shape[0]).to(device=DEVICE),action_batch.squeeze().to(device=DEVICE)), dim=0)
indices = indices.tolist()
reward_target[indices] = reward.squeeze()
q_loss = 1e5 * qloss(F.normalize(reward_pred), F.normalize(reward_target.detach()))
return forward_pred_err, inverse_pred_err, q_loss
def __loss_fn(q_loss, inverse_loss, forward_loss, params):
loss_ = (1 - float(params['beta'])) * inverse_loss
loss_ += float(params['beta']) * forward_loss
loss_ = loss_.mean() # loss_.sum() / loss.flatten().shape[0]
loss = loss_ + float(params['lambda']) * q_loss
return loss
# params (data_path, lcolumn, vcolumn, param)
def __prototypes_with_dql(params):
print ('# Start:','Deep Q Learning algorithm. Relax, this will take a while.')
BACKPACK_SIZE, EPS = int(params['max_prototypes']), float(params['eps'])
EPOCHS, LR, BSIZE = int(params['epochs']), float(params['lr']), int(params['batch_size'])
DMODEL = int(params['d_model'])
target_refill, i_targetFill = int(params['target_refill']), 0
use_icm = params['ICM']
losses = []
switch_to_eps_greedy = int(EPOCHS * (2/5))
env = VecDataEnvironment(params['data_path'], eval_path=params['eval_data_path'], max_backpack_size=BACKPACK_SIZE, vname=params['vcolumn'], lname=params['lcolumn'], rdata_weval=bool(params['reduced_data_prototypes']))
DEVICE = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# max_len: 5000 (previously BACKPACK_SIZE+11); this way it can hopefully adapt as the backpack grows little by little
max_len = min(5000, BACKPACK_SIZE+100)
Qmodel = Agent_DQL(BACKPACK_SIZE+1, nhead=int(params['nhead']),nhid=int(params['nhid']),d_model=DMODEL,nlayers=int(params['n_layers']), max_len=max_len,dropout=float(params['dropout']))
qloss = torch.nn.MSELoss().to(device=DEVICE)
# seting up the taget net, and memory replay stuff
Qtarget = copy.deepcopy(Qmodel).to(device=DEVICE)
Qtarget.load_state_dict(Qmodel.state_dict())
replay = ExperienceReplay(N=int(params['memory_size']), batch_size=BSIZE)
all_model_params = list(Qmodel.parameters())
icm = None
if use_icm:
icm = ICM_DQL(BACKPACK_SIZE+1, DMODEL*(BACKPACK_SIZE+1), DMODEL, max_len=max_len, forward_scale=1., inverse_scale=1e4, nhead=int(params['nhead']),hiden_size=int(params['nhid']),nlayers=int(params['n_layers']), dropout=float(params['dropout']))
all_model_params += list(icm.parameters())
icm.train()
opt = torch.optim.Adam(lr=LR, params=all_model_params)
Qmodel.train()
greater_reward = -(2**30)
greater_reward_c = greater_reward
triple_sch = [float(i) / 100. for i in params['distribution_train'].split('-')]
for i in range(1,len(triple_sch)): triple_sch[i] += triple_sch[i-1]
# triple_sch = [ triple_sch[i] + (triple_sch[i-1] if i > 0 else 0) for i in range(len(triple_sch))]
if abs(triple_sch[-1] - 1.) > 1e-9:
raise ValueError("Parameter 'distribution_train' most add 100, but has {}.".format(triple_sch[-1]*100.))
pos_tr = 0
for i in range(EPOCHS):
print('# Epoch {}/{} {}'.format(i+1, EPOCHS, 'with eps' if i >= switch_to_eps_greedy else 'with softmax policy'))
while pos_tr < len(triple_sch) and int(EPOCHS * triple_sch[pos_tr]) <= i+1:
env.mulIterModulo(2.0)
pos_tr += 1
all_obj_seeit = False
state1 = prepareBackpackState(*env.reset()).unsqueeze(0).to(device=DEVICE)
acc_reward = 0.
it_episode = 0
init_time = time.time()
while not all_obj_seeit:
# parafernalia ----------------------------
it_episode += 1
print ('\r It {} with reward {:.4f} | {}'.format(it_episode, acc_reward, getSTime(time.time()-init_time)), end=' ')
# -----------------------------------------
opt.zero_grad()
q_val_pred = Qmodel(state1)
# Use softmax policy only at the begining
if i >= switch_to_eps_greedy:
action = int(__policy_dql(q_val_pred, nactions=BACKPACK_SIZE+1,eps=EPS))
else:
action = int(__policy_dql(q_val_pred, nactions=BACKPACK_SIZE+1))
back_state, vec_state , e_reward, done = env.step(action)
state2 = prepareBackpackState(back_state, vec_state).unsqueeze(0).to(device=DEVICE)
replay.add_memory(state1, action, e_reward, state2)
acc_reward += e_reward
all_obj_seeit = done
if not done:
state1 = state2
if len(replay.memory) < BSIZE:
continue
forward_pred_err, inverse_pred_err, q_loss = __minibatch_train_dql(Qmodel, Qtarget, qloss, replay, params, DEVICE, icm=icm)
loss = __loss_fn(q_loss, inverse_pred_err, forward_pred_err, params)  # arguments ordered to match the (q_loss, inverse_loss, forward_loss) signature
loss_list = (q_loss.mean().item(), forward_pred_err.flatten().mean().item(), inverse_pred_err.flatten().mean().item())
losses.append(loss_list)
loss.backward()
opt.step()
i_targetFill += 1
if i_targetFill % target_refill == 0:
i_targetFill = 0
Qtarget.load_state_dict(Qmodel.state_dict())
if greater_reward_c < acc_reward:
greater_reward_c = acc_reward
env.export_prototypes(file_list = [os.path.join('data','pos_center'), os.path.join('data','neg_center')], label_list = [1, 0])
if greater_reward <= acc_reward and (pos_tr >= len(triple_sch)):
greater_reward = acc_reward
Qmodel.save(os.path.join('pts', 'dql_model.pt'))
if icm is not None:
icm.save(os.path.join('pts', 'icm_model.pt'))
print ('\r It {} with reward:{:.4f} | {}'.format(it_episode, acc_reward, getSTime(time.time()-init_time)), end='\n')
losses_ = np.array(losses)
np.save(os.path.join('out', 'dql_losses.npy'), losses_)
del icm
del opt
del replay
# best model
# Qmodel.load(os.path.join('pts', 'dql_model.pt'))
# Qmodel.eval()
# it_episode, acc_reward = 0, 0.
# init_time = time.time()
# env.resetIterator()
print ('# Ending:','Deep Q Learning algorithm')
# state1 = prepareBackpackState(*env.reset()).unsqueeze(0)
# with torch.no_grad():
# while True:
# # parafernalia ----------------------------
# it_episode += 1
# # -----------------------------------------
# q_val_pred = Qmodel(state1)
# action = int(__policy_dql(q_val_pred, nactions=BACKPACK_SIZE+1, eps=0.01))
# back_state, vec_state , e_reward, done = env.step(action)
# state1 = prepareBackpackState(back_state, vec_state).unsqueeze(0)
# acc_reward += e_reward
# all_obj_seeit = done
# if done:
# print ('\r It {} with reward {:.4f} | {}'.format(it_episode, acc_reward, getSTime(time.time()-init_time)))
# break
# print ('\r It {} with reward {:.4f} | {}'.format(it_episode, acc_reward, getSTime(time.time()-init_time)), end=' ')
# esporting final state of the backpack
# env.export_prototypes(file_list = [os.path.join('data','pos_center'), os.path.join('data','neg_center')],
# label_list = [1 , 0])
del env
def extractPrototypes(method, params):
""" Apply a method to extract prototypes from data. \n
method: the method used to select prototypes, must be in [\'dql\', \'dql-intrinsic\']\n
data_path:str a path to a \'.csv\' file containing the columns [vcolumn, lcolumn]. \n
eval_data_path: same as data_path, but treated as evaluation data \n
The column vcolumn must be a list of floating points, a vector.\n
The column lcolumn is the label of the vectors, [0,1]. """
__paramu = {'intrinsic':True, 'lambda':0.1, 'eta':1.0, 'gamma':0.2, 'eps':0.15, 'beta':0.2,
'lcolumn':'is_humor', 'vcolumn':'vecs', 'max_prototypes':20, # 200
'batch_size':10, 'lr':0.001, 'epochs':20, 'memory_size':50}
__paramu.update(params)
methods_ = [('dql', __prototypes_with_dql), ('dql-intrinsic', __prototypes_with_dql)]
for mname, fun in methods_:
if method == mname:
fun(__paramu)
return
print ('ERROR::extractPrototypes Method parameter', '\''+method+'\'', 'is not in [', ' , '.join(['\''+s+'\'' for s,_,_ in methods_]), '] !!!!')
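# Hedged usage sketch (the csv paths are hypothetical; the column names are the defaults used above,
# and several further keys such as nhead, nhid, d_model, n_layers, dropout, target_refill,
# memory_size and distribution_train are also expected by __prototypes_with_dql):
# extractPrototypes('dql', {'data_path': 'data/train_vecs.csv', 'eval_data_path': 'data/eval_vecs.csv',
#                           'vcolumn': 'vecs', 'lcolumn': 'is_humor', 'ICM': True,
#                           'reduced_data_prototypes': False, 'max_prototypes': 20})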
|
mjason98/haha21
|
code/protos.py
|
protos.py
|
py
| 20,732 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13610828545
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
('learn', '0003_project_photo'),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('name', models.CharField(max_length=100, verbose_name='Nome')),
('slug', models.SlugField(max_length=100, verbose_name='Identificador')),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', verbose_name='Tags', through='taggit.TaggedItem', to='taggit.Tag')),
],
options={
'ordering': ['name'],
'verbose_name': 'Área de Estudo',
'verbose_name_plural': 'Áreas de Estudo',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='project',
name='area',
field=models.ForeignKey(verbose_name='Área', blank=True, related_name='projects', null=True, to='learn.Area'),
preserve_default=True,
),
migrations.AddField(
model_name='project',
name='open_enrollment',
field=models.BooleanField(default=False, verbose_name='Inscrições Abertas'),
preserve_default=True,
),
]
|
klebercode/sofia
|
sofia/apps/learn/migrations/0004_auto_20141215_1723.py
|
0004_auto_20141215_1723.py
|
py
| 1,769 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1922022592
|
from sklearn import preprocessing
import pandas as pd
import numpy as np
import pickle
data_path = './data/STT.csv'
window = 15
def normalize(df):
min_max_scaler = preprocessing.MinMaxScaler()
df['open'] = min_max_scaler.fit_transform(df.open.values.reshape(-1, 1))
df['close'] = min_max_scaler.fit_transform(df.close.values.reshape(-1, 1))
df['high'] = min_max_scaler.fit_transform(df.high.values.reshape(-1, 1))
df['low'] = min_max_scaler.fit_transform(df.low.values.reshape(-1, 1))
df['volume'] = min_max_scaler.fit_transform(
df.volume.values.reshape(-1, 1))
return df
def split_data(stock, window, percent=0.85):
# slide a window of `window` time steps over the series; the step right after each window
# becomes the prediction target, and the first `percent` of windows form the training split
amount_of_features = len(stock.columns) # 5 features: open, close, high, low, volume
data = stock.values
sequence_length = window + 1 # window input steps plus one target step
result = []
for index in range(len(data) - sequence_length):
result.append(data[index: index + sequence_length])
row = round(percent * data.shape[0])
result = np.array(result)
train = result[:int(row), :]
x_train = train[:, :-1]
y_train = np.array(train[:, -1][:, -1])
x_test = result[int(row):, :-1]
y_test = np.array(result[int(row):, -1][:, -1])
x_train = np.reshape(
x_train, (x_train.shape[0], x_train.shape[1], amount_of_features))
x_test = np.reshape(
x_test, (x_test.shape[0], x_test.shape[1], amount_of_features))
return [x_train, y_train, x_test, y_test]
if __name__ == "__main__":
df = pd.read_csv(data_path, index_col=0)
target_df = df[df.symbol == 'STT'].copy()
target_df.drop(['symbol'], axis=1, inplace=True)
target_df_normalized = normalize(target_df)
x_train, y_train, x_test, y_test = split_data(
target_df_normalized, window)
with open('./data/train.pickle', 'wb') as f:
pickle.dump((x_train, y_train), f)
with open('./data/test.pickle', 'wb') as f:
pickle.dump((x_test, y_test), f)
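# Hedged companion sketch for consuming the pickles written above (file names match the script;
# shapes follow split_data: x is (samples, window, features), y is (samples,)):
# with open('./data/train.pickle', 'rb') as f:
#     x_train, y_train = pickle.load(f)
# print(x_train.shape, y_train.shape)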
|
sinlin0908/ML_course
|
hw4/prepro.py
|
prepro.py
|
py
| 1,925 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8677677831
|
import xarray as xr
import xesmf as xe
import pandas as pd
import datetime
import os
first_date = '2021-01-01'
last_date = '2022-12-31'
lonmin,lonmax = 360-90,360-69
latmin,latmax = -40,-15
variables = [
'surf_el',
'water_temp',
'salinity',
'water_u',
'water_v']
renamedict = {'surf_el':'zos',
'water_temp':'thetao',
'salinity':'so',
'water_u':'uo',
'water_v':'vo'}
def get_hycom_filename(ftype):
if ftype=='hindcast':
url = 'https://<Lucas.Glasner:y4vkrp7lqcv>@tds.hycom.org/thredds/dodsC/GLBy0.08/expt_93.0'
return url
def get_hycom_hindcast(first_date, last_date, lonmin, lonmax, latmin, latmax, variables):
url = get_hycom_filename('hindcast')
data = xr.open_dataset(url, decode_times=False)
data = data[variables]
data = data.sel(lat=slice(latmin,latmax), lon=slice(lonmin, lonmax))
attrs = data.time.attrs
units,reference_date = data.time.attrs['units'].split('since')
time = [pd.Timedelta(hours=t)+pd.to_datetime(reference_date) for t in data.time.values]
data.coords['time'] = ('time',time, {'long_name':attrs['long_name'],
'axis':attrs['axis'],
'NAVO_code':attrs['NAVO_code']})
data = data.sel(time=slice(first_date, last_date))
return data
if __name__=='__main__':
data = get_hycom_hindcast(first_date=first_date,
last_date=last_date,
lonmin=lonmin, lonmax=lonmax,
latmin=latmin, latmax=latmax,
variables=variables)
data = data.rename(renamedict)
daterange = pd.date_range(first_date, last_date, freq='d')
for date in daterange:
datestr = date.strftime('%Y-%m-%d')
try:
print('Downloading data for ',datestr,'please wait...')
x = data.sel(time=datestr).resample({'time':'d'}).mean()
x.coords['time'] = x.time+pd.Timedelta(hours=12)
x.to_netcdf(
'HINDCAST/hycom_hindcast_0p08_{}.nc'.format(date.strftime('%Y%m%d')),
encoding={
'time':{'units':'hours since 2000-01-01', 'dtype':float},
'zos':{'zlib':True, 'complevel':3},
'so':{'zlib':True, 'complevel':3},
'uo':{'zlib':True, 'complevel':3},
'vo':{'zlib':True, 'complevel':3},
'thetao':{'zlib':True, 'complevel':3}
}
)
except Exception as e:
print('Download for ',datestr,' failed:',e)
|
lucasglasner/DOWNLOADSCRIPTS
|
HYCOM/download_hycom_hindcast.py
|
download_hycom_hindcast.py
|
py
| 2,754 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29099471877
|
from openpyxl import load_workbook, Workbook
from openpyxl.formatting.rule import ColorScaleRule
from openpyxl.styles import PatternFill, Font
def _cal_writer_final_report(barcode, ws_report, all_data, init_row, init_col, report_output):
row_counter = init_row
ws_report.cell(column=-1 + init_col, row=row_counter, value=barcode).font = Font(b=True, underline="single")
row_counter += 1
for plate_analysed in all_data["calculations"]:
# Removing other calculations than avg and stdev
if plate_analysed != "other_data":
# Checks to see if the overview of avg and stdev should be included
if report_output[plate_analysed]["overview"]:
# Writes the analysed method in, if the overview is set to true
ws_report.cell(column=-1 + init_col, row=row_counter, value=plate_analysed).font = Font(b=True)
# row_counter += 1
for state in all_data["calculations"][plate_analysed]:
if report_output[plate_analysed][state]:
ws_report.cell(column=init_col, row=row_counter, value=state).font = Font(b=True)
for calc in all_data["calculations"][plate_analysed][state]:
# Writes avg and stdev including values
ws_report.cell(column=init_col + 1, row=row_counter, value=calc)
ws_report.cell(column=init_col + 2, row=row_counter,
value=all_data["calculations"][plate_analysed][state][calc])
row_counter += 1
else:
if report_output["z_prime"]:
ws_report.cell(column=init_col, row=row_counter,
value="z-Prime").font = Font(b=True)
try:
ws_report.cell(column=init_col + 2, row=row_counter,
value=all_data["calculations"][plate_analysed]["z_prime"])
except KeyError:
ws_report.cell(column=init_col + 2, row=row_counter,
value="Z-Prime is not calculated for the plates")
row_counter += 1
row_counter += 1
return ws_report, row_counter
def _well_writer_final_report(ws, hits, final_report_setup, init_row):
indent_col = 1
row_counter = init_row
for barcode in hits:
# Writes headline for data inserts to see where the data is coming from
ws.cell(column=indent_col, row=row_counter, value=barcode).font = Font(b=True, underline="single")
row_counter += 1
for method in hits[barcode]:
if final_report_setup["methods"][method]:
# writes method
ws.cell(column=indent_col, row=row_counter, value=method).font = Font(b=True)
row_counter += 1
for split in hits[barcode][method]:
ws.cell(column=indent_col, row=row_counter, value=split).font = Font(b=True)
ws.cell(column=indent_col+1, row=row_counter,
value=final_report_setup["pora_threshold"][split]["min"]).font = \
Font(underline="single")
ws.cell(column=indent_col+2, row=row_counter,
value=final_report_setup["pora_threshold"][split]["max"]).font = \
Font(underline="single")
row_counter += 1
for well in hits[barcode][method][split]:
ws.cell(column=indent_col + 1, row=row_counter, value=well)
ws.cell(column=indent_col + 2, row=row_counter,
value=hits[barcode][method][split][well])
row_counter += 1
indent_col += 4
row_counter = init_row
def _get_data(all_plate_data, final_report_setup):
data_calc_dict = {}
temp_hits = {}
plate_counter = 0
all_states = []
all_methods = []
for barcode in all_plate_data:
plate_counter += 1
temp_hits[barcode] = {}
data_calc_dict[barcode] = {}
for method in all_plate_data[barcode]["plates"]:
if method != "other_data":
if method not in all_methods:
all_methods.append(method)
if final_report_setup["methods"][method]:
temp_hits[barcode][method] = {"low": {}, "mid": {}, "high": {}}
for well in all_plate_data[barcode]["plates"][method]["wells"]:
if well in all_plate_data[barcode]["plates"][method]["sample"]:
for split in final_report_setup["pora_threshold"]:
temp_well_value = all_plate_data[barcode]["plates"][method]["wells"][well]
if float(final_report_setup["pora_threshold"][split]["min"]) < float(temp_well_value) < \
float(final_report_setup["pora_threshold"][split]["max"]):
temp_hits[barcode][method][split][well] = temp_well_value
for method in all_plate_data[barcode]["calculations"]:
data_calc_dict[barcode][method] = {}
if method != "other_data":
for state in all_plate_data[barcode]["calculations"][method]:
if state not in all_states:
all_states.append(state)
data_calc_dict[barcode][method][state] = {}
for calc in all_plate_data[barcode]["calculations"][method][state]:
data_calc_dict[barcode][method][state][calc] = \
all_plate_data[barcode]["calculations"][method][state][calc]
else:
for other_calc in all_plate_data[barcode]["calculations"][method]:
data_calc_dict[barcode][method][other_calc] = \
all_plate_data[barcode]["calculations"][method][other_calc]
return temp_hits, data_calc_dict, plate_counter, all_states, all_methods
def _ws_creator(wb, name):
return wb.create_sheet(f"{name}_Matrix")
def _matrix_writer(ws, data_calc_dict, state, plate_counter, all_methods):
init_row = 2
init_col = 2
spacer = 4
col_stdev = init_col + plate_counter + spacer
col_counter = init_col + 1
row_counter = init_row + 1
col_stdev_counter = col_stdev + 1
row_offset = init_row
for method in all_methods:
temp_avg_list = []
temp_stdev_list = []
mw_col = col_counter
mw_row = row_counter
mw_col_stdev = col_stdev_counter
for barcodes in data_calc_dict:
# Writes Plate names in row and clm for avg
ws.cell(column=init_col - 1, row=row_counter, value=barcodes).font = Font(b=True)
ws.cell(column=col_counter, row=row_offset - 1, value=barcodes).font = Font(b=True)
# Writes Plate names in row and clm for stdev
ws.cell(column=col_stdev - 1, row=row_counter, value=barcodes).font = Font(b=True)
ws.cell(column=col_stdev_counter, row=row_offset - 1, value=barcodes).font = Font(b=True)
for index_method, _ in enumerate(data_calc_dict[barcodes]):
if index_method == 0:
# Writes method for avg
ws.cell(column=init_col, row=row_offset - 1, value=method).font = Font(b=True)
# Writes method for stdev
ws.cell(column=col_stdev, row=row_offset - 1, value=method).font = Font(b=True)
if method != "other_data":
for calc in data_calc_dict[barcodes][method][state]:
temp_value = data_calc_dict[barcodes][method][state][calc]
# gets avg values
if calc == "avg":
ws.cell(column=init_col, row=row_offset, value=calc).font = Font(b=True)
ws.cell(column=init_col, row=row_counter, value=temp_value)
ws.cell(column=col_counter, row=row_offset, value=temp_value)
temp_avg_list.append(temp_value)
elif calc == "stdev":
ws.cell(column=col_stdev, row=row_offset, value=calc).font = Font(b=True)
ws.cell(column=col_stdev, row=row_counter, value=temp_value)
ws.cell(column=col_stdev_counter, row=row_offset, value=temp_value)
temp_stdev_list.append(temp_value)
# Sets offset for next loop, for writing headlines the right place
col_counter += 1
row_counter += 1
col_stdev_counter += 1
# calculate the % difference between avg for each plate
_matrix_calculator(ws, mw_row, mw_col, temp_avg_list)
# calculate the % difference between stdev for each plate
_matrix_calculator(ws, mw_row, mw_col_stdev, temp_stdev_list)
# makes sure that the next loop is written below the first method. One method per row, with avg and stdev for each.
col_stdev = init_col + plate_counter + spacer
col_counter = init_col + 1
row_counter += spacer
col_stdev_counter = col_stdev + 1
row_offset += (plate_counter + spacer)
def _matrix_calculator(ws, row, col, temp_data_list):
start_row = row
start_col = col
for index_x, _ in enumerate(temp_data_list):
for index_y, _ in enumerate(temp_data_list):
try:
temp_value = (float(temp_data_list[index_x]) / float(temp_data_list[index_y])) * 100
except ZeroDivisionError:
temp_value = "Na"
ws.cell(column=start_col + index_x, row=start_row + index_y, value=temp_value)
def _z_prime(ws, data_calc_dict):
init_row = 2
init_col = 2
col_counter = init_col + 1
row_counter = init_row + 1
z_prime_list = []
for barcodes in data_calc_dict:
# Writes Plate names
ws.cell(column=init_col-1, row=row_counter, value=barcodes).font = Font(b=True)
ws.cell(column=col_counter, row=init_row-1, value=barcodes).font = Font(b=True)
# Writes values for Z-Prime
z_prime = data_calc_dict[barcodes]["other_data"]["z_prime"]
ws.cell(column=init_col, row=row_counter, value=z_prime)
ws.cell(column=col_counter, row=init_row, value=z_prime)
col_counter += 1
row_counter += 1
z_prime_list.append(z_prime)
col_counter = init_col + 1
row_counter = init_row + 1
for index_x, _ in enumerate(z_prime_list):
for index_y, _ in enumerate(z_prime_list):
temp_value = (z_prime_list[index_x] / z_prime_list[index_y]) * 100
ws.cell(column=col_counter + index_x, row=row_counter + index_y, value=temp_value)
def bio_final_report_controller(analyse_method, all_plate_data, output_file, final_report_setup):
wb = Workbook()
ws_report = wb.active
ws_report.title = "Full report"
ws_well_info = wb.create_sheet("Well Info")
ws_z_prime = wb.create_sheet("Z-Prime")
# ws_minimum = wb.create_sheet("Minimum")
# ws_maximum = wb.create_sheet("Maximum")
init_row = 2
init_col = 2
row = init_row
col = init_col
# calc overview:
for index, barcode in enumerate(all_plate_data):
ws, row_counter = _cal_writer_final_report(barcode, ws_report, all_plate_data[barcode], row, col,
final_report_setup["calc"])
# Writes 5 plates horizontal, before changing rows.
col += 5
if index % 5 == 0 and index > 0:
row += row_counter
col = init_col
# gets data:
temp_hits, data_calc_dict, plate_counter, all_states, all_methods = _get_data(all_plate_data, final_report_setup)
# write well data
_well_writer_final_report(ws_well_info, temp_hits, final_report_setup, init_row)
# writes Matrix of data:
# inside guard ! ! ! !
print(all_states)
for states in all_states:
if final_report_setup["full_report_matrix"][states]:
_matrix_writer(_ws_creator(wb, states), data_calc_dict, states, plate_counter, all_methods)
# writes Z-prime
if final_report_setup["full_report_matrix"]["z_prime"]:
_z_prime(ws_z_prime, data_calc_dict)
wb.save(output_file)
|
ZexiDilling/structure_search
|
report_setup.py
|
report_setup.py
|
py
| 12,580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1526323654
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 11:13:08 2020
@author: jiaxinli
"""
import re
def readlines(filepath):
fd = open(filepath, 'r')
lines = []
for line in fd:
        ### Uncomment if needed to filter out characters other than alphanumerics, % and $
# line = re.sub(r"[^a-zA-Z0-9\%\$]+", ' ', line)
line = line.lower().strip()
lines.append(line)
fd.close()
return lines
# Description:
# This function is used to find any dates within a list of lines
#
# Input:
# A list (array) of lines (text)
# Output:
# A list of dates found in the list of lines
def find_time(lines):
potential_data_list = []
for line in lines:
potential_data_list += re.findall(r'\d{1,2}[/-]\d{1,2}[/-]\d{2,4}',line) #for format 23-10-2002 23/10/2002 23/10/02 10/23/2002
        potential_data_list += re.findall(r'(?:\d{1,2} )?(?:jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)[a-z]* (?:\d{1,2},)?\d{2,4}',line) # for formats: 23 Oct 2002, 23 October 2002, Oct 23,2002, October 23,2002
return potential_data_list
lines = readlines('sample_cv.txt')
result = find_time(lines)
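For reference, a small self-contained demonstration of the two date patterns used above, run on hypothetical lowercase strings (independent of sample_cv.txt):
import re

samples = ["born 23-10-2002 in x", "joined oct 23,2002", "graduated 23 october 2002"]
found = []
for line in samples:
    # numeric formats such as 23-10-2002 or 23/10/02
    found += re.findall(r'\d{1,2}[/-]\d{1,2}[/-]\d{2,4}', line)
    # textual formats such as 23 oct 2002 or oct 23,2002
    found += re.findall(r'(?:\d{1,2} )?(?:jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)[a-z]* (?:\d{1,2},)?\d{2,4}', line)
print(found)  # ['23-10-2002', 'oct 23,2002', '23 october 2002']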
|
jiaxinli980115/getting-job-description
|
find_time.py
|
find_time.py
|
py
| 1,179 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21645750883
|
# OpenCV thresholding tutorial
import cv2
import numpy as np
img = cv2.imread('Pagina.jpg')
# Convert the image to grayscale
grayscaled = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Global threshold of 12 on the BGR image
retval, threshold = cv2.threshold(img, 12, 255, cv2.THRESH_BINARY)
# Threshold of 10 on the grayscale image
retval, threshold2 = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY)
# Adaptive (Gaussian) threshold
th = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
cv2.imshow('Original',img)
cv2.imshow('Umbral',threshold)
cv2.imshow('Umbral en Escala de grises',threshold2)
cv2.imshow('Umbral Adaptativo',th)
cv2.waitKey(0)
cv2.destroyAllWindows()
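As a side note, a minimal check of the difference between the global and the adaptive threshold used above, run on a synthetic gradient instead of 'Pagina.jpg' (the image size and parameter values below are assumptions for illustration only):
import cv2
import numpy as np

gradient = np.tile(np.arange(256, dtype=np.uint8), (256, 1))  # brightness ramps from left to right
_, global_th = cv2.threshold(gradient, 127, 255, cv2.THRESH_BINARY)  # one cut-off for the whole image
adaptive_th = cv2.adaptiveThreshold(gradient, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, 115, 1)       # cut-off adapts to the local neighbourhood
print(global_th.shape, adaptive_th.shape)  # (256, 256) (256, 256)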
|
Deniry/Practicas_OpenCV
|
Practica5.py
|
Practica5.py
|
py
| 666 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38785952057
|
import cv2 as cv
import sys
img = cv.imread("Photos/cat_large.jpg")
print(img.shape)
cv.imshow("Cat", img)
def rescale(frame, scale=0.75):
width = frame.shape[1] * scale
height = frame.shape[0] * scale
dimensions = (int(width), int(height))
new_frame = cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)
return new_frame
new_img = rescale(img, 0.2)
print(new_img.shape)
cv.imshow("Catnew", new_img)
cv.waitKey(0)
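A quick, file-independent sanity check of rescale() with a synthetic frame (assuming the rescale function defined above is in scope; the frame size is arbitrary):
import numpy as np

synthetic = np.zeros((400, 600, 3), dtype=np.uint8)  # height 400, width 600
print(rescale(synthetic, 0.5).shape)  # (200, 300, 3) -- half the original height and width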
|
adamferencz/opencv-course-ghb
|
rescale.py
|
rescale.py
|
py
| 446 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41584679238
|
# On Windows, a Korean (Hangul) encoding error may occur.
# If such an encoding error occurs with
# Message.log(message_type="info", msg="데이터를 저장했습니다.")
# please change the msg in the line above to English before running.
import json
import sys
from eliot import Message, start_action, to_file, write_traceback
import requests
# Send the log output to standard output (print to the terminal)
to_file(sys.stdout)
# List of URLs to crawl
PAGE_URL_LIST = [
'https://eliot.readthedocs.io/en/1.0.0/',
'https://eliot.readthedocs.io/en/1.0.0/generating/index.html',
'https://example.com/notfound.html',
]
def fetch_pages():
    """Fetch the contents of each page."""
    # Identify which operation this log belongs to via action_type
with start_action(action_type="fetch_pages"):
page_contents = {}
for page_url in PAGE_URL_LIST:
            # Log which operation this is via action_type
with start_action(action_type="download", url=page_url):
try:
r = requests.get(page_url, timeout=30)
r.raise_for_status()
except requests.exceptions.RequestException as e:
                    write_traceback() # log a traceback when an exception occurs
continue
page_contents[page_url] = r.text
return page_contents
if __name__ == '__main__':
page_contents = fetch_pages()
with open('page_contents.json', 'w') as f_page_contents:
json.dump(page_contents, f_page_contents, ensure_ascii=False)
    # A plain log message can also be emitted on its own
Message.log(message_type="info", msg="데이터를 저장했습니다.")
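A minimal, standalone sketch of the same eliot pattern used above (nested start_action blocks plus a plain Message), separate from the crawler; the action_type names are made up for illustration:
import sys
from eliot import Message, start_action, to_file

to_file(sys.stdout)  # emit one JSON log line per message/action event
with start_action(action_type="outer_task"):
    with start_action(action_type="inner_step", item=1):
        Message.log(message_type="info", msg="nested action example")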
|
JSJeong-me/2021-K-Digital-Training
|
Web_Crawling/python-crawler/chapter_5/sample_eliot.py
|
sample_eliot.py
|
py
| 1,833 |
python
|
ko
|
code
| 7 |
github-code
|
6
|
24916898593
|
import time
from datetime import datetime
from bluepy.btle import BTLEDisconnectError
from miband import miband
from ibmcloudant.cloudant_v1 import CloudantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibmcloudant.cloudant_v1 import CloudantV1, Document
import os
from dotenv import load_dotenv
# All necessary imports
load_dotenv()
SERVICE_URL = os.getenv("SERVICE_URL")
API_KEY = os.getenv("API_KEY")
AUTH_KEY = os.getenv("AUTH_KEY")
MAC_ADDR = os.getenv("MAC_ADDR")
AUTH_KEY = bytes.fromhex(AUTH_KEY)
alternate = True
authenticator = IAMAuthenticator(API_KEY)
client = CloudantV1(authenticator=authenticator)
client.set_service_url(SERVICE_URL)
# All private keys loaded from .env file
def general_info(): # Prints general info about the band
global band
print("MiBand-4")
print("Soft revision:", band.get_revision())
print("Hardware revision:", band.get_hrdw_revision())
print("Serial:", band.get_serial())
print("Battery:", band.get_battery_info()["level"])
print("Time:", band.get_current_time()["date"].isoformat())
# Function to create a connection and return the band object
def create_connection():
success = False
while not success:
try:
band = miband(MAC_ADDR, AUTH_KEY, debug=True)
success = band.initialize()
return band
except BTLEDisconnectError:
print("Connection to the MIBand failed. Trying out again in 3 seconds")
time.sleep(3)
continue
except KeyboardInterrupt:
print("\nExit.")
exit()
band = create_connection()
general_info()
hr_list = {}
count = 0
def get_realtime():
try:
band.start_heart_rate_realtime(heart_measure_callback=heart_logger)
except KeyboardInterrupt:
print("\nExit.")
def heart_logger(data): # data is the heart rate value
data = abs(data)
global count # global variable to count the number of heart rate values
print("Realtime heart BPM:", data) # print the heart rate value
hr_list[
datetime.now().strftime("%d/%m/%y %H:%M:%S")
] = data # add the heart rate value to the dictionary
print(len(hr_list) // 2)
    if count % 3 == 0:  # Using every 3rd heart rate value to create a new document
time_ = str(datetime.now().strftime("%d/%m/%y %H:%M:%S"))
data_entry: Document = Document(id=time_)
# Add "add heart rate reading as value" field to the document
data_entry.value = data
# Save the document in the database
create_document_response = client.post_document(
db="jxtin", document=data_entry
).get_result()
print(
f"You have created the document:\n{data_entry}"
) # print the document that was created
print("Logged the data")
else:
print("Didnt log the data")
count += 1
get_realtime()
|
Rushour0/MSIT-The-New-Normal-Submission
|
WebVersions/web_v1/cloudant-module.py
|
cloudant-module.py
|
py
| 2,911 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6905801846
|
"""
Write a function count_letters(word_list) that takes as input a list of words that
are composed entirely of lower case letters . This function should return the lower
case letter that appears most frequently (total number of occurrences) in the words
in word_list. (In the case of ties, return the earliest letter in alphabetical order.)
The Python code snippet below represents a start at implementing count_letters using
a dictionary letter_count whose keys are the lower case letters and whose values are
the corresponding number of occurrences of each letter in the strings in word_list.
"""
def find_most_occurences(word_list):
chars = {}
word_list = word_list.replace(" ", "")
for char in word_list:
chars[char] = chars.get(char, 0) + 1
reversed_chars = {}
for k, v in chars.items():
reversed_chars[v] = reversed_chars.get(v, k)
for item in sorted(reversed_chars, reverse=True):
print(reversed_chars[item] + ": #" + str(item))
break
my_string = "isten strange women lying in ponds distributing swords is no basis for a system of government supreme executive power derives from a mandate from the masses not from some farcical aquatic ceremony"
find_most_occurences(my_string)
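For comparison, a minimal sketch of count_letters as described in the docstring above: it takes a list of lower-case words and breaks ties by the earliest letter in alphabetical order (this sketch is not part of the original snippet).
def count_letters(word_list):
    letter_count = {}
    for word in word_list:
        for letter in word:
            letter_count[letter] = letter_count.get(letter, 0) + 1
    # iterate keys in alphabetical order so that max() keeps the earliest letter on ties
    return max(sorted(letter_count), key=lambda letter: letter_count[letter])

print(count_letters(["hello", "world"]))  # 'l' (three occurrences)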
|
hqpiotr/learning-python
|
2. Python - Rice/c3-dataAnalysis/week1/c3_w1_ex.py
|
c3_w1_ex.py
|
py
| 1,251 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74543338748
|
'''Q5.
Write a Python program to sort (ascending and descending) a dictionary by value.
Original dictionary : {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}
Dictionary in ascending order by value : [(0, 0), (2, 1), (1, 2), (4, 3), (3, 4)]
Dictionary in descending order by value : {3: 4, 4: 3, 1: 2, 2: 1, 0: 0}'''
d = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}
print("Original dictionary :", d)
# Sort the (key, value) pairs by value
ascending = sorted(d.items(), key=lambda item: item[1])
print("Dictionary in ascending order by value :", ascending)
descending = dict(sorted(d.items(), key=lambda item: item[1], reverse=True))
print("Dictionary in descending order by value :", descending)
|
Jija-sarak/python_dictionary
|
q5.py
|
q5.py
|
py
| 493 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71082529787
|
from classes.rayon import *
from classes.point import *
from classes.mur import *
from classes.base import *
from resources.const import *
from processing.diffraction import get_direction
from processing.transmission import get_theta_i, get_theta_t, get_s, get_reflexion_perpendiculaire
from math import pi as PI
from math import atan, atan2, sin, sqrt, pow, asin,cos
from cmath import exp as cexp
from cmath import polar
from processing.analysis import intersect
# returns the list of reflected rays
def image_points(start_point, origin_point, murs):
    # returns a list of lists with the image points and the wall corresponding to each point (image point, wall used for the reflection)
ls=[]
for mur in murs:
if(mur.is_horizontal()):
image_point = Point(start_point.x, 2*mur.coin1.y-start_point.y)
sub_ls = [image_point, mur]
if((origin_point.x !=image_point.x) or(origin_point.y !=image_point.y)):
ls.append(sub_ls)
else:
image_point = Point(2*mur.coin1.x-start_point.x, start_point.y)
sub_ls = [image_point, mur]
if((origin_point.x !=image_point.x) or(origin_point.y !=image_point.y)):
ls.append(sub_ls)
return ls
def rayons_reflexion(start_point,end_point, murs):
    # Returns the list of rays that undergo 1 or 2 reflections between start_point and end_point
list_rayons = []
image_elems = image_points(start_point, start_point, murs)
z = 0
    # SINGLE REFLECTION
for elem in image_elems:
        mur_intersec = [elem[1]] # wall to test with the intersect function
        intersect_point =intersect(elem[0],end_point,mur_intersec) # intersect_point is a list with a single element
if(len(intersect_point)):
new_ray = Rayon(start_point)
intersect_point[0].set_interaction_type("r")
            intersect_point[0].set_direction(get_direction(intersect_point[0],start_point)) # gives the incident direction
new_ray.add_point_reflexion(intersect_point[0])
new_ray.add_point_principal(end_point)
            new_ray.find_all_intersections(murs) # intersections of the ray with the walls, for transmission
list_rayons.append(new_ray)
    # TWO REFLECTIONS
for elem in image_elems:
        # build a list of walls that does not contain the wall of the image point elem
mur_intermediaire_ls = []
for wall in murs:
if (Mur.is_different(wall,elem[1])):
mur_intermediaire_ls.append(wall)
        # find the image points of these image points with respect to the OTHER walls
image_elems2 = image_points(elem[0],start_point, mur_intermediaire_ls)
if(z==0):
save = elem[1]
im = image_elems2
z+=1
for elem2 in image_elems2:
            mur_intersec2 = [elem2[1]] # a single-element list with the wall for the second reflection
            intersect_point2_2 = intersect(elem2[0],end_point,mur_intersec2) # as before; here the second reflection point
if(len(intersect_point2_2)):
new_ray = Rayon(start_point)
                intersect_point2_1 = intersect(elem[0],intersect_point2_2[0],[elem[1]]) # first reflection point
if(len(intersect_point2_1)):
intersect_point2_1[0].set_interaction_type("r")
intersect_point2_1[0].set_direction(get_direction( intersect_point2_1[0],start_point))
intersect_point2_2[0].set_interaction_type("r")
intersect_point2_2[0].set_direction(get_direction( intersect_point2_1[0],intersect_point2_2[0]))
                    new_ray.add_point_reflexion(intersect_point2_1[0]) # first reflection
                    new_ray.add_point_reflexion(intersect_point2_2[0]) # second reflection
                    new_ray.add_point_principal(end_point)
                    new_ray.find_all_intersections(murs) # transmission points
list_rayons.append(new_ray)
return list_rayons
def set_reflexion_coefficient(rayon):
    # computes the reflection coefficients
points_reflexion = rayon.get_points_reflexions()
for pt_reflexion in points_reflexion:
mur = pt_reflexion.mur
alpha = mur.alpha
beta = mur.beta
gamma = complex(alpha,beta)
if(pt_reflexion.direction != None):
direction = abs(pt_reflexion.direction)
else:
direction = None
theta_i = get_theta_i(direction,pt_reflexion)
theta_t = get_theta_t(theta_i,mur.epsilon)
s = get_s(theta_t,mur.epaisseur)
Z1 = sqrt(UO/EPS_0)
Z2 = sqrt(UO/mur.epsilon)
r = get_reflexion_perpendiculaire(Z1,Z2,theta_i,theta_t)
num = (1-pow(r,2))* r *cexp(-2*gamma*s)*cexp(2*gamma*s*sin(theta_t)*sin(theta_i))
den = 1-(pow(r,2)*cexp((-2*gamma*s)+(gamma*2*s*sin(theta_t)*sin(theta_i))))
        coeff_abs = polar(r + num/den)[0] # modulus
pt_reflexion.set_coefficient_value(coeff_abs)
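To make the mirror-image construction used in image_points concrete, a small numeric check with hypothetical wall positions: reflecting (x, y) across a horizontal wall at y = y_w gives (x, 2*y_w - y), and across a vertical wall at x = x_w gives (2*x_w - x, y).
x, y = 3.0, 1.0
y_w = 4.0   # hypothetical horizontal wall
x_w = -2.0  # hypothetical vertical wall
print((x, 2 * y_w - y))   # (3.0, 7.0): image point across the horizontal wall
print((2 * x_w - x, y))   # (-7.0, 1.0): image point across the vertical wall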
|
bjoukovs/PHYSRayTracing2017
|
processing/reflexion.py
|
reflexion.py
|
py
| 5,192 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19218028573
|
from rest_framework import serializers
from api.v1.auth.schemas import LanguageChoiceField, TimeZoneNameChoiceField
from users.models import User
class CurrentUserOutputSchema(serializers.ModelSerializer):
language_code = LanguageChoiceField()
time_zone = TimeZoneNameChoiceField()
class Meta:
model = User
fields = (
"id",
"email",
"full_name",
"notification_token",
"language_code",
"time_zone",
"date_joined",
"is_staff",
"is_superuser",
)
|
plathanus-tech/django_boilerplate
|
src/api/v1/users/schemas.py
|
schemas.py
|
py
| 591 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40309032747
|
from initdata import db
'''
Simple way to persist records: one KEY==>VALUE pair per line, 'EndRec.' on its own line at the end of each record,
and 'EndDb.' at the end of the database.
'''
ENDREC='EndRec.'
ENDDB='EndDb.'
RECSEP='==>'
#file_name='people-file.txt'
file_name='people-file.txt'
print('2'+ __name__)
def writeDb(db, dbfname=file_name):
dbf = open(dbfname,'w')
print('Opened file')
for keyx in db:
print(keyx, file=dbf)
for (name, value) in db[keyx].items():
print(name + RECSEP + repr(value), file=dbf)
print(ENDREC, file=dbf)
print(ENDDB, file=dbf)
dbf.close()
print('wrote to file')
if __name__ == '__main__':
writeDb(db)
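A hypothetical companion reader for the format written above (not part of the original script); it is a sketch that assumes the stored values are simple literals written with repr(), and reuses file_name, ENDDB, ENDREC and RECSEP from this module.
import ast

def readDb(dbfname=file_name):
    db = {}
    key, record = None, {}
    with open(dbfname) as dbf:
        for line in dbf:
            line = line.rstrip('\n')
            if line == ENDDB:
                break
            if line == ENDREC:
                db[key] = record          # close the current record
                key, record = None, {}
            elif key is None:
                key = line                # first line of a record is its key
            else:
                name, value = line.split(RECSEP, 1)
                record[name] = ast.literal_eval(value)  # values were written with repr()
    return db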
|
mathewjoy/testpython
|
programmingpython/make_db_file.py
|
make_db_file.py
|
py
| 678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14321604555
|
# calculate n^2 jaccard values and create a companion file for every file
import sys
import glob
import os
import shutil
if os.path.exists("modified_files"):
shutil.rmtree("modified_files")
os.mkdir("modified_files")
def extract(directory):
    # looks at the sync folder and creates our own folder of exercises
files = glob.glob(directory + '/**/*.ml',recursive=True)
for fname in files:
fname_str = str(fname)
print(fname_str+"\n")
if(fname_str[0] == 'X' and fname_str[1] == '/'):
continue
stud_id = fname_str.split('/')
outfilename = ""
exercise_name = ''
'''
#creating a directory for each exercise
exercise_name = (stud_id[len(stud_id)-1]).split('.',1)[0]
if not os.path.exists("modified_files/"+exercise_name):
os.mkdir("modified_files/"+exercise_name)
del stud_id[-1]
'''
'''
#creating a copy
flip = False
for wrd in stud_id:
if(flip):
outfilename = outfilename+wrd
if(wrd=="sync"):
flip=True
'''
outfilename = stud_id[len(stud_id)-1]
#copy_file_name = "modified_files/"+exercise_name+"/"+outfilename+".ml"
copy_file_name = "modified_files/"+outfilename
copy_file = open(copy_file_name,'w')
with open(fname, 'r') as readfile:
contents = readfile.read()
copy_file.write(contents)
#creating a companion
companion_file_name = copy_file_name.split('.')[0] +"_companion.ml"
companion_file = open(companion_file_name,'a')
'''
for comp in files :
other_stud_id = str(comp).split('/')
other_exercise_name = (other_stud_id[len(other_stud_id)-1]).split('.',1)[0]
if(other_exercise_name != exercise_name):
continue
if comp == fname:
continue
with open(comp, 'r') as readfile:
contents = readfile.read()
companion_file.write(contents)
'''
for comp in files :
if comp == fname:
continue
with open(comp, 'r') as readfile:
contents = readfile.read()
companion_file.write(contents)
directory=""
if(len(sys.argv)>1):
directory=sys.argv[1]
else:
directory="/home/abhinav/Desktop/mitacs/code/mini/nate/data"
extract(directory)
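The header mentions n^2 Jaccard values, but no similarity is actually computed in this script; a minimal, hypothetical token-set Jaccard helper (an assumption, not the project's own implementation) could look like this:
def jaccard(text_a, text_b):
    tokens_a, tokens_b = set(text_a.split()), set(text_b.split())
    if not tokens_a and not tokens_b:
        return 1.0
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)

print(jaccard("let x = 1", "let y = 1"))  # 0.6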
|
AbhinavDutta/mini-moss
|
extract.py
|
extract.py
|
py
| 2,490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26023690910
|
import matplotlib.pyplot as plt
import numpy as np
x=np.arange(-10,10,0.01)
y=1/(np.sin(x)+2)
z=1/(np.cos(x)+2)
plt.plot(x,y,x,z) # draw both curves on the same figure
fig2,(axs1,axs2)=plt.subplots(2,1) # create two axes arranged in a (2, 1) grid
axs1.plot(x,y)
axs2.plot(x,z) # plot each curve separately, one per axis
plt.show()
|
suanhaitech/pythonstudy2023
|
Wangwenbin/Matplotlib1.py
|
Matplotlib1.py
|
py
| 388 |
python
|
en
|
code
| 2 |
github-code
|
6
|
45017345126
|
#!/usr/bin/env python3
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# DESCRIPTION:
#
# CALL SAMPLE:
# ~/data/solarity/sit-raspi/modbus/direct_marketing_interface.py --host_ip '192.168.0.34' --host_mac '00:90:E8:7B:76:9C' -v -t
#
# REQUIRE
#
# CALL PARAMETERS:
# 1)
#
# @author: Philippe Gachoud
# @creation: 20200408
# @last modification:
# @version: 1.0
# @URL: $URL
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# INCLUDES
try:
import sys
import os, errno
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib')) #the way to import directories
from sit_logger import SitLogger
from pymodbus.constants import Endian
from sit_modbus_device import SitModbusDevice #from file_name import ClassName
from sit_modbus_register import SitModbusRegister
from inverter_manager import InverterManager
#import sitmodbus#, SitModbusRegister
import logging # http://www.onlamp.com/pub/a/python/2005/06/02/logging.html
from logging import handlers
import argparse
#sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'pysunspec'))
from datetime import datetime, date, time, timedelta
except ImportError as l_err:
print("ImportError: {0}".format(l_err))
raise l_err
class DirectMarketerInterface(InverterManager):
# CONSTANTS
DEFAULT_SLAVE_ADDRESS = 200
# CLASS ATTRIBUTES
# FUNCTIONS DEFINITION
"""
Initialize
"""
def __init__(self, a_slave_address=DEFAULT_SLAVE_ADDRESS):
try:
self.init_arg_parse()
l_slave_address = self.DEFAULT_SLAVE_ADDRESS
if self._args.slave_address:
if self.valid_slave_address(self._args.slave_address):
self._slave_address = int(self._args.slave_address)
#self, a_slave_address=DEFAULT_SLAVE_ADDRESS, a_port=DEFAULT_MODBUS_PORT, an_ip_address=None
super().__init__(l_slave_address, a_port=self.DEFAULT_MODBUS_PORT, an_ip_address=self._args.host_ip)
self._logger = SitLogger().new_logger(__name__, self._args.host_mac)
self._init_sit_modbus_registers()
#self._logger.debug('init->' + self.out())
except OSError as l_e:
            self._logger.warning("init-> OSError, probably rollingFileAppender: %s" % (l_e))
            if l_e.errno != errno.ENOENT:
raise l_e
except Exception as l_e:
print('Error in init: %s' % (l_e))
raise l_e
#exit(1)
def _init_sit_modbus_registers(self):
"""
Initializes self._sit_modbus_registers
"""
# P.44 of doc
self.add_modbus_register('OutLimitPerc', 'Specified output limitation through direct marketer n% (0-10000)', 1, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_RW, 'uint16')
self.add_modbus_register('OutLimitPercMan', 'Manual output limitation that has been set via Sunspec Modbus', 2, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')
self.add_modbus_register('OutLimitPercIoBox', 'Output limitation through the electric utility company that has been set via the IO box.', 3, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')
self.add_modbus_register('OutLimitMin', 'Minimum of all output limitations. The nominal PV system power is derated to this value.', 4, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')
# self.add_modbus_register('Md', 'Model (Md): SMA Inverter Manager', 40021, SitModbusRegister.REGISTER_TYPE_STRING_16, SitModbusRegister.ACCESS_MODE_R, 'String16')
# self.add_modbus_register('Opt', 'Options (Opt): Inverter Manager name', 40037, SitModbusRegister.REGISTER_TYPE_STRING_8, SitModbusRegister.ACCESS_MODE_R, 'String8')
# self.add_modbus_register('Vr', 'Version (Vr): Version number of the installed firmware', 40045, SitModbusRegister.REGISTER_TYPE_STRING_8, SitModbusRegister.ACCESS_MODE_R, 'String8')
# self.add_modbus_register('SN', 'Serial number (SN) of the device that uses the Modbus unit ID', 40053, SitModbusRegister.REGISTER_TYPE_STRING_16, SitModbusRegister.ACCESS_MODE_R, 'String16')
# self.add_modbus_register('PPVphA', 'Voltage, line conductor L1 to N (PPVphA), in V-V_SF (40199): average value of all inverters', 40196, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'V', 40199)
# self.add_modbus_register('AC_A', 'AC Current sum of all inverters', 40188, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'A', 40192)
# self.add_modbus_register('W', 'Active power (W), in W-W_SF (40201): sum of all inverters', 40200, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'W', 40192)
# self.add_modbus_register('WH', 'Total yield (WH), in Wh WH_SF (40212): sum of all inverters', 40210, SitModbusRegister.REGISTER_TYPE_INT_32, SitModbusRegister.ACCESS_MODE_R, 'WH', 40212)
# self.add_modbus_register('TmpCab', 'Internal temperature, in °C Tmp_SF (40223): average value of all inverters', 40219, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, '°C', 40223)
# self.add_modbus_register('ID', 'Model ID (ID): 120 = Sunspec nameplate model', 40238, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')
# self.add_modbus_register('VArPct_Mod', 'Mode of the percentile reactive power limitation: 1 = in % of WMax', 40365, SitModbusRegister.REGISTER_TYPE_ENUM_16, SitModbusRegister.ACCESS_MODE_R, 'enum16')
# self.add_modbus_register('VArPct_Ena', 'Control of the percentile reactive power limitation,(SMA: Qext): 1 = activated', 40365, SitModbusRegister.REGISTER_TYPE_ENUM_16, SitModbusRegister.ACCESS_MODE_RW, 'enum16')
def init_arg_parse(self):
"""
Parsing arguments
"""
self._parser = argparse.ArgumentParser(description='Actions with Inverter Manager through TCP')
self._parser.add_argument('-v', '--verbose', help='increase output verbosity', action="store_true")
self._parser.add_argument('-t', '--test', help='Runs test method', action="store_true")
self._parser.add_argument('-u', '--slave_address', help='Slave address of modbus device', nargs='?')
#self._parser.add_argument('-u', '--base_url', help='NOT_IMPLEMENTED:Gives the base URL for requests actions', nargs='?', default=self.DEFAULT_BASE_URL)
l_required_named = self._parser.add_argument_group('required named arguments')
l_required_named.add_argument('-i', '--host_ip', help='Host IP', nargs='?', required=True)
l_required_named.add_argument('-m', '--host_mac', help='Host MAC', nargs='?', required=True)
# l_required_named.add_argument('-l', '--longitude', help='Longitude coordinate (beware timezone is set to Chile)', nargs='?', required=True)
# l_required_named.add_argument('-a', '--lattitude', help='Lattitude coordinate (beware timezone is set to Chile)', nargs='?', required=True)
# l_required_named.add_argument('-d', '--device_type', help='Device Type:' + ('|'.join(str(l) for l in self.DEVICE_TYPES_ARRAY)), nargs='?', required=True)
l_args = self._parser.parse_args()
self._args = l_args
# ACCESS
# IMPLEMENTATION
# EXECUTE ARGS
"""
Parsing arguments and calling corresponding functions
"""
def execute_corresponding_args(self):
if self._args.verbose:
self._logger.setLevel(logging.DEBUG)
else:
            self._logger.setLevel(logging.INFO)
if self._args.test:
self.test()
#if self._args.store_values:
"""
Test function
"""
def test(self):
try:
self.connect()
self.read_all_sit_modbus_registers()
print ("################# BEGIN #################")
# self._logger.info("--> ************* device models *************: %s" % (l_d.models)) #Lists properties to be loaded with l_d.<property>.read() and then access them
# self._logger.info("-->inverter ************* l_d.inverter.points *************: %s" % (l_d.inverter.points)) #Gives the inverter available properties
# self._logger.info("-->inverter ************* common *************: %s" % (l_d.common))
# self._logger.info("-->inverter ************* common Serial Number *************: %s" % (l_d.common.SN))
print ("################# END #################")
except Exception as l_e:
self._logger.exception("Exception occured: %s" % (l_e))
print('Error: %s' % (l_e))
self._logger.error('Error: %s' % (l_e))
raise l_e
finally:
self.disconnect()
"""
Main method
"""
def main():
#logging.basicConfig(level=logging.DEBUG, stream=sys.stdout, filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s")
logger = logging.getLogger(__name__)
try:
l_obj = DirectMarketerInterface()
l_obj.execute_corresponding_args()
# l_id.test()
pass
except KeyboardInterrupt:
logger.exception("Keyboard interruption")
except Exception:
logger.exception("Exception occured")
finally:
logger.info("Main method end -- end of script")
if __name__ == '__main__':
main()
|
phgachoud/sty-pub-raspi-modbus-drivers
|
sma/direct_marketing_interface.py
|
direct_marketing_interface.py
|
py
| 8,844 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42597128032
|
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
df=pd.read_csv("insurance.csv")
tem=pd.get_dummies(df["region"])
df.drop("region",axis=1,inplace=True)
df=pd.concat([df,tem],axis=1)
print(df.head(10))
map={"yes":1,"no":0}
df["smoker"]=df["smoker"].map(map)
map1={"female":0,"male":1}
df["sex"]=df["sex"].map(map1)
print(df.head(10))
df.corr()
plt.figure(figsize=(20,20))
sns.heatmap(df.corr(),annot=True,cmap="coolwarm",linewidths=2)
plt.show()
x=df["smoker"]
y=df["charges"]
plt.figure(figsize=(12,9))
plt.scatter(x,y)
plt.xlabel("Non Smoker Vs Smoker")
plt.ylabel("Charges")
Y=df["charges"]
X=df.drop("charges",axis=1)
from sklearn.model_selection import train_test_split
#Splitting the data into 85% for training and 15% for testing
x_train,x_test,y_train,y_test=train_test_split(X,Y,random_state=1,test_size=0.15)
from sklearn.linear_model import LinearRegression
#Training a multiple linear regression model
reg=LinearRegression().fit(x_train,y_train)
y_pred=reg.predict(x_test)
from sklearn.metrics import r2_score
#Checking the R squared error on test data
r2_score(y_test,y_pred)
# Storing independent features in a temporary variable
P_X=X
from sklearn.preprocessing import PolynomialFeatures
#Changing the data to a 3rd degree polynomial
pol=PolynomialFeatures(degree=3)
P_X=pol.fit_transform(X)
P_X
#Training the model similarly but with 3rd degree polynomial of X this time
x_train,x_test,y_train,y_test=train_test_split(P_X,Y,random_state=1,test_size=0.15)
reg=LinearRegression().fit(x_train,y_train)
y_pred=reg.predict(x_test)
r2_score(y_test,y_pred)
#Cross validating the score to check and avoid overfitting
from sklearn.model_selection import cross_val_score
c=cross_val_score(reg,P_X,Y,cv=4)
c
# Final Mean Accuracy
print("Mean accuracy after cross validation is:",c.mean()*100,end="%")
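A small illustration (synthetic two-feature input, independent of insurance.csv) of what PolynomialFeatures(degree=3) does to the design matrix before the regression above is fitted:
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

demo = np.array([[2.0, 3.0]])
expanded = PolynomialFeatures(degree=3).fit_transform(demo)
print(expanded)
# [[ 1.  2.  3.  4.  6.  9.  8. 12. 18. 27.]] -> 1, x1, x2, x1^2, x1*x2, x2^2, x1^3, x1^2*x2, x1*x2^2, x2^3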
|
manav88/Medical-cost-prediction
|
med_cost.py
|
med_cost.py
|
py
| 1,956 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8670869554
|
import numpy as np
from neuroglancer_interface.utils.rotation_utils import (
rotate_matrix)
def test_rotate_matrix():
rng = np.random.default_rng(665234)
base_arr = rng.random((5, 6, 7))
actual = rotate_matrix(
data=base_arr,
rotation_matrix = [[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
expected = base_arr.transpose(2, 1, 0)
np.testing.assert_allclose(
actual,
expected,
rtol=1.0e-6,
atol=0.0)
actual = rotate_matrix(
data=base_arr,
rotation_matrix = [[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
expected = base_arr.transpose(1, 2, 0)
np.testing.assert_allclose(
actual,
expected,
rtol=1.0e-6,
atol=0.0)
actual = rotate_matrix(
data=base_arr,
rotation_matrix = [[0, 1, 0],
[0, 0, -1],
[1, 0, 0]])
expected = np.zeros((6, 7, 5), dtype=float)
for ix in range(6):
for iy in range(7):
for iz in range(5):
expected[ix, iy, iz] = base_arr[iz, ix, base_arr.shape[2]-1-iy]
np.testing.assert_allclose(
actual,
expected,
rtol=1.0e-6,
atol=0.0)
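For intuition, a tiny check (hypothetical array shape, not from the test suite) that a pure axis-permutation rotation matrix corresponds to numpy's transpose of axes, which is what the first two cases above rely on:
import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
swapped = arr.transpose(2, 1, 0)  # swap axes 0 and 2, like the [[0,0,1],[0,1,0],[1,0,0]] matrix
print(swapped.shape)  # (4, 3, 2)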
|
AllenInstitute/neuroglancer_formatting_scripts
|
tests/utils/test_rotation_utils.py
|
test_rotation_utils.py
|
py
| 1,318 |
python
|
en
|
code
| 2 |
github-code
|
6
|