content (stringlengths 0–894k) | type (stringclasses, 2 values)
---|---
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import os
import sentence_transformers
import tarfile
import gzip
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS / TED2020 use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it does not matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
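# For reference, the ISO-639-2 codes used below correspond to these ISO-639-1 codes:
# eng -> en, deu -> de, ara -> ar, tur -> tr, spa -> es, ita -> it, fra -> fr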
source_languages = set(['eng'])
target_languages = set(['deu', 'ara', 'tur', 'spa', 'ita', 'fra'])
num_dev_sentences = 1000 #Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, 'sentences.tar.bz2')
sentences_file = os.path.join(tatoeba_folder, 'sentences.csv')
links_file_bz2 = os.path.join(tatoeba_folder, 'links.tar.bz2')
links_file = os.path.join(tatoeba_folder, 'links.csv')
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
#Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url+os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
#Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract('sentences.csv', path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract('links.csv', path=tatoeba_folder)
tar.close()
#Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding='utf8') as fIn:
for line in fIn:
id, lang, sentence = line.strip().split('\t')
if lang in all_langs:
sentences[id] = (lang, sentence)
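# Each line of sentences.csv is tab-separated: <sentence id>\t<3-letter language code>\t<sentence text>,
# e.g. "12345<TAB>eng<TAB>Some example sentence." (values are illustrative, not an actual corpus entry)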
#Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding='utf8') as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
#Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(os.path.join(output_folder, 'Tatoeba-{}-{}-dev.tsv.gz'.format(src_lang, trg_lang)), 'wt', encoding='utf8') as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent]+translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(os.path.join(output_folder, 'Tatoeba-{}-{}-train.tsv.gz'.format(src_lang, trg_lang)), 'wt', encoding='utf8') as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent]+translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
python
|
from django.test import TestCase
from unittest2 import skipIf
from django.db import connection
import json
import re
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.test import missing_url
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.dao.dataset import create_dataset_from_query
from sqlshare_rest.util.db import is_mssql, is_mysql, is_sqlite3, is_pg
import six
if six.PY2:
from StringIO import StringIO
elif six.PY3:
from io import StringIO
@skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
SQLSHARE_QUERY_CACHE_DB="test_ss_query_db",
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
)
class DownloadAPITest(BaseAPITest):
token = None
query_id = None
def test_download(self):
owner = "test_dataset_download2"
self.remove_users.append(owner)
auth_headers = self.get_auth_header_for_username(owner)
post_url = reverse("sqlshare_view_init_download")
response = self.client.post(post_url, data=json.dumps({'sql': 'SELECT (1)', 'downloads': 1}), content_type="application/json", **auth_headers)
self.assertEqual(response.status_code, 200)
download_url = response["Location"]
response2 = self.client.get(download_url, content_type='application/json')
self.assertEqual(response2.status_code, 200)
self.assertTrue(response2.streaming)
response_body = StringIO("".join(map(lambda x: x.decode("utf-8-sig"), response2.streaming_content))).read()
if is_mssql():
resp = '""\n"1"\n'
elif is_mysql():
resp = '"1"\n"1"\n'
elif is_pg():
resp = '"?column?"\n"1"\n'
else:
resp = '"(1)"\n"1"\n'
self.assertEqual(response_body, resp)
# Ensure download only works once
response = self.client.get(download_url, content_type='application/json')
self.assertEqual(response.status_code, 404)
def test_bad_query(self):
owner = "test_invalid_download_owner"
other = "test_invalid_download_sneak"
self.remove_users.append(owner)
self.remove_users.append(other)
get_backend().get_user(other)
model = create_dataset_from_query(username=owner, dataset_name="test_download_2", sql="SELECT (3)")
if is_mssql():
sql = "SELECT * FROM [test_invalid_download_owner].[test_download_2]"
elif is_mysql():
sql = "SELECT * FROM `test_invalid_download_owner`.`test_download_2`";
else:
sql = "SELECT * FROM test_download_2";
post_url = reverse("sqlshare_view_init_download")
other_auth_headers = self.get_auth_header_for_username(other)
# Now try just invalid sql
response = self.client.post(post_url, data=json.dumps({'sql': "SELECT (1", 'downloads': 1}), content_type="application/json", **other_auth_headers)
self.assertEqual(response.status_code, 200)
download_url = response["Location"]
response2 = self.client.get(download_url, content_type='application/json')
self.assertEqual(response2.status_code, 200)
if is_sqlite3():
# sqlite3 doesn't have permissions for the test below to fail on...
return
# Test a user w/ no access trying to download a dataset's content.
response = self.client.post(post_url, data=json.dumps({'sql': sql, 'downloads': 1}), content_type="application/json", **other_auth_headers)
self.assertEqual(response.status_code, 200)
download_url = response["Location"]
response2 = self.client.get(download_url, content_type='application/json')
self.assertEqual(response2.status_code, 200)
def test_bad_download(self):
owner = "query_user1"
self.remove_users.append(owner)
# bad query id
post_url = reverse("sqlshare_view_run_download", kwargs={'token': 'asd'})
auth_headers = self.get_auth_header_for_username(owner)
response = self.client.get(post_url, content_type='application/json')
self.assertEqual(response.status_code, 404)
def test_bad_methods(self):
owner = "query_user1"
auth_headers = self.get_auth_header_for_username(owner)
init_url = reverse("sqlshare_view_init_download")
init_response = self.client.get(init_url, content_type='application/json', **auth_headers)
self.assertEqual(init_response.status_code, 405)
download_url = reverse("sqlshare_view_run_download", kwargs={ 'token' : 'asd1234'})
download_response = self.client.post(download_url, content_type='application/json')
self.assertEqual(download_response.status_code, 405)
|
python
|
from rest_framework import status
from rest_framework.test import APITestCase
from .. import models
from .. import serializers
class DocumentTopicTestCase(APITestCase):
URL = '/v1/m2m/document/topic/'
DOCUMENT_URL = '/v1/document/'
def test_create_document_topic(self):
topic_data = {
'short_descriptor': 'support',
'long_descriptor': 'support for developers'
}
document_data = {
'title': 'Test',
'content': 'This is the test content'
}
document = models.Document.objects.create(**document_data)
topic = models.Topic.objects.create(**topic_data)
data = {
'document': str(document.id),
'topic': str(topic.id)
}
res = self.client.post(self.URL, data)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
document_topic = models.DocumentTopic.objects.filter(
document__id=data['document']
).first()
self.assertDictEqual(res.data, serializers.DocumentTopicSerializer(
document_topic
).data)
res_document = self.client.get(f'{self.DOCUMENT_URL}{document.id}/')
self.assertIn(topic.id, res_document.data['topics'])
def test_delete_document_topic(self):
topic_data = {
'short_descriptor': 'support',
'long_descriptor': 'support for developers'
}
document_data = {
'title': 'Test',
'content': 'This is the test content'
}
document = models.Document.objects.create(**document_data)
topic = models.Topic.objects.create(**topic_data)
data = {
'document': document,
'topic': topic
}
document_topic = models.DocumentTopic.objects.create(**data)
res = self.client.delete(f'{self.URL}{document_topic.id}/')
self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
res_document = self.client.get(f'{self.DOCUMENT_URL}{document.id}/')
self.assertNotIn(topic.id, res_document.data['topics'])
|
python
|
#!/usr/bin/env python
import sys
import numpy
from numpy import matrix
class Policy(object):
actions = None
policy = None
def __init__(self, num_states, num_actions, filename='policy/default.policy'):
try:
f = open(filename, 'r')
except IOError:
print('\nError: unable to open file: ' + filename)
sys.exit(1)
lines = f.readlines()
# the first three and the last lines are not related to the actual policy
lines = lines[3:]
self.actions = -1 * numpy.ones((len(lines)-1, 1, ))
self.policy = numpy.zeros((len(lines)-1, num_states, ))
for i in range(len(lines)-1):
# print("this line:\n\n" + lines[i])
if lines[i].find('/AlphaVector') >= 0:
break
l = lines[i].find('"')
r = lines[i].find('"', l + 1)
self.actions[i] = int(lines[i][l + 1 : r])
ll = lines[i].find('>')
rr = lines[i].find(' <')
# print(str(i))
self.policy[i] = numpy.matrix(lines[i][ll + 1 : rr])
f.close()
def select_action(self, b):
# sanity check if probabilities sum up to 1
if abs(sum(b) - 1.0) > 0.00001:
print('Error: belief does not sum to 1, diff: ', sum(b)[0] - 1.0)
sys.exit()
return self.actions[numpy.argmax(numpy.dot(self.policy, b.T)), 0]
# return numpy.argmax(b) + 12
# return numpy.random.randint(24, size=1)[0]
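# Usage sketch (illustrative; the exact .policy file is produced by a POMDP solver such as APPL/SARSOP):
# the parser above expects one alpha vector per line, roughly of the form
#   <Vector action="3" ...> 0.1 0.2 0.3 </Vector>
# i.e. the action index is the first quoted token and the vector entries sit between '>' and ' <'.
#
# p = Policy(num_states=3, num_actions=4, filename='policy/default.policy')  # hypothetical file
# belief = numpy.array([0.2, 0.5, 0.3])  # 1-D belief vector that sums to 1
# a = p.select_action(belief)            # index of the action whose alpha vector maximizes alpha . belief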
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Credits: Benjamin Dartigues, Emmanuel Bouilhol, Hayssam Soueidan, Macha Nikolski
import pathlib
from loguru import logger
import constants
import plot
import numpy as np
import helpers
from image_set import ImageSet
from helpers import open_repo
from path import global_root_dir
import collections
def mrna_cytoplasmic_total_count(analysis_repo, keyorder):
gene2image_set = {}
gene2cyto_count = {}
gene2median_cyto_count = {}
gene2error = {}
gene2confidence_interval = {}
for gene in constants.analysis_config['MRNA_GENES']:
logger.info("Running mrna cytoplasmic total count analysis for {}", gene)
gene2image_set[gene] = ImageSet(analysis_repo, ['mrna/%s/' % gene])
gene2cyto_count[gene] = gene2image_set[gene].compute_cytoplasmic_spots_counts()
gene2median_cyto_count[gene] = np.median(gene2cyto_count[gene])
gene2error[gene] = helpers.sem(gene2cyto_count[gene], factor=0)
lower, higher = helpers.median_confidence_interval(gene2cyto_count[gene])
gene2confidence_interval[gene] = [lower, higher]
# generate bar plot image
gene2median_cyto_count = collections.OrderedDict(sorted(gene2median_cyto_count.items(), key=lambda i: keyorder.index(i[0])))
gene2error = collections.OrderedDict(sorted(gene2error.items(), key=lambda i: keyorder.index(i[0])))
gene2confidence_interval = collections.OrderedDict(sorted(gene2confidence_interval.items(), key=lambda i: keyorder.index(i[0])))
xlabels = constants.analysis_config['MRNA_GENES_LABEL']
tgt_image_name = constants.analysis_config['FIGURE_NAME_FORMAT'].format(molecule_type="mrna")
tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
tgt_image_name)
plot.bar_profile_median(gene2median_cyto_count,
gene2error.values(),
'mrna',
xlabels,
tgt_fp,
gene2confidence_interval,
annot=False,
data_to_annot=gene2cyto_count
)
# generate violin plot image
tgt_image_name = constants.analysis_config['FIGURE_NAME_VIOLIN_FORMAT'].format(molecule_type="mrna")
tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
tgt_image_name)
plot.violin_profile(gene2cyto_count, tgt_fp, xlabels, rotation=0, annot=False)
def intensities_cytoplasmic_total_count(analysis_repo, keyorder):
gene2cyto_count = {}
gene2median_cyto_count = {}
gene2error = {}
gene2confidence_interval = {}
for gene in constants.analysis_config['PROTEINS']:
logger.info("Running protein cytoplasmic total count analysis for {}", gene)
imageset = ImageSet(analysis_repo, ['protein/%s/' % gene])
gene2cyto_count[gene] = imageset.compute_cytoplasmic_intensities()
gene2median_cyto_count[gene] = np.median(gene2cyto_count[gene])
gene2error[gene] = helpers.sem(gene2cyto_count[gene], factor=0)
lower, higher = helpers.median_confidence_interval(gene2cyto_count[gene])
gene2confidence_interval[gene] = [lower, higher]
# generate bar plot image
gene2median_cyto_count = collections.OrderedDict(sorted(gene2median_cyto_count.items(), key=lambda i: keyorder.index(i[0])))
gene2error = collections.OrderedDict(sorted(gene2error.items(), key=lambda i: keyorder.index(i[0])))
gene2confidence_interval = collections.OrderedDict(sorted(gene2confidence_interval.items(), key=lambda i: keyorder.index(i[0])))
xlabels = constants.analysis_config['PROTEINS_LABEL']
tgt_image_name = constants.analysis_config['FIGURE_NAME_FORMAT'].format(molecule_type="protein")
tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
tgt_image_name)
plot.bar_profile_median(gene2median_cyto_count,
gene2error.values(),
'proteins',
xlabels,
tgt_fp,
gene2confidence_interval,
annot=False,
data_to_annot=gene2cyto_count
)
# generate violin plot image
tgt_image_name = constants.analysis_config['FIGURE_NAME_VIOLIN_FORMAT'].format(molecule_type="protein")
tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
tgt_image_name)
plot.violin_profile(gene2cyto_count, tgt_fp, xlabels, rotation=0, annot=True)
'''
Figure 5A left panel: arhgdia and arhgdia prrc2c mRNA cytoplasmic total count
Figure 5A left panel: arhgdia and arhgdia prrc2c protein cytoplasmic total count
Figure S6A top left panel: arhgdia and arhgdia nocodazole mRNA cytoplasmic total count
Figure S6A top right panel: arhgdia and arhgdia nocodazole protein cytoplasmic total count
Figure S6A middle left panel: pard3 and pard3 nocodazole mRNA cytoplasmic total count
Figure S6A middle right panel: arhgdia and arhgdia CytoD protein cytoplasmic total count
Figure S6A bottom left panel: arhgdia cytod mRNA cytoplasmic total count
Figure S6A bottom right panel: arhgdia cytod protein cytoplasmic total count
'''
configurations = [
["src/analysis/cytoplasmic_total_count/config_prrc2c.json", ["arhgdia/control", "arhgdia/prrc2c_depleted"], "Timepoint"],
["src/analysis/cytoplasmic_total_count/config_nocodazole_arhgdia.json", ["arhgdia", "arhgdia_nocodazole"], "Gene"],
["src/analysis/cytoplasmic_total_count/config_nocodazole_pard3.json", ["pard3", "pard3_nocodazole"], "Gene"],
["src/analysis/cytoplasmic_total_count/config_cytod.json", ["arhgdia_control", "arhgdia_cytod"], "Gene"]
]
if __name__ == '__main__':
for conf in configurations:
conf_full_path = pathlib.Path(global_root_dir, conf[0])
constants.init_config(analysis_config_js_path=conf_full_path)
repo = open_repo()
key_order = conf[1]
mrna_cytoplasmic_total_count(repo, key_order)
intensities_cytoplasmic_total_count(repo, key_order)
|
python
|
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import networkx as nx
import numpy as np
import sklearn.metrics as metrics
import torch
import torch.nn as nn
from torch.autograd import Variable
import tensorboardX
from tensorboardX import SummaryWriter
import argparse
import os
import pickle
import random
import shutil
import time
import cross_test
import cross_val
import encoders
import gen.feat as featgen
import gen.data as datagen
from graph_sampler import GraphSampler
import load_data
import util
import copy
import math
'''
Changes made:
1. Split the dataset into train, valid and test (test_num is hard-coded in the call to cross_val.prepare_val_data)
2. Call benchmark_task_val(prog_args, writer=writer, feat='node-feat') with the added feat argument; by default only the one-hot encoding of node_label is used
3. Model changes: (1) added a residual network
'''
def evaluate(dataset, model, args, name='Validation', max_num_examples=None):
model.eval()
labels = []
preds = []
for batch_idx, data in enumerate(dataset):
adj = Variable(data['adj'].float(), requires_grad=False)
h0 = Variable(data['feats'].float())
labels.append(data['label'].long().numpy())
batch_num_nodes = data['num_nodes'].int().numpy()
assign_input = Variable(data['assign_feats'].float(), requires_grad=False)
res = Variable(data['res'].float(), requires_grad=False)
ypred = model(h0, adj, batch_num_nodes, assign_x=assign_input,res_x=res)
_, indices = torch.max(ypred, 1)
preds.append(indices.cpu().data.numpy())
if max_num_examples is not None:
if (batch_idx+1)*args.batch_size > max_num_examples:
break
labels = np.hstack(labels)
preds = np.hstack(preds)
fpr, tpr, thresholds = metrics.roc_curve(labels, preds, pos_label=0)
ravel = metrics.confusion_matrix(labels, preds).ravel()
TP,FN ,FP,TN =ravel
a = TP + FP
b = TP + FN
c = TN + FP
d = TN + FN
mcc=((TP*TN)-(FP*FN))/(math.sqrt(float(a*b*c*d)+0.0001))
result = {'prec': metrics.precision_score(labels, preds, pos_label=0, average='binary'),#macro
'recall': metrics.recall_score(labels, preds, pos_label=0, average='binary'),#macro
'acc': metrics.accuracy_score(labels, preds),
'F1': metrics.f1_score(labels, preds, pos_label=0, average="binary"),#micro
'ravel': ravel,
'auc': metrics.auc(fpr, tpr),
'mcc': mcc}
print(name, " accuracy:", result['acc'])
# print(name, ' tn, fp, fn, tp=', result['ravel'])
# print(name, ' auc:', result['auc'])
# print(name, ' mcc:', result['mcc'])
# print(name, " recall:", result['recall'])
return result
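# Note: the metrics above use class 0 as the positive class (pos_label=0), so unpacking
# metrics.confusion_matrix(labels, preds).ravel() as TP, FN, FP, TN is consistent with that
# convention (ravel() returns [C[0,0], C[0,1], C[1,0], C[1,1]] for the sorted labels [0, 1]).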
def gen_prefix(args):
if args.bmname is not None:
name = args.bmname
else:
name = args.dataset
name += '_' + args.method
if args.method == 'soft-assign':
name += '_l' + str(args.num_gc_layers) + 'x' + str(args.num_pool)
name += '_ar' + str(int(args.assign_ratio*100))
if args.linkpred:
name += '_lp'
else:
name += '_l' + str(args.num_gc_layers)
name += '_h' + str(args.hidden_dim) + '_o' + str(args.output_dim)
if not args.bias:
name += '_nobias'
if len(args.name_suffix) > 0:
name += '_' + args.name_suffix
return name
def gen_train_plt_name(args):
return 'results/' + gen_prefix(args) + '.png'
def log_assignment(assign_tensor, writer, epoch, batch_idx):
plt.switch_backend('agg')
fig = plt.figure(figsize=(8,6), dpi=300)
# has to be smaller than args.batch_size
for i in range(len(batch_idx)):
plt.subplot(2, 2, i+1)
plt.imshow(assign_tensor.cpu().data.numpy()[batch_idx[i]], cmap=plt.get_cmap('BuPu'))
cbar = plt.colorbar()
cbar.solids.set_edgecolor("face")
plt.tight_layout()
fig.canvas.draw()
#data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
#data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
data = tensorboardX.utils.figure_to_image(fig)
writer.add_image('assignment', data, epoch)
def log_graph(adj, batch_num_nodes, writer, epoch, batch_idx, assign_tensor=None):
plt.switch_backend('agg')
fig = plt.figure(figsize=(8,6), dpi=300)
for i in range(len(batch_idx)):
ax = plt.subplot(2, 2, i+1)
num_nodes = batch_num_nodes[batch_idx[i]]
adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
G = nx.from_numpy_matrix(adj_matrix)
nx.draw(G, pos=nx.spring_layout(G), with_labels=True, node_color='#336699',
edge_color='grey', width=0.5, node_size=300,
alpha=0.7)
ax.xaxis.set_visible(False)
plt.tight_layout()
fig.canvas.draw()
#data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
#data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
data = tensorboardX.utils.figure_to_image(fig)
writer.add_image('graphs', data, epoch)
# log a label-less version
#fig = plt.figure(figsize=(8,6), dpi=300)
#for i in range(len(batch_idx)):
# ax = plt.subplot(2, 2, i+1)
# num_nodes = batch_num_nodes[batch_idx[i]]
# adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
# G = nx.from_numpy_matrix(adj_matrix)
# nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color='#336699',
# edge_color='grey', width=0.5, node_size=25,
# alpha=0.8)
#plt.tight_layout()
#fig.canvas.draw()
#data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
#data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
#writer.add_image('graphs_no_label', data, epoch)
# colored according to assignment
assignment = assign_tensor.cpu().data.numpy()
fig = plt.figure(figsize=(8,6), dpi=300)
num_clusters = assignment.shape[2]
all_colors = np.array(range(num_clusters))
for i in range(len(batch_idx)):
ax = plt.subplot(2, 2, i+1)
num_nodes = batch_num_nodes[batch_idx[i]]
adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
label = np.argmax(assignment[batch_idx[i]], axis=1).astype(int)
label = label[: batch_num_nodes[batch_idx[i]]]
node_colors = all_colors[label]
G = nx.from_numpy_matrix(adj_matrix)
nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color=node_colors,
edge_color='grey', width=0.4, node_size=50, cmap=plt.get_cmap('Set1'),
vmin=0, vmax=num_clusters-1,
alpha=0.8)
plt.tight_layout()
fig.canvas.draw()
#data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
#data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
data = tensorboardX.utils.figure_to_image(fig)
writer.add_image('graphs_colored', data, epoch)
def train(dataset, model, args, same_feat=True, val_dataset=None, test_dataset=None, writer=None,
mask_nodes = True,graphs=None,idx=0):
writer_batch_idx = [0, 3, 6, 9]
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, model.parameters()), lr=0.001)
iter = 0
best_val_result = {
'epoch': 0,
'loss': 0,
'acc': 0}
test_result = {
'epoch': 0,
'loss': 0,
'acc': 0}
train_accs = []
train_epochs = []
best_val_accs = []
best_val_epochs = []
test_accs = []
test_epochs = []
val_accs = []
for epoch in range(args.num_epochs):
total_time = 0
avg_loss = 0.0
model.train()
print('idx:',idx,'Epoch: ', epoch)
for batch_idx, data in enumerate(dataset):
begin_time = time.time()
model.zero_grad()
adj = Variable(data['adj'].float(), requires_grad=False)
h0 = Variable(data['feats'].float(), requires_grad=False)
label = Variable(data['label'].long())
batch_num_nodes = data['num_nodes'].int().numpy() if mask_nodes else None
assign_input = Variable(data['assign_feats'].float(), requires_grad=False)
res = Variable(data['res'].float(), requires_grad=False)
ypred = model(h0, adj, batch_num_nodes, assign_x=assign_input, res_x=res)
if not args.method == 'soft-assign' or not args.linkpred:
loss = model.loss(ypred, label)
else:
loss = model.loss(ypred, label, adj, batch_num_nodes)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
iter += 1
avg_loss += loss
#if iter % 20 == 0:
# print('Iter: ', iter, ', loss: ', loss.data[0])
elapsed = time.time() - begin_time
total_time += elapsed
# log once per XX epochs
if epoch % 10 == 0 and batch_idx == len(dataset) // 2 and args.method == 'soft-assign' and writer is not None:
log_assignment(model.assign_tensor, writer, epoch, writer_batch_idx)
if args.log_graph:
log_graph(adj, batch_num_nodes, writer, epoch, writer_batch_idx, model.assign_tensor)
avg_loss /= batch_idx + 1
if writer is not None:
writer.add_scalar('loss/avg_loss', avg_loss, epoch)
if args.linkpred:
writer.add_scalar('loss/linkpred_loss', model.link_loss, epoch)
print('Avg loss: ', avg_loss, '; epoch time: ', total_time)
result = evaluate(dataset, model, args, name='Train', max_num_examples=100)
train_accs.append(result['acc'])
train_epochs.append(epoch)
'''
'prec': metrics.precision_score(labels, preds, average='macro'),
'recall': metrics.recall_score(labels, preds, average='macro'),
'acc': metrics.accuracy_score(labels, preds),
'F1': metrics.f1_score(labels,
'''
if val_dataset is not None:
val_result = evaluate(val_dataset, model, args, name='Validation')
val_accs.append(val_result['acc'])
print('Val result:', val_result)
if val_result['acc'] > best_val_result['acc'] - 1e-7:
best_val_result['acc'] = val_result['acc']
best_val_result['epoch'] = epoch
best_val_result['loss'] = avg_loss
best_val_result['model'] = copy.deepcopy(model.state_dict())
print('Best val result: ', 'acc:', best_val_result['acc'], 'epoch:', best_val_result['epoch'], 'loss:',
best_val_result['loss'])
# print('acc:')
# print(best_val_result['acc'])
# print('epoch:')
# print(best_val_result['epoch'])
# print('loss:')
# print(best_val_result['loss'])
if test_dataset is not None:
test_result = evaluate(test_dataset, model, args, name='Test')
test_result['epoch'] = epoch
if writer is not None:
writer.add_scalar('acc/train_acc', result['acc'], epoch)
writer.add_scalar('acc/val_acc', val_result['acc'], epoch)
writer.add_scalar('loss/best_val_loss', best_val_result['loss'], epoch)
if test_dataset is not None:
writer.add_scalar('acc/test_acc', test_result['acc'], epoch)
best_val_epochs.append(best_val_result['epoch'])
best_val_accs.append(best_val_result['acc'])
if test_dataset is not None:
print('Test result: ', test_result)
test_epochs.append(test_result['epoch'])
test_accs.append(test_result['acc'])
for i in range(10):
test_loader = cross_test.prepare_test_data(graphs=graphs,args=args,max_nodes=args.max_nodes)
model.load_state_dict(best_val_result['model'])
evaluate(test_loader,model,args,name='Best model Test')
matplotlib.style.use('seaborn')
plt.switch_backend('agg')
plt.figure()
plt.plot(train_epochs, util.exp_moving_avg(train_accs, 0.85), '-', lw=1)
if test_dataset is not None:
plt.plot(best_val_epochs, best_val_accs, 'bo', test_epochs, test_accs, 'go')
plt.legend(['train', 'val', 'test'])
else:
plt.plot(best_val_epochs, best_val_accs, 'bo')
plt.legend(['train', 'val'])
plt.savefig(gen_train_plt_name(args), dpi=600)
plt.close()
matplotlib.style.use('default')
return model, val_accs
def prepare_data(graphs, args, test_graphs=None, max_nodes=0):
random.shuffle(graphs)
if test_graphs is None:
train_idx = int(len(graphs) * args.train_ratio)
test_idx = int(len(graphs) * (1-args.test_ratio))
train_graphs = graphs[:train_idx]
val_graphs = graphs[train_idx: test_idx]
test_graphs = graphs[test_idx:]
else:
train_idx = int(len(graphs) * args.train_ratio)
train_graphs = graphs[:train_idx]
val_graphs = graphs[train_idx:]
print('Num training graphs: ', len(train_graphs),
'; Num validation graphs: ', len(val_graphs),
'; Num testing graphs: ', len(test_graphs))
print('Number of graphs: ', len(graphs))
print('Number of edges: ', sum([G.number_of_edges() for G in graphs]))
print('Max, avg, std of graph size: ',
max([G.number_of_nodes() for G in graphs]), ', '
"{0:.2f}".format(np.mean([G.number_of_nodes() for G in graphs])), ', '
"{0:.2f}".format(np.std([G.number_of_nodes() for G in graphs])))
# minibatch
dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes,
features=args.feature_type)
train_dataset_loader = torch.utils.data.DataLoader(
dataset_sampler,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
dataset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes,
features=args.feature_type)
val_dataset_loader = torch.utils.data.DataLoader(
dataset_sampler,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers)
dataset_sampler = GraphSampler(test_graphs, normalize=False, max_num_nodes=max_nodes,
features=args.feature_type)
test_dataset_loader = torch.utils.data.DataLoader(
dataset_sampler,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers)
return train_dataset_loader, val_dataset_loader, test_dataset_loader, \
dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim
def syn_community1v2(args, writer=None, export_graphs=False):
# data
graphs1 = datagen.gen_ba(range(40, 60), range(4, 5), 500,
featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float)))
for G in graphs1:
G.graph['label'] = 0
if export_graphs:
util.draw_graph_list(graphs1[:16], 4, 4, 'figs/ba')
graphs2 = datagen.gen_2community_ba(range(20, 30), range(4, 5), 500, 0.3,
[featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))])
for G in graphs2:
G.graph['label'] = 1
if export_graphs:
util.draw_graph_list(graphs2[:16], 4, 4, 'figs/ba2')
graphs = graphs1 + graphs2
train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = prepare_data(graphs, args)
if args.method == 'soft-assign':
print('Method: soft-assign')
model = encoders.SoftPoolingGcnEncoder(
max_num_nodes,
input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
bn=args.bn, linkpred=args.linkpred, assign_input_dim=assign_input_dim)
elif args.method == 'base-set2set':
print('Method: base-set2set')
model = encoders.GcnSet2SetEncoder(input_dim, args.hidden_dim, args.output_dim, 2,
args.num_gc_layers, bn=args.bn)
else:
print('Method: base')
model = encoders.GcnEncoderGraph(input_dim, args.hidden_dim, args.output_dim, 2,
args.num_gc_layers, bn=args.bn)
train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
writer=writer)
def syn_community2hier(args, writer=None):
# data
feat_gen = [featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))]
graphs1 = datagen.gen_2hier(1000, [2,4], 10, range(4,5), 0.1, 0.03, feat_gen)
graphs2 = datagen.gen_2hier(1000, [3,3], 10, range(4,5), 0.1, 0.03, feat_gen)
graphs3 = datagen.gen_2community_ba(range(28, 33), range(4,7), 1000, 0.25, feat_gen)
for G in graphs1:
G.graph['label'] = 0
for G in graphs2:
G.graph['label'] = 1
for G in graphs3:
G.graph['label'] = 2
graphs = graphs1 + graphs2 + graphs3
train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = prepare_data(graphs, args)
if args.method == 'soft-assign':
print('Method: soft-assign')
model = encoders.SoftPoolingGcnEncoder(
max_num_nodes,
input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
bn=args.bn, linkpred=args.linkpred, args=args, assign_input_dim=assign_input_dim)
elif args.method == 'base-set2set':
print('Method: base-set2set')
model = encoders.GcnSet2SetEncoder(input_dim, args.hidden_dim, args.output_dim, 2,
args.num_gc_layers, bn=args.bn, args=args, assign_input_dim=assign_input_dim)
else:
print('Method: base')
model = encoders.GcnEncoderGraph(input_dim, args.hidden_dim, args.output_dim, 2,
args.num_gc_layers, bn=args.bn, args=args)
train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
writer=writer)
def pkl_task(args, feat=None):
with open(os.path.join(args.datadir, args.pkl_fname), 'rb') as pkl_file:
data = pickle.load(pkl_file)
graphs = data[0]
labels = data[1]
test_graphs = data[2]
test_labels = data[3]
for i in range(len(graphs)):
graphs[i].graph['label'] = labels[i]
for i in range(len(test_graphs)):
test_graphs[i].graph['label'] = test_labels[i]
if feat is None:
featgen_const = featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))
for G in graphs:
featgen_const.gen_node_features(G)
for G in test_graphs:
featgen_const.gen_node_features(G)
train_dataset, test_dataset, max_num_nodes = prepare_data(graphs, args, test_graphs=test_graphs)
model = encoders.GcnEncoderGraph(
args.input_dim, args.hidden_dim, args.output_dim, args.num_classes,
args.num_gc_layers, bn=args.bn)
train(train_dataset, model, args, test_dataset=test_dataset)
evaluate(test_dataset, model, args, 'Validation')
def benchmark_task(args, writer=None, feat='node-label'):
graphs = load_data.read_graphfile(args.datadir, args.bmname, max_nodes=args.max_nodes)
if feat == 'node-feat' and 'feat_dim' in graphs[0].graph:
print('Using node features')
input_dim = graphs[0].graph['feat_dim']
elif feat == 'node-label' and 'label' in graphs[0].node[0]:
print('Using node labels')
for G in graphs:
for u in G.nodes():
G.node[u]['feat'] = np.array(G.node[u]['label'])
else:
print('Using constant labels')
featgen_const = featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))
for G in graphs:
featgen_const.gen_node_features(G)
train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = \
prepare_data(graphs, args, max_nodes=args.max_nodes)
if args.method == 'soft-assign':
print('Method: soft-assign')
model = encoders.SoftPoolingGcnEncoder(
max_num_nodes,
input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
bn=args.bn, dropout=args.dropout, linkpred=args.linkpred, args=args,
assign_input_dim=assign_input_dim)
elif args.method == 'base-set2set':
print('Method: base-set2set')
model = encoders.GcnSet2SetEncoder(
input_dim, args.hidden_dim, args.output_dim, args.num_classes,
args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args)
else:
print('Method: base')
model = encoders.GcnEncoderGraph(
input_dim, args.hidden_dim, args.output_dim, args.num_classes,
args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args)
train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
writer=writer)
evaluate(test_dataset, model, args, 'Validation')
# benchmark: a standard / reference test procedure
def benchmark_task_val(args, writer=None, feat='node-label'):
all_vals = []
graphs = load_data.read_graphfile(args.datadir, args.bmname, max_nodes=args.max_nodes)
example_node = util.node_dict(graphs[0])[0]
if feat == 'node-feat' and 'feat_dim' in graphs[0].graph:
print('Using node features')
input_dim = graphs[0].graph['feat_dim']
elif feat == 'node-label' and 'label' in example_node:
print('Using node labels')
for G in graphs:
for u in G.nodes():
util.node_dict(G)[u]['feat'] = np.array(util.node_dict(G)[u]['label'])
else:
print('Using constant labels')
featgen_const = featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))
for G in graphs:
featgen_const.gen_node_features(G)
for i in range(1):
train_dataset, val_dataset, test_dataset,max_num_nodes, input_dim, assign_input_dim, res_dim = \
cross_val.prepare_val_data(graphs, args, i, max_nodes=args.max_nodes,train_num=500)#938
if args.method == 'soft-assign':
print('Method: soft-assign')
model = encoders.SoftPoolingGcnEncoder(
max_num_nodes,
input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
bn=args.bn, dropout=args.dropout, linkpred=args.linkpred, args=args,
assign_input_dim=assign_input_dim,res_dim=res_dim)
elif args.method == 'base-set2set':
print('Method: base-set2set')
model = encoders.GcnSet2SetEncoder(
input_dim, args.hidden_dim, args.output_dim, args.num_classes,
args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args)
else:
print('Method: base')
model = encoders.GcnEncoderGraph(
input_dim, args.hidden_dim, args.output_dim, args.num_classes,
args.num_gc_layers,bn=args.bn, dropout=args.dropout, args=args,assign_input_dim=assign_input_dim,res_dim=res_dim)
_, val_accs = train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
writer=writer,graphs=graphs,idx=i)
all_vals.append(np.array(val_accs))
all_vals = np.vstack(all_vals)
all_vals = np.mean(all_vals, axis=0)
print(all_vals)
print(np.max(all_vals))
print(np.argmax(all_vals))
def arg_parse():
parser = argparse.ArgumentParser(description='GraphPool arguments.')
# add_mutually_exclusive_group sets up conflicting arguments: use it when only one of the specified input arguments may be given at a time.
io_parser = parser.add_mutually_exclusive_group(required=False)
io_parser.add_argument('--dataset', dest='dataset',
help='Input dataset.')
# add_argument_group() groups arguments. Use it when arguments need to be grouped; input arguments are then listed under their group.
benchmark_parser = io_parser.add_argument_group()
benchmark_parser.add_argument('--bmname', dest='bmname',
help='Name of the benchmark dataset')
io_parser.add_argument('--pkl', dest='pkl_fname',
help='Name of the pkl data file')
# another argument group
softpool_parser = parser.add_argument_group()
softpool_parser.add_argument('--assign-ratio', dest='assign_ratio', type=float,
help='ratio of number of nodes in consecutive layers')
softpool_parser.add_argument('--num-pool', dest='num_pool', type=int,
help='number of pooling layers')
parser.add_argument('--linkpred', dest='linkpred', action='store_const',
const=True, default=False,
help='Whether link prediction side objective is used')
parser.add_argument('--datadir', dest='datadir',
help='Directory where benchmark is located')
parser.add_argument('--logdir', dest='logdir',
help='Tensorboard log directory')
parser.add_argument('--cpu', dest='cpu',
help='CPU.')
# parser.add_argument('--cuda', dest='cuda',
# help='CUDA.')
parser.add_argument('--max-nodes', dest='max_nodes', type=int,
help='Maximum number of nodes (ignore graphs with more nodes than this number).')
parser.add_argument('--lr', dest='lr', type=float,
help='Learning rate.')
parser.add_argument('--clip', dest='clip', type=float,
help='Gradient clipping.')
parser.add_argument('--batch-size', dest='batch_size', type=int,
help='Batch size.')
parser.add_argument('--epochs', dest='num_epochs', type=int,
help='Number of epochs to train.')
parser.add_argument('--train-ratio', dest='train_ratio', type=float,
help='Ratio of number of graphs training set to all graphs.')
parser.add_argument('--num_workers', dest='num_workers', type=int,
help='Number of workers to load data.')
parser.add_argument('--feature', dest='feature_type',
help='Feature used for encoder. Can be: id, deg')
parser.add_argument('--input-dim', dest='input_dim', type=int,
help='Input feature dimension')
parser.add_argument('--hidden-dim', dest='hidden_dim', type=int,
help='Hidden dimension')
parser.add_argument('--output-dim', dest='output_dim', type=int,
help='Output dimension')
parser.add_argument('--num-classes', dest='num_classes', type=int,
help='Number of label classes')
parser.add_argument('--num-gc-layers', dest='num_gc_layers', type=int,
help='Number of graph convolution layers before each pooling')
parser.add_argument('--nobn', dest='bn', action='store_const',
const=False, default=True,
help='Whether batch normalization is used')
parser.add_argument('--dropout', dest='dropout', type=float,
help='Dropout rate.')
parser.add_argument('--nobias', dest='bias', action='store_const',
const=False, default=True,
help='Whether to add bias. Default to True.')
parser.add_argument('--no-log-graph', dest='log_graph', action='store_const',
const=False, default=True,
help='Whether disable log graph')
parser.add_argument('--method', dest='method',
help='Method. Possible values: base, base-set2set, soft-assign')
parser.add_argument('--name-suffix', dest='name_suffix',
help='suffix added to the output filename')
parser.set_defaults(datadir='data',
logdir='log',
dataset='syn1v2',#syn1v2
max_nodes=500,#1000
device='0',
# cuda='1',
feature_type='default',
lr=0.001,
clip=2.0,
batch_size=20,
num_epochs=10,#1000
train_ratio=0.8,
test_ratio=0.1,
num_workers=1,
input_dim=10,
hidden_dim=20,
output_dim=20,
num_classes=2,
num_gc_layers=3,
dropout=0.0,
method='base',
name_suffix='',
assign_ratio=0.1,#0.1
num_pool=1,#1
res=True
)
return parser.parse_args()
def main():
prog_args = arg_parse()
# export scalar data to JSON for external processing
path = os.path.join(prog_args.logdir, gen_prefix(prog_args))
if os.path.isdir(path):
print('Remove existing log dir: ', path)
shutil.rmtree(path)
writer = SummaryWriter(path)
#writer = None
# os.environ['CUDA_VISIBLE_DEVICES'] = prog_args.cuda
# print('CUDA', prog_args.cuda)
if prog_args.bmname is not None:
benchmark_task_val(prog_args, writer=writer,feat='node-feat')#,feat='node-feat'
elif prog_args.pkl_fname is not None:
pkl_task(prog_args)
elif prog_args.dataset is not None:
if prog_args.dataset == 'syn1v2':
syn_community1v2(prog_args, writer=writer)
if prog_args.dataset == 'syn2hier':
syn_community2hier(prog_args, writer=writer)
writer.close()
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
import pandas as pd
def cyclists():
df = pd.read_csv('src/Helsingin_pyorailijamaarat.csv', sep=';')
# [Same as line below]: df = df[df.notna().any(axis=1)]
df = df.dropna(how='all')
df = df.dropna(how='all', axis=1)
return df
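# Quick illustration (hypothetical frame):
# pd.DataFrame({'a': [1.0, None], 'b': [None, None]}).dropna(how='all').dropna(how='all', axis=1)
# drops the all-NaN second row and the all-NaN column 'b', leaving only column 'a' with value 1.0;
# rows and columns that are only partially empty are kept.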
def main():
print(cyclists())
if __name__ == "__main__":
main()
|
python
|
def calc_fitness(pop):
from to_decimal import to_decimal
from math import sin, sqrt
for index, elem in enumerate(pop):
# only assign fitness to chromosomes that do not yet have a fitness value
# print(elem[0], elem[1])
x = to_decimal(elem[0])
y = to_decimal(elem[1])
# x = elem[0]
# y = elem[1]
f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
pop[index] = [f6, elem]
return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
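# Worked example: at the origin x = y = 0, sin(sqrt(0)) = 0, so
# f6 = 0.5 - (0 - 0.5) / (1 + 0)**2 = 1.0,
# the global maximum of this Schaffer F6-style fitness function.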
|
python
|
import os
from typing import Tuple, List
from tokenizers import BertWordPieceTokenizer, Tokenizer
import sentencepiece as spm
from enums.configuration import Configuration
from services.arguments.pretrained_arguments_service import PretrainedArgumentsService
from services.file_service import FileService
class BaseTokenizeService:
def __init__(self):
pass
def encode_tokens(self, tokens: List[str]) -> List[int]:
pass
def decode_tokens(self, character_ids: List[int]) -> List[str]:
pass
def decode_string(self, character_ids: List[int]) -> List[str]:
pass
def id_to_token(self, character_id: int) -> str:
pass
def encode_sequence(self, sequence: str) -> Tuple[List[int], List[str], List[Tuple[int,int]], List[int]]:
pass
def encode_sequences(self, sequences: List[str]) -> List[Tuple[List[int], List[str], List[Tuple[int,int]], List[int]]]:
pass
def tokenize_sequences(self, sequences: List[str]) -> List[List[str]]:
pass
@property
def vocabulary_size(self) -> int:
return 0
@property
def mask_token(self) -> str:
return '[MASK]'
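# Minimal illustrative subclass (hypothetical, not part of this code base): it fills in the
# interface with naive whitespace tokenization so the expected in/out types are easy to see.
class WhitespaceTokenizeService(BaseTokenizeService):
    def __init__(self):
        super().__init__()
        self._vocabulary = {'[MASK]': 0}

    def _token_id(self, token: str) -> int:
        # grow the vocabulary lazily as new tokens are seen
        if token not in self._vocabulary:
            self._vocabulary[token] = len(self._vocabulary)
        return self._vocabulary[token]

    def encode_tokens(self, tokens: List[str]) -> List[int]:
        return [self._token_id(token) for token in tokens]

    def id_to_token(self, character_id: int) -> str:
        inverse = {token_id: token for token, token_id in self._vocabulary.items()}
        return inverse[character_id]

    def decode_tokens(self, character_ids: List[int]) -> List[str]:
        return [self.id_to_token(character_id) for character_id in character_ids]

    def tokenize_sequences(self, sequences: List[str]) -> List[List[str]]:
        return [sequence.split() for sequence in sequences]

    @property
    def vocabulary_size(self) -> int:
        return len(self._vocabulary)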
|
python
|
#!/usr/bin/env python3
import fileinput
import hashlib
salt = ''
for line in fileinput.input():
salt = line.strip()
def md5(i):
return hashlib.md5((salt + str(i)).encode('utf-8')).hexdigest()
def checkNple(s, n):
i = 0
while i < len(s):
char = s[i]
consecutive = 0
while i < len(s) and s[i] == char:
consecutive += 1
i += 1
if consecutive >= n:
return char
return False
def checkTriple(s):
return checkNple(s, 3)
def checkPentuple(s):
return checkNple(s, 5)
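# Example: checkNple('abcccde', 3) scans runs of identical characters and returns 'c'
# (the first character repeated at least 3 times in a row); it returns False when no
# such run exists, e.g. checkNple('abcde', 3) -> False.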
def checkKey(n):
char = checkTriple(md5(n))
if char != False:
for i in range(n + 1, n + 1001):
if char * 5 in md5(i):
return True
return False
i = 0
keysFound = 0
while keysFound < 64:
if checkKey(i):
keysFound += 1
i += 1
print(i - 1)
|
python
|
txt = ''.join(format(ord(x), 'b') for x in 'my foo is bar and baz')
print txt
from collections import Counter
secs = {
'60': '111100',
'30': '11110',
'20': '10100',
'12': '1100',
'10': '1010',
'6': '110',
'5': '101',
'3': '11',
'2': '10',
'1': '1',
'0': '0',
}
for sz, bitr in secs.iteritems():
cnt = 0
for i in range(len(txt)):
if txt[i:].startswith(bitr):
cnt += 1
print sz, str(cnt)
txt_bin = ''
bin1 = []
bin2 = []
bin3 = []
bin4 = []
bin5 = []
bin6 = []
bin7 = []
bin8 = []
with open('vid.mp4', 'rb') as f:
for l in f:
# print '{!r}'.format(l)
# break
for c in l:
# print '{}'.format(c)
c_b = ''.join(format(ord(b), '08b') for b in c)
bin1.append(c_b[0])
bin2.append(c_b[1])
bin3.append(c_b[2])
bin4.append(c_b[3])
bin5.append(c_b[4])
bin6.append(c_b[5])
bin7.append(c_b[6])
bin8.append(c_b[7])
txt_bin += c_b
if len(txt_bin) > 10000000:
break
# print txt_bin
bin1_counted = Counter(bin1)
bin2_counted = Counter(bin2)
bin3_counted = Counter(bin3)
bin4_counted = Counter(bin4)
bin5_counted = Counter(bin5)
bin6_counted = Counter(bin6)
bin7_counted = Counter(bin7)
bin8_counted = Counter(bin8)
print bin1_counted
print bin2_counted
print bin3_counted
print bin4_counted
print bin5_counted
print bin6_counted
print bin7_counted
print bin8_counted
|
python
|
#############################################
# --- Day 8: I Heard You Like Registers --- #
#############################################
import AOCUtils
class Instruction:
def __init__(self, inst):
inst = inst.split()
self.reg = inst[0]
self.mul = {"inc": 1, "dec": -1}[inst[1]]
self.val = int(inst[2])
self.condReg = inst[4]
self.cond = inst[5]
self.condVal = int(inst[6])
def meetsCondition(self, regs):
conditions = {">": lambda x, y: x > y,
"<": lambda x, y: x < y,
">=": lambda x, y: x >= y,
"<=": lambda x, y: x <= y,
"==": lambda x, y: x == y,
"!=": lambda x, y: x != y}
return conditions[self.cond](regs.get(self.condReg, 0), self.condVal)
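# Example input line ("I Heard You Like Registers" format), matching the indices used in __init__:
#   b inc 5 if a > 1
# -> reg='b', mul=+1, val=5, condReg='a', cond='>', condVal=1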
#############################################
instructions = [Instruction(inst) for inst in AOCUtils.loadInput(8)]
registers = dict()
maxEver = 0
for inst in instructions:
if inst.meetsCondition(registers):
registers[inst.reg] = registers.get(inst.reg, 0) + inst.mul*inst.val
maxEver = max(maxEver, registers[inst.reg])
print("Part 1: {}".format(max(registers.values())))
print("Part 2: {}".format(maxEver))
AOCUtils.printTimeTaken()
|
python
|
import datetime
import geospacelab.express.eiscat_dashboard as eiscat
dt_fr = datetime.datetime.strptime('20201209' + '1800', '%Y%m%d%H%M')
dt_to = datetime.datetime.strptime('20201210' + '0600', '%Y%m%d%H%M')
# check the eiscat-hdf5 filename from the EISCAT schedule page, e.g., "[email protected]"
site = 'UHF'
antenna = 'UHF'
modulation = '60'
load_mode = 'AUTO'
# The code will download and load the data automatically as long as the parameters above are set correctly.
viewer = eiscat.EISCATDashboard(
dt_fr, dt_to, site=site, antenna=antenna, modulation=modulation, load_mode='AUTO'
)
viewer.quicklook()
|
python
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.auth.models import User
from django.conf import settings
from postgresqleu.util.fields import LowercaseEmailField
from postgresqleu.countries.models import Country
from postgresqleu.invoices.models import Invoice, InvoicePaymentMethod
from postgresqleu.membership.util import country_validator_choices
from datetime import date, datetime, timedelta
class MembershipConfiguration(models.Model):
id = models.IntegerField(null=False, blank=False, primary_key=True)
sender_email = LowercaseEmailField(null=False, blank=False)
membership_years = models.IntegerField(null=False, blank=False, default=1,
validators=[MinValueValidator(1), MaxValueValidator(10)],
verbose_name="Membership length",
help_text="Membership length in years")
membership_cost = models.IntegerField(null=False, blank=False, default=10,
validators=[MinValueValidator(1), ],
verbose_name="Membership cost")
country_validator = models.CharField(max_length=100, null=False, blank=True,
verbose_name="Country validator",
help_text="Validate member countries against this rule",
choices=country_validator_choices)
paymentmethods = models.ManyToManyField(InvoicePaymentMethod, blank=False, verbose_name='Invoice payment methods')
def get_config():
return MembershipConfiguration.objects.get(id=1)
class Member(models.Model):
user = models.OneToOneField(User, null=False, blank=False, primary_key=True, on_delete=models.CASCADE)
fullname = models.CharField(max_length=500, null=False, blank=False,
verbose_name='Full name')
country = models.ForeignKey(Country, null=False, blank=False, on_delete=models.CASCADE)
listed = models.BooleanField(null=False, blank=False, default=True,
verbose_name='Listed in the public membership list')
paiduntil = models.DateField(null=True, blank=True, verbose_name='Paid until')
membersince = models.DateField(null=True, blank=True, verbose_name='Member since')
# If there is a currently active invoice, link to it here so we can
# easily render the information on the page.
activeinvoice = models.ForeignKey(Invoice, null=True, blank=True, on_delete=models.CASCADE)
# When a membership expiry warning was last sent, so we don't keep
# sending them over and over again
expiry_warning_sent = models.DateTimeField(null=True, blank=True, verbose_name='Expiry warning sent')
country_exception = models.BooleanField(null=False, blank=False, default=False, help_text="Enable to allow member to bypass country validation")
# WARNING! New fields should most likely be added to the exclude list
# in MemberForm!!!
@property
def expiressoon(self):
if self.paiduntil:
if self.paiduntil < date.today() + timedelta(60):
return True
else:
return False
else:
return True
def __str__(self):
return "%s (%s)" % (self.fullname, self.user.username)
class MemberLog(models.Model):
member = models.ForeignKey(Member, null=False, blank=False, on_delete=models.CASCADE)
timestamp = models.DateTimeField(null=False)
message = models.TextField(null=False, blank=False)
def __str__(self):
return "%s: %s" % (self.timestamp, self.message)
class Meeting(models.Model):
name = models.CharField(max_length=100, null=False, blank=False)
dateandtime = models.DateTimeField(null=False, blank=False)
allmembers = models.BooleanField(null=False, blank=False)
members = models.ManyToManyField(Member, blank=True)
botname = models.CharField(max_length=50, null=False, blank=False)
def __str__(self):
return "%s (%s)" % (self.name, self.dateandtime)
class Meta:
ordering = ['-dateandtime', ]
@property
def joining_active(self):
if datetime.now() > self.dateandtime - timedelta(hours=4):
return True
return False
def get_key_for(self, member):
try:
return MemberMeetingKey.objects.get(meeting=self, member=member)
except MemberMeetingKey.DoesNotExist:
return None
class MemberMeetingKey(models.Model):
member = models.ForeignKey(Member, null=False, blank=False, on_delete=models.CASCADE)
meeting = models.ForeignKey(Meeting, null=False, blank=False, on_delete=models.CASCADE)
key = models.CharField(max_length=100, null=False, blank=False)
proxyname = models.CharField(max_length=200, null=True, blank=False)
proxyaccesskey = models.CharField(max_length=100, null=True, blank=False)
class Meta:
unique_together = (('member', 'meeting'), )
|
python
|
import numpy as np
def readLine(line):
return [ int(c) for c in line.split(' ') ]
def schedule(activities):
lastC = 0
lastJ = 0
sortedSchedule = ''
sortedActivities = sorted(activities, key=lambda a: a[1])
for a in sortedActivities:
if a[1] >= lastC:
sortedSchedule += 'C'
lastC = a[2]
elif a[1] >= lastJ:
sortedSchedule += 'J'
lastJ = a[2]
else:
return 'IMPOSSIBLE'
schedule = [''] * len(sortedSchedule)
for i, a in enumerate(sortedActivities):
schedule[a[0]] = sortedSchedule[i]
return ''.join(schedule)
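# Worked example: activities = [[0, 360, 480], [1, 420, 540], [2, 600, 660]]
# (each entry is [original index, start, end]); sorted by start time the order is unchanged,
# 360 >= 0 -> 'C' (lastC=480), 420 < 480 but 420 >= 0 -> 'J' (lastJ=540), 600 >= 480 -> 'C',
# so schedule(activities) returns 'CJC'.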
def play():
cases = int(input())
for i in range(cases):
numAct = int(input())
activities = []
for n in range(numAct):
activities.append([n] + readLine(input()))
s = schedule(activities)
print('Case #{}: {}'.format(i+1, s))
play()
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# slim is mainly about slimming down the code; introduced in 2016
slim = tf.contrib.slim
# 5 x Inception-Resnet-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Build the 35 x 35 resnet block"""
# variable_scope manages the names of variables in a graph and avoids naming conflicts between variables
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3,values=[tower_conv,tower_conv1_1,tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
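# Note: with the nested variable_scopes above, the variables created by e.g. the Branch_0
# convolution get names like 'Block35/Branch_0/Conv2d_1x1/weights', which is what allows
# them to be shared when the block is called again with reuse=True.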
# 10 x Inception-Resnet-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Build the 17x17 resnet block"""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1,7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7,1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(axis=3, values=[tower_conv,tower_conv1_2])
up = slim.conv2d(mixed,net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
# 5 x Inception-Resnet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1,3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1,256, [3,1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(axis=3, values=[tower_conv,tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
# Inception-ResNet-V2
def inception_resnet_v2_base(inputs,
final_endpoint='Conv2d_7b_1x1',
output_stride=16,
align_feature_maps=False,
scope=None):
if output_stride != 8 and output_stride != 16:
raise ValueError('output_stride must be 8 or 16.')
padding = 'SAME' if align_feature_maps else 'VALID'
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3',net):return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding=padding,
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3',net):return net, end_points
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, padding=padding,
scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_3a_3x3')
if add_and_check_final('MaxPool_3a_3x3', net):return net, end_points
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding=padding,
scope='Conv2d_3b_1x1')
if add_and_check_final('Conv2d_3b_1x1',net):return net, end_points
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding=padding,
scope='Conv2d_4a_3x3')
if add_and_check_final('Conv2d_4a_3x3',net):return net,end_points
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_5a_3x3')
if add_and_check_final('MaxPool_5a_3x3',net):return net, end_points
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96,1,scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
                    tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='Avgpool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
                                               scope='Conv2d_0b_1x1')
net = tf.concat(
[tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1],3
)
if add_and_check_final('Mixed_5b', net):return net, end_points
# TODO(alemi):Register intermediate endpoints
            net = slim.repeat(net, 10, block35, scale=0.17)
|
python
|
# -------------------------------------------------------------------------
# function: classes,type = dbscan(x, k, Eps)
# -------------------------------------------------------------------------
# Objective:
# Clustering the data with Density-Based Scan Algorithm with Noise (DBSCAN)
# -------------------------------------------------------------------------
# Input:
# x - dataset(m, n) m - objects, n - variables
# k - number of objects in a neighborhood of an object
# (minimal number of objects considered as a cluster)
# Eps - neighborhood radius; if unknown, pass [] and it will be estimated
# -------------------------------------------------------------------------
# Output:
# classes - vector specifying assignment of the i-th object to certain
# cluster(m, 1)
# type - vector specifying type of the i-th object
# (core: 1, border: 0, outlier: -1)
# -------------------------------------------------------------------------
# Example of use:
# x = [randn(30, 2)*.4 randn(40, 2)*.5 + ones(40, 1)*[4 4]]
# classes,type = dbscan(x, 5, [])
# -------------------------------------------------------------------------
# References:
# [1] M. Ester, H. Kriegel, J. Sander, X. Xu, A density-based algorithm for
#     discovering clusters in large spatial databases with noise, Proc.
#     2nd Int. Conf. on Knowledge Discovery and Data Mining, Portland, OR, 1996,
#     p. 226, available from:
#     www.dbs.informatik.uni-muenchen.de/cgi-bin/papers?query=--CO
# [2] M. Daszykowski, B. Walczak, D. L. Massart, Looking for
#     Natural Patterns in Data. Part 1: Density-Based Approach,
#     Chemom. Intell. Lab. Syst. 56 (2001) 83-92
# -------------------------------------------------------------------------
# Written by Michal Daszykowski
# Department of Chemometrics, Institute of Chemistry,
# The University of Silesia
# December 2004
# http://www.chemometria.us.edu.pl
from numpy import min, abs, max, sqrt, arange, prod, pi, ones, insert, sum, zeros, argwhere, empty, append, delete
from scipy.special import gamma
# ...........................................
def epsilon(x, k):
# function: epsi = epsilon(x, k)
#
# Objective:
# Analytical way used for estimating neighborhood radius for the DBSCAN algorithm
#
# Input:
# x - data matrix(m, n); m - data points, n - dimensions
# k - number of data points in a neighborhood of a given data point
# (minimal number of data points considered as a cluster)
m, n = x.shape
maxmin = max(x, axis = 0) - min(x, axis = 0)
epsi = ((prod(maxmin)*k*gamma(0.5*n + 1))/(m*sqrt(pi**n)))**(1./n)
return epsi
# ............................................
def edist(i, x):
# function: D = edist(i, x)
#
# Objective:
# Calculate the Euclidean distances between the i-th sample vector and all m sample vectors in x
#
# Input:
# i - an n-dimensional sample vector (1, n)
# x - sample matrix (m, n); m - sample vector, n - dimension
#
# Output:
# D - Euclidean distance(m, 1)
m, n = x.shape
if n == 1:
D = abs(ones((m, 1))*i - x)
else:
squ = (ones((m, 1))*i - x)**2
D = sqrt(sum(squ, axis = 1))
return D
def dbscan(x, k, Eps):
m, n = x.shape
if len(Eps) != 1: Eps = epsilon(x, k)
x = insert(x, 0, arange(m), 1)
m, n = x.shape
p_type = zeros((m, 1))
classes = -ones((m, 1))
no = 1
touched = zeros((m, 1))
for i in range(m):
if touched[i] == 0:
ob = x[i, :]
D = edist(ob[1:n], x[:, 1:n])
ind = argwhere(D <= Eps)
            if len(ind) > 1 and len(ind) < k + 1:  # border point: neighbours exist but too few to seed a cluster
p_type[i] = 0
classes[i] = 0
if len(ind) == 1: # this is noise
p_type[i] = -1
classes[i] = -1
touched[i] = 1
if len(ind) >= k + 1: # make clustering
p_type[i] = 1
for j in range(len(ind)):
classes[ind[j]] = max(no)
while len(ind) >= 1:
ob = x[int(ind[0]), :]
touched[int(ind[0])] = 1
ind = ind[1:len(ind)]
D = edist(ob[1:n], x[:, 1:n])
i1 = argwhere(D <= Eps)
if len(i1) > 1:
for j in range(len(i1)):
classes[i1[j]] = no
if len(i1) >= k + 1:
p_type[int(ob[0])] = 1
else:
p_type[int(ob[0])] = 0
for i in range(len(i1)):
if touched[i1[i]] == 0:
touched[i1[i]] = 1
ind = append(ind, i1[i])
classes[i1[i]] = no
no = no + 1
no = no - 1
i1 = argwhere(classes == 0)
classes[i1] = -1
cl = classes.transpose()
p_type[i1] = -1
return cl, p_type, no
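# A minimal usage sketch, not part of the original module: two Gaussian blobs
# roughly matching the MATLAB example in the header above, clustered with k=5 and
# Eps=[] so the radius is estimated by epsilon(). Note that this port returns three
# values: class labels, point types and the number of clusters found.
if __name__ == "__main__":
    import numpy as np
    blob_a = np.random.randn(30, 2) * 0.4
    blob_b = np.random.randn(40, 2) * 0.5 + np.array([4.0, 4.0])
    x_demo = np.vstack([blob_a, blob_b])
    cl_demo, type_demo, n_clusters = dbscan(x_demo, 5, [])
    print("clusters found:", n_clusters)
    print("core/border/noise counts:",
          int(sum(type_demo == 1)), int(sum(type_demo == 0)), int(sum(type_demo == -1)))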
|
python
|
# form test
|
python
|
# -*- coding: utf-8 -*-
"""
Test procedure:
1. Run s1, s2, s3 to make sure the document is created and value = 0.
2. Open two terminals and type python e3_concurrent_update.py in each.
3. Quickly press Enter in both terminals to run. The effect is 1000 "+1" updates
   to value per process; with two concurrent processes they contend with each other.
4. Run s4 to check whether value ended up as 2000.
A detailed look at how ES handles concurrency: https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html
"""
from rich import print
from learn_elasticsearch.tests import (
es_sanhe_dev as es,
create_index, delete_index,
)
index = "concurrent_update_test"
id_ = 1
def s1_create_initial_doc():
print(es.index(index=index, id=id_, body={"value": 0}))
def s2_inspect_doc():
print(es.get(index=index, id=id_))
def s3_update():
for i in range(1000):
print(f"{i}th update ...")
body = {
"script": {
"source": "ctx._source.value = ctx._source.value + params.increment",
"lang": "painless",
"params": {
"increment": 1,
}
}
}
res = es.update(index=index, id=id_, body=body, retry_on_conflict=5)
print(res)
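# Hedged sketch, not part of the original test: the same increment expressed through
# Elasticsearch's explicit optimistic concurrency control, passing the _seq_no and
# _primary_term returned by a GET instead of relying on retry_on_conflict. If another
# writer got in between the GET and the UPDATE, ES rejects the request with a 409.
def s3b_update_with_explicit_versioning():
    doc = es.get(index=index, id=id_)
    es.update(
        index=index,
        id=id_,
        body={"doc": {"value": doc["_source"]["value"] + 1}},
        if_seq_no=doc["_seq_no"],
        if_primary_term=doc["_primary_term"],
    )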
if __name__ == "__main__":
# delete_index(es, index)
# create_index(es, index)
# s1_create_initial_doc()
# s2_inspect_doc()
# s3_update()
pass
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
from skued import azimuthal_average, powdersim
from crystals import Crystal
import unittest
from skimage.filters import gaussian
np.random.seed(23)
def circle_image(shape, center, radii, intensities):
    """ Creates an image with circles of thickness 2 """
im = np.zeros(shape=shape, dtype=np.float)
xx, yy = np.ogrid[0 : shape[0], 0 : shape[1]]
xx, yy = xx - center[0], yy - center[1]
for radius, intensity in zip(radii, intensities):
rr = np.sqrt(xx ** 2 + yy ** 2)
im[np.logical_and(rr < radius + 1, rr > radius - 1)] = intensity
im[:] = gaussian(im, 5)
return im
class TestAzimuthalAverage(unittest.TestCase):
def test_trivial_array(self):
""" Test azimuthal_average on an array of zeroes """
image = np.zeros(shape=(256, 256), dtype=np.float)
center = (image.shape[0] / 2, image.shape[1] / 2)
radius, intensity = azimuthal_average(image, center)
self.assertTrue(intensity.sum() == 0)
self.assertSequenceEqual(intensity.shape, radius.shape)
def test_ring(self):
""" Test azimuthal_average on an image with a wide ring """
image = np.zeros(shape=(256, 256), dtype=np.float)
center = (image.shape[0] / 2, image.shape[1] / 2)
xc, yc = center
# Create an image with a wide ring
extent = np.arange(0, image.shape[0])
xx, yy = np.meshgrid(extent, extent)
rr = np.sqrt((xx - xc) ** 2 + (yy - yc) ** 2)
image[np.logical_and(24 < rr, rr < 26)] = 1
radius, intensity = azimuthal_average(image, center)
self.assertEqual(intensity.max(), image.max())
self.assertSequenceEqual(radius.shape, intensity.shape)
def test_angular_bounds(self):
""" Test azimuthal_average with a restrictive angular_bounds argument """
image = np.zeros(shape=(256, 256), dtype=np.float)
center = (image.shape[0] / 2, image.shape[1] / 2)
xc, yc = center
# Create an image with a wide ring
extent = np.arange(0, image.shape[0])
xx, yy = np.meshgrid(extent, extent)
rr = np.sqrt((xx - xc) ** 2 + (yy - yc) ** 2)
angles = np.rad2deg(np.arctan2(yy - yc, xx - xc)) + 180
image[np.logical_and(0 <= angles, angles <= 60)] = 1
with self.subTest("0 - 360"):
radius, intensity = azimuthal_average(image, center, angular_bounds=None)
r360, int360 = azimuthal_average(image, center, angular_bounds=(0, 360))
self.assertTrue(np.allclose(intensity, int360))
with self.subTest("Inside angle bounds"):
radius, intensity = azimuthal_average(image, center, angular_bounds=(0, 60))
self.assertTrue(np.allclose(intensity, np.ones_like(intensity)))
with self.subTest("Overlapping bounds"):
radius, intensity = azimuthal_average(
image, center, angular_bounds=(15, 75)
)
self.assertFalse(np.all(intensity < np.ones_like(intensity)))
with self.subTest("Outside angle bounds"):
radius, intensity = azimuthal_average(
image, center, angular_bounds=(60, 360)
)
self.assertTrue(np.allclose(intensity, np.zeros_like(intensity)))
with self.subTest("Inside angle bounds with 360deg rollover"):
radius, intensity = azimuthal_average(
image, center, angular_bounds=(60 + 360, 360 + 360)
)
self.assertTrue(np.allclose(intensity, np.zeros_like(intensity)))
def test_ring_with_mask(self):
""" Test azimuthal_average on an image with a wide ring """
image = np.zeros(shape=(256, 256), dtype=np.float)
center = (image.shape[0] / 2, image.shape[1] / 2)
xc, yc = center
mask = np.ones_like(image, dtype=np.bool)
mask[120:140, 0:140] = False
# Create an image with a wide ring
extent = np.arange(0, image.shape[0])
xx, yy = np.meshgrid(extent, extent)
rr = np.sqrt((xx - xc) ** 2 + (yy - yc) ** 2)
image[np.logical_and(24 < rr, rr < 26)] = 1
radius, intensity = azimuthal_average(image, center, mask=mask)
self.assertEqual(intensity.max(), image.max())
self.assertSequenceEqual(radius.shape, intensity.shape)
    def test_trim_and_mask(self):
        """ Test that regions that only have masked contributions are not present
        in the angular average """
image = np.ones(shape=(256, 256), dtype=np.float)
center = (image.shape[0] / 2, image.shape[1] / 2)
xc, yc = center
# Create an image with a wide ring
extent = np.arange(0, image.shape[0])
xx, yy = np.meshgrid(extent, extent)
rr = np.hypot(xx - xc, yy - yc)
mask = np.ones_like(image, dtype=np.bool)
mask[rr < 20] = False
# image[rr < 20] = 0
radius, intensity = azimuthal_average(image, center, mask=mask, trim=False)
self.assertEqual(radius.min(), 0)
radius_trimmed, intensity_trimmed = azimuthal_average(
image, center, mask=mask, trim=True
)
self.assertEqual(radius_trimmed.min(), 20)
def test_mask_and_nan(self):
""" Test that azimuthal_average with masks does not yield NaNs. This can happen for large masks. """
image = np.ones(shape=(256, 256), dtype=np.int16)
mask = np.zeros_like(image, dtype=np.bool)
mask[100:156, 100:156] = True
_, av = azimuthal_average(image, center=(128, 128), mask=mask, trim=False)
self.assertFalse(np.any(np.isnan(av)))
if __name__ == "__main__":
unittest.main()
|
python
|
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides unknown (non-registered) roles support.
It injects a hook to the docutils workflow by replacing
:func:`docutils.parsers.rst.roles.role` and returns
:func:`common_role` handler if no role is found.
:func:`common_role` creates special document node :class:`autodoc_unknown_role`
which is handled by the :class:`CommonTranslator`.
"""
from docutils.parsers.rst import nodes, roles
# List of known but not registered roles.
# They are from the Sphinx.
# http://www.sphinx-doc.org/en/stable/markup/inline.html
# sphinx/roles.py
known_roles = ['any', 'download', 'doc', 'guilabel', 'menuselection',
'file', 'samp', 'abbr', 'index', 'command', 'dfn',
'kbd', 'mailheader', 'makevar', 'manpage', 'mimetype',
'newsgroup', 'program', 'regexp', 'ref', 'numref',
'envvar', 'token', 'keyword', 'option', 'term',
'index', 'attr', 'attribute', 'class', 'meth', 'method', 'obj',
'func', 'exc', 'mod']
class autodoc_unknown_role(nodes.Inline, nodes.TextElement):
"""Unknown role node."""
pass
def common_role(role, rawtext, text, lineno, inliner, options=None,
content=None):
"""Unknown role handler.
    It is used to keep a placeholder node for the unknown role in the document.
"""
options = options if options is not None else {}
roles.set_classes(options)
options['attributes'] = {'text': text}
node = autodoc_unknown_role(rawtext, rawtext, **options)
node.role_name = role
node.source, node.line = inliner.reporter.get_source_and_line(lineno)
return [node], []
# This role is applied to interpreted text without a role: `text`.
def default_role(role, rawtext, *args, **kwargs):
"""Default role to return raw text node."""
# return [nodes.Text(rawtext)], []
text = rawtext.strip('`')
return [nodes.emphasis(text, text, default_role=True)], []
def register_roles():
for name in known_roles:
roles.register_local_role(name, common_role)
def set_default_role():
"""Set custom default role.
By default::
`text` -> :title:`text`
we override with our role::
`text` -> `text`
See Also:
:attr:`roles.DEFAULT_INTERPRETED_ROLE`.
"""
if roles._roles.get('') != default_role:
roles._roles[''] = default_role
def role_hook(role_name, language_module, lineno, reporter):
"""Hook to provide common role if nothing is found."""
role_fn, messages = role_hook.orig(role_name, language_module, lineno,
reporter)
return role_fn or common_role, messages
def set_role_hook():
"""Replace :func:`roles.role` with custom function.
It returns common role node for all nonexistent roles.
"""
registered = hasattr(role_hook, 'orig')
if not registered:
role_hook.orig = roles.role
roles.role = role_hook
return registered
def init():
already_init = set_role_hook()
if not already_init:
set_default_role()
register_roles()
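# A minimal usage sketch, not part of the original module: after init(), reStructuredText
# that uses an unregistered role parses into autodoc_unknown_role nodes instead of
# raising "unknown role" errors. publish_doctree() is standard docutils; the role
# name below is made up.
if __name__ == '__main__':
    from docutils.core import publish_doctree
    init()
    tree = publish_doctree('See :made_up_role:`some text` for details.')
    print(tree.traverse(autodoc_unknown_role))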
|
python
|
import os
import io
import sys
import time
import string
import random
import pstats
import unittest
import cProfile
import itertools
import statistics
from unittest.mock import patch, MagicMock
import bucky3.statsd as statsd
class RoughFloat(float):
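    """Float that compares equal when both values match to two decimal places."""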
def __eq__(self, other):
if not isinstance(other, float):
return super().__eq__(other)
return round(self, 2) == round(other, 2)
def statsd_verify(output_pipe, expected_values):
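    """Assert that the mocked output pipe received exactly expected_values (no extras, none missing)."""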
found_values = sum((i[0][0] for i in output_pipe.send.call_args_list), [])
for v in found_values:
if v in expected_values:
expected_values.remove(v)
else:
assert False, str(v) + " was not expected"
if expected_values:
assert False, "missing " + str(expected_values.pop())
output_pipe.reset_mock()
def statsd_setup(timestamps, **extra_cfg):
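    """Decorator factory for the tests below: patches time.time()/time.monotonic()
    with the given timestamps, builds a StatsDServer with a mocked output pipe,
    runs the wrapped test and, if it returned expected values, flushes the module
    and verifies them against the pipe."""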
def run(fun, self):
with patch('time.monotonic') as monotonic_time, \
patch('time.time') as system_time:
if callable(timestamps):
system_time_mock, monotonic_time_mock = itertools.tee((t for t in timestamps()), 2)
else:
system_time_mock, monotonic_time_mock = itertools.tee(timestamps, 2)
system_time_mock, monotonic_time_mock = iter(system_time_mock), iter(monotonic_time_mock)
monotonic_time0 = next(monotonic_time_mock)
# Statsd module consumes one monotonic tick for self.init_timestamp, we need to inject it
monotonic_time_mock = itertools.chain(iter([monotonic_time0]), iter([monotonic_time0]), monotonic_time_mock)
system_time.side_effect = system_time_mock
monotonic_time.side_effect = monotonic_time_mock
cfg = dict(
# log_level=INFO triggers a log line in src module and that calls the mocked system_time
# which consumes one tick and fails the tests. So up the log_level, really ugly.
log_level='WARN',
flush_interval=1,
add_timestamps=True,
timers_bucket="stats_timers",
histograms_bucket="stats_histograms",
sets_bucket="stats_sets",
gauges_bucket="stats_gauges",
counters_bucket="stats_counters",
destination_modules=(),
)
cfg.update(**extra_cfg)
output_pipe = MagicMock()
statsd_module = statsd.StatsDServer('statsd_test', cfg, [output_pipe])
statsd_module.init_cfg()
expected_output = fun(self, statsd_module)
if expected_output is None:
return
statsd_module.tick()
statsd_verify(output_pipe, expected_output)
if callable(timestamps):
fun = timestamps
timestamps = None
return lambda self: run(fun, self)
else:
def wrapper(fun):
return lambda self: run(fun, self)
return wrapper
def single_histogram_1_bucket(x):
if x < 300: return 'under_300'
def single_histogram_3_buckets(x):
if x < 100: return 'under_100'
if x < 300: return 'under_300'
return 'over_300'
def single_histogram_10_buckets(x):
if x < 100: return 'under_100'
if x < 200: return 'under_200'
if x < 300: return 'under_300'
if x < 400: return 'under_400'
if x < 500: return 'under_500'
if x < 600: return 'under_600'
if x < 700: return 'under_700'
if x < 800: return 'under_800'
if x < 900: return 'under_900'
return 'over_900'
def multiple_histogram_selector(key):
def gorm_selector(x):
if x < 100: return 'gorm_under_100'
return 'gorm_over_100'
def gurm_selector(x):
if x < 300: return 'gurm_under_300'
if x < 1000: return 'gurm_under_1000'
return 'gurm_over_1000'
if key['name'] == 'gorm': return gorm_selector
if key['name'] == 'gurm': return gurm_selector
class TestStatsDServer(unittest.TestCase):
def malformed_entries(self, statsd_module, entry_type, check_numeric=True, check_rate=False):
mock_pipe = statsd_module.dst_pipes[0]
def test(s):
statsd_module.handle_packet(s.encode("utf-8"))
statsd_module.tick()
assert not mock_pipe.called
assert not mock_pipe.send.called
mock_pipe.reset_mock()
test(":1|" + entry_type)
test("_gorm:1|" + entry_type)
test("g.o.r.m:1|" + entry_type)
test("gorm:|" + entry_type)
if check_numeric:
test("gorm:abc|" + entry_type)
if check_rate:
test("gorm:1|" + entry_type + "|@")
test("gorm:1|" + entry_type + "|@0")
test("gorm:1|" + entry_type + "|@1.1")
test("gorm:1|" + entry_type + "|@-0.3")
def malformed_metadata(self, statsd_module, entry):
mock_pipe = statsd_module.dst_pipes[0]
legal_name_chars = string.ascii_letters
illegal_name_chars = string.punctuation.replace('_', '').replace(':', '').replace('=', '')
illegal_value_chars = ','
legal_value_chars = ''.join(
set(string.ascii_letters + string.punctuation + string.digits + ' ') - set(',')
)
def get_random_word(chars, min_len=1, max_len=5):
return ''.join(random.choice(chars) for i in range(random.randint(min_len, max_len)))
def get_token(first_chars, legal_chars, illegal_char=None):
n = get_random_word(first_chars, 1, 1) + get_random_word(legal_chars)
if illegal_char:
n = n + get_random_word(illegal_char, 1, 1) + get_random_word(legal_chars)
return n
i = 0
for c in illegal_name_chars:
name = get_token(legal_name_chars, legal_name_chars, c)
value = get_token(legal_value_chars, legal_value_chars)
statsd_module.handle_line(i, entry + '|#' + name + '=' + value)
statsd_module.tick()
            assert not mock_pipe.called, "Failed for k=" + name + " and v=" + value
            assert not mock_pipe.send.called, "Failed for k=" + name + " and v=" + value
mock_pipe.reset_mock()
i += 1
for c in illegal_value_chars:
name = get_token(legal_name_chars, legal_name_chars)
value = get_token(legal_value_chars, legal_value_chars, c)
statsd_module.handle_line(i, entry + '|#' + name + '=' + value)
statsd_module.tick()
assert not mock_pipe.called
assert not mock_pipe.send.called
mock_pipe.reset_mock()
i += 1
def timestamped_metadata(self, statsd_module, entry):
mock_pipe = statsd_module.dst_pipes[0]
def test(condition, s):
statsd_module.handle_packet((entry + "|#timestamp=" + s).encode("ascii"))
statsd_module.tick()
assert not mock_pipe.called
assert mock_pipe.send.called == condition
mock_pipe.reset_mock()
test(False, "")
test(False, "not-a-timestamp")
test(False, "-1000") # Beyond 10min window
test(False, "1000") # Beyond 10min window
test(True, "-123") # Within 10min window
test(True, "123.4") # Within 10min window
def bucketed_metadata(self, statsd_module, entry, expected_metadata_size=2):
mock_pipe = statsd_module.dst_pipes[0]
def test(condition, s):
statsd_module.handle_packet((entry + "|#hello=world,bucket=" + s).encode("ascii"))
statsd_module.tick()
assert not mock_pipe.called
assert mock_pipe.send.called == condition
if condition:
args, kwargs = mock_pipe.send.call_args
assert len(args) == 1
payload = args[0]
assert len(payload) == 1
payload = payload[0]
assert payload[0] == s
assert len(payload[3]) == expected_metadata_size
mock_pipe.reset_mock()
test(False, "")
test(False, "not-a-bucket-name")
test(True, "valid_bucket_name")
@statsd_setup(timestamps=(2, 4, 6, 8, 10, 12, 14))
def test_counters(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gurm:1|c|@0.1")
statsd_module.handle_line(0, "gorm:3|c")
statsd_module.handle_line(0, "gorm:0.5|c")
statsd_module.handle_line(0, "form:10|c|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=2.5, count=5), 2, dict(name='gorm')),
('stats_counters', dict(rate=5.0, count=10), 2, dict(name='gurm')),
('stats_counters', dict(rate=25.0, count=50), 2, dict(name='form'))
])
statsd_module.handle_line(2, "gorm:1|c")
statsd_module.handle_line(2, "gurm:1.3|c|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=0.5, count=1), 4, dict(name='gorm')),
('stats_counters', dict(rate=3.25, count=6.5), 4, dict(name='gurm'))
])
statsd_module.handle_line(4, "gurm:3|c|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=7.5, count=15), 6, dict(name='gurm'))
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_counters_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gorm:2.0|c|#a=b")
statsd_module.handle_line(0, "gorm:2.5|c|#a=b,c=5")
statsd_module.handle_line(0, "gorm:3.0|c|#a=z,c=5")
statsd_module.handle_line(0, "gorm:3.5|c|#c=5,a=b")
statsd_module.handle_line(0, "pi:3.14|c|#a=,b=c")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='b')),
('stats_counters', dict(rate=6.0, count=6.0), 1, dict(name='gorm', a='b', c='5')),
('stats_counters', dict(rate=3.0, count=3.0), 1, dict(name='gorm', a='z', c='5')),
('stats_counters', dict(rate=3.14, count=3.14), 1, dict(name='pi', a='', b='c')),
])
statsd_module.handle_line(1, "gorm:4.0|c|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=4.0, count=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_counters(self, statsd_module):
self.malformed_entries(statsd_module, 'c', check_rate=True)
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_counters_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|c")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_counters_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|c")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_counters_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|c")
@statsd_setup(timestamps=(1, 2, 3, 4, 5, 6, 7, 8))
def test_gauges(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:6.7|g")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=6.7), 1, dict(name='gorm'))
])
statsd_module.handle_line(1, "gorm:3|g|@0.5")
statsd_module.handle_line(1, "gorm:8.1|g")
statsd_module.handle_line(1, "gurm:123|g|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=8.1), 2, dict(name='gorm')),
('stats_gauges', dict(value=123), 2, dict(name='gurm'))
])
statsd_module.handle_line(2, "gurm:12|g|@0.5")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=12), 3, dict(name='gurm')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_gauges_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|g")
statsd_module.handle_line(0, "gorm:2.0|g|#a=b")
statsd_module.handle_line(0, "gorm:2.5|g|#a=b,c=5")
statsd_module.handle_line(0, "gorm:3.0|g|#a=z,c=5")
statsd_module.handle_line(0, "gorm:3.5|g|#c=5,a=b")
statsd_module.handle_line(0, "pi:3.14|g|#a=,b=c")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=1.5), 1, dict(name='gorm')),
('stats_gauges', dict(value=2.0), 1, dict(name='gorm', a='b')),
('stats_gauges', dict(value=3.5), 1, dict(name='gorm', a='b', c='5')),
('stats_gauges', dict(value=3.0), 1, dict(name='gorm', a='z', c='5')),
('stats_gauges', dict(value=3.14), 1, dict(name='pi', a='', b='c')),
])
statsd_module.handle_line(1, "gorm:4.0|g|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_gauges(self, statsd_module):
self.malformed_entries(statsd_module, 'g')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_gauges_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_gauges_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_gauges_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=(1, 2, 3, 4, 5, 6, 7, 8))
def test_sets(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:abc|s|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1.0), 1, dict(name='gorm'))
])
statsd_module.handle_line(1, "gurm:x|s")
statsd_module.handle_line(1, "gurm:y|s|@0.2")
statsd_module.handle_line(1, "gurm:z|s|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=3.0), 2, dict(name='gurm'))
])
statsd_module.handle_line(2, "gurm:y|s|@0.2")
statsd_module.handle_line(2, "gurm:y|s")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1.0), 3, dict(name='gurm'))
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_sets_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:p|s")
statsd_module.handle_line(0, "gorm:q|s|#a=b")
statsd_module.handle_line(0, "gorm:r|s|#a=b,c=5")
statsd_module.handle_line(0, "gorm:s|s|#a=z,c=5")
statsd_module.handle_line(0, "gorm:t|s|#c=5,a=b")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1), 1, dict(name='gorm')),
('stats_sets', dict(count=1), 1, dict(name='gorm', a='b')),
('stats_sets', dict(count=2), 1, dict(name='gorm', a='b', c='5')),
('stats_sets', dict(count=1), 1, dict(name='gorm', a='z', c='5')),
])
statsd_module.handle_line(1, "gorm:u|s|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_sets(self, statsd_module):
self.malformed_entries(statsd_module, 's', check_numeric=False)
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_sets_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:x|s")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_sets_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:x|s")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_sets_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:x|s")
@statsd_setup(flush_interval=0.1,
percentile_thresholds=(90,),
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7))
def test_single_timer_sample(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|ms")
expected_value = {
"mean": 100.0,
"upper": 100.0,
"lower": 100.0,
"count": 1,
"count_ps": 10.0,
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.1, dict(name='gorm', percentile='90.0'))
])
statsd_module.handle_line(0.1, "gorm:100|ms")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.2, dict(name='gorm', percentile='90.0'))
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(flush_interval=0.1,
percentile_thresholds=(90,),
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7))
def test_timer_samples1(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|ms")
statsd_module.handle_line(0, "gorm:200|ms|@0.2")
statsd_module.handle_line(0, "gorm:300|ms") # Out of the 90% threshold
expected_value = {
"mean": 150,
"lower": 100,
"upper": 200,
"count": 2,
"count_ps": 20,
"stdev": 70.71067811865476
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.1, dict(name='gorm', percentile='90.0'))
])
@statsd_setup(percentile_thresholds=(90,),
timestamps=(0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
def test_timer_samples2(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
for i in range(9):
statsd_module.handle_line(0, "gorm:1|ms")
statsd_module.handle_line(0, "gorm:2|ms") # Out of the 90% threshold
expected_value = {
"mean": 1,
"lower": 1,
"upper": 1,
"count": 9,
"count_ps": 18.0,
"stdev": 0.0
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.5, dict(name='gorm', percentile='90.0'))
])
@statsd_setup(percentile_thresholds=(90,),
timestamps=(0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
def test_timer_samples3(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:2|ms")
statsd_module.handle_line(0, "gorm:5|ms")
statsd_module.handle_line(0, "gorm:7|ms") # Out of the 90% threshold
statsd_module.handle_line(0, "gorm:3|ms")
expected_value = {
"mean": 10 / 3.0,
"lower": 2,
"upper": 5,
"count": 3,
"count_ps": 6,
"stdev": 1.5275252316519463
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.5, dict(name='gorm', percentile='90.0'))
])
_percentile_thresholds = (10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 97, 98, 99, 99.9, 100)
@statsd_setup(timestamps=range(1, 100), percentile_thresholds=_percentile_thresholds)
def test_timer_large_series(self, statsd_module):
test_name = 'gorm'
test_vector = self.rand_vec(length=3000)
for sample in test_vector:
statsd_module.handle_line(0, test_name + ":" + str(sample) + "|ms")
statsd_module.tick()
test_vector.sort()
expected_values = []
for threshold_v in self._percentile_thresholds:
threshold_i = len(test_vector) if threshold_v == 100 else (threshold_v * len(test_vector)) // 100
threshold_slice = test_vector[:int(threshold_i)]
expected_value = {
"mean": RoughFloat(statistics.mean(threshold_slice)),
"upper": RoughFloat(max(threshold_slice)),
"lower": RoughFloat(min(threshold_slice)),
"count": len(threshold_slice),
"count_ps": len(threshold_slice),
"stdev": RoughFloat(statistics.stdev(threshold_slice))
}
expected_values.append(('stats_timers', expected_value, 1,
dict(name=test_name, percentile=str(float(threshold_v)))))
statsd_verify(statsd_module.dst_pipes[0], expected_values)
@statsd_setup(timestamps=range(1, 100), percentile_thresholds=(100,))
def test_timers_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
expected_value = {
"mean": 100.0,
"upper": 100.0,
"lower": 100.0,
"count": 1,
"count_ps": 1.0,
}
expected_value2 = expected_value.copy()
expected_value2.update(count=2, count_ps=2.0, stdev=0.0)
statsd_module.handle_line(0, "gorm:100|ms")
statsd_module.handle_line(0, "gorm:100|ms|#a=b")
statsd_module.handle_line(0, "gorm:100|ms|#a=b,c=5")
statsd_module.handle_line(0, "gorm:100|ms|#a=z,c=5")
statsd_module.handle_line(0, "gorm:100|ms|#c=5,a=b")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 1, dict(name='gorm', percentile='100.0')),
('stats_timers', expected_value, 1, dict(name='gorm', a='b', percentile='100.0')),
('stats_timers', expected_value2, 1, dict(name='gorm', a='b', c='5', percentile='100.0')),
('stats_timers', expected_value, 1, dict(name='gorm', a='z', c='5', percentile='100.0')),
])
statsd_module.handle_line(1, "gorm:100|ms|#a=b,c=5")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 2, dict(name='gorm', a='b', c='5', percentile='100.0')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_timers(self, statsd_module):
self.malformed_entries(statsd_module, 'ms')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_timers_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|ms")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,))
def test_timestamped_timers_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|ms")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,))
def test_bucketed_timers_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|ms", expected_metadata_size=3)
@statsd_setup(flush_interval=0.1,
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_histogram_samples1(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|h")
expected_value = {
"mean": 100,
"lower": 100,
"upper": 100,
"count": 1,
"count_ps": 10,
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_histograms', expected_value, 0.1, dict(name='gorm', histogram='test_histogram'))
])
@statsd_setup(flush_interval=0.1,
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
histogram_selector=lambda key: lambda x: 'test_histogram', )
def test_histogram_samples2(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|h")
statsd_module.handle_line(0, "gorm:200|h|@0.2")
statsd_module.handle_line(0, "gorm:300|h")
expected_value = {
"mean": 200,
"lower": 100,
"upper": 300,
"count": 3,
"count_ps": 30,
"stdev": 100.0
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_histograms', expected_value, 0.1, dict(name='gorm', histogram='test_histogram'))
])
@statsd_setup(flush_interval=0.1,
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
histogram_selector=multiple_histogram_selector)
def test_histogram_large_series(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
test_samples = dict(gorm={}, gurm={}, foo={})
for i in range(3000):
name = random.choice(tuple(test_samples.keys()))
value = random.randint(0, 1500)
statsd_module.handle_line(0, name + ":" + str(value) + "|h")
selector = multiple_histogram_selector(dict(name=name))
if not selector:
continue
bucket = selector(value)
if bucket:
test_samples[name].setdefault(bucket, []).append(value)
expected_values = []
for name, d in test_samples.items():
for k, v in d.items():
expected_value = {
"mean": RoughFloat(statistics.mean(v)),
"lower": min(v),
"upper": max(v),
"count": len(v),
"count_ps": len(v) * 10,
}
if len(v) > 1:
expected_value['stdev'] = RoughFloat(statistics.stdev(v))
expected_values.append(
('stats_histograms', expected_value, 0.1, dict(name=name, histogram=k))
)
statsd_module.tick()
statsd_verify(mock_pipe, expected_values)
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_histograms(self, statsd_module):
self.malformed_entries(statsd_module, 'h')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_histograms_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|h")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,),
histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_timestamped_histograms_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|h")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(),
histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_bucketed_histograms_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|h", expected_metadata_size=3)
@statsd_setup(timestamps=range(1, 1000))
def test_commas(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "foo:1|c|#hello=world,")
statsd_module.handle_line(0, "foo:1|c|#hello=world,,,more=metadata,")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1, count=1), 1, dict(name='foo', hello='world')),
])
def prepare_performance_test(self):
flag = os.environ.get('TEST_PERFORMANCE', 'no').lower()
test_requested = flag in ('yes', 'true', '1')
if not test_requested:
self.skipTest("Performance test not requested")
return None
flag = os.environ.get('PROFILE_PERFORMANCE', 'no').lower()
profiler_requested = flag in ('yes', 'true', '1')
return cProfile.Profile() if profiler_requested else None
def close_performance_test(self, profiler):
if profiler:
buf = io.StringIO()
stats = pstats.Stats(profiler, stream=buf).sort_stats('cumulative')
stats.print_stats(0.1)
print(buf.getvalue())
def rand_str(self, min_len=3, max_len=10, chars=string.ascii_lowercase):
return ''.join(random.choice(chars) for i in range(random.randint(min_len, max_len)))
def rand_num(self, min_len=1, max_len=3):
return self.rand_str(min_len, max_len, string.digits)
def rand_val(self, mean=None):
if mean is None:
mean = 10
return round(min(max(0, random.gauss(mean, mean / 10)), 2 * mean), 3)
def rand_vec(self, length=None, mean=None):
if length is None:
length = random.randint(10, 100)
return list(self.rand_val(mean) for i in range(length))
def metadata_test_set(self, metric_type, set_size, tags_per_sample):
buf = set()
while len(buf) < set_size:
if tags_per_sample > 0:
tags_str = ','.join(self.rand_str() + '=' + self.rand_str() for i in range(tags_per_sample))
else:
tags_str = ''
l = self.rand_str() + ':' + self.rand_num() + '|' + metric_type
if random.random() > 0.5:
l = l + '|@{:.1f}'.format(random.random())
if tags_str:
l = l + '|#' + tags_str
buf.add(l)
return buf
def metadata_performance(self, statsd_module, prefix, metric_type, N, M, set_size, tags_per_sample, profiler=None):
mock_pipe = statsd_module.dst_pipes[0]
test_sample_set = self.metadata_test_set(metric_type, set_size, tags_per_sample)
insertion_time = 0
aggregation_time = 0
t = 0
for i in range(N):
start_timestamp = time.process_time()
for j in range(M):
for sample in test_sample_set:
if profiler:
profiler.enable()
statsd_module.handle_line(t, sample)
if profiler:
profiler.disable()
insertion_timestamp = time.process_time()
if profiler:
profiler.enable()
statsd_module.tick()
if profiler:
profiler.disable()
aggregation_timestamp = time.process_time()
t += 1
mock_pipe.reset_mock()
insertion_time += (insertion_timestamp - start_timestamp)
aggregation_time += (aggregation_timestamp - insertion_timestamp)
total_samples = N * M * len(test_sample_set)
us_per_insertion = 1000000 * insertion_time / total_samples
us_per_aggregation = 1000000 * aggregation_time / total_samples
print(('\n{prefix}: {total_samples:d} samples in {total_time:.2f}s'
' -> insertion {us_per_insertion:.2f}us/sample'
' -> aggregation {us_per_aggregation:.2f}us/sample').format(
prefix=prefix, total_samples=total_samples, total_time=(insertion_time + aggregation_time),
us_per_insertion=us_per_insertion, us_per_aggregation=us_per_aggregation,
), flush=True, file=sys.stderr)
@statsd_setup(timestamps=range(1, 10000000))
def test_counters_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "counters without tags", 'c', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "counters with 3 tags", 'c', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "counters with 10 tags", 'c', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000))
def test_gauges_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "gauges without tags", 'g', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "gauges with 3 tags", 'g', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "gauges with 10 tags", 'g', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000))
def test_sets_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "sets without tags", 's', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "sets with 3 tags", 's', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "sets with 10 tags", 's', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99))
def test_timers_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "timers without tags", 'ms', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "timers with 3 tags", 'ms', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "timers with 10 tags", 'ms', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
histogram_selector=lambda key: single_histogram_1_bucket)
def test_histograms_performance1(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "histogram with 1 bucket, no tags", 'h', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "histogram with 1 bucket, 10 tags", 'h', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
histogram_selector=lambda key: single_histogram_3_buckets)
def test_histograms_performance3(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "histogram with 3 buckets, no tags", 'h', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "histogram with 3 buckets, 10 tags", 'h', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
histogram_selector=lambda key: single_histogram_10_buckets)
def test_histograms_performance10(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "histogram with 10 buckets, no tags", 'h', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "histogram with 10 buckets, 10 tags", 'h', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
def percentile_test_set(self, length, N=1):
buf = []
for i in range(N):
name = ('name', self.rand_str(min_len=10, max_len=10))
vector = self.rand_vec(length=length)
            buf.append(((name,), vector))
return buf
def percentiles_performance(self, statsd_module, prefix, vector_len, N, M, profiler=None):
total_time, test_set = 0, self.percentile_test_set(vector_len, N)
for i in range(M):
statsd_module.buffer_metric = lambda bucket, stats, timestamp, metadata: None
statsd_module.timers.clear()
statsd_module.timers.update((k, (8, v)) for k, v in test_set)
statsd_module.last_timestamp = 0
statsd_module.current_timestamp = 10
start_time = time.process_time()
if profiler:
profiler.enable()
statsd_module.enqueue_timers(10)
if profiler:
profiler.disable()
time_delta = time.process_time() - start_time
total_time += time_delta
total_samples = N * M * vector_len
us_per_sample = 1000000 * total_time / total_samples
print('\n{prefix}: {total_samples:d} samples in {time_delta:.2f}s -> {us_per_sample:.1f}us/sample'.format(
prefix=prefix, total_samples=total_samples, time_delta=time_delta, us_per_sample=us_per_sample
), flush=True, file=sys.stderr)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90,))
def test_1percentile_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.percentiles_performance(statsd_module, "1 percentile, 10000 vectors of 10 samples", 10, 10000, 10, prof)
self.percentiles_performance(statsd_module, "1 percentile, 1000 vectors of 100 samples", 100, 1000, 10, prof)
self.percentiles_performance(statsd_module, "1 percentile, 100 vectors of 1000 samples", 1000, 100, 10, prof)
self.percentiles_performance(statsd_module, "1 percentile, 10 vectors of 10000 samples", 10000, 10, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(50, 90, 99))
def test_3percentiles_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.percentiles_performance(statsd_module, "3 percentiles, 10000 vectors of 10 samples", 10, 10000, 10, prof)
self.percentiles_performance(statsd_module, "3 percentiles, 1000 vectors of 100 samples", 100, 1000, 10, prof)
self.percentiles_performance(statsd_module, "3 percentiles, 100 vectors of 1000 samples", 1000, 100, 10, prof)
self.percentiles_performance(statsd_module, "3 percentiles, 10 vectors of 10000 samples", 10000, 10, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(10, 20, 30, 40, 50, 60, 70, 80, 90, 100))
def test_10percentiles_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.percentiles_performance(statsd_module, "10 percentiles, 10000 vectors of 10 samples", 10, 10000, 10, prof)
self.percentiles_performance(statsd_module, "10 percentiles, 1000 vectors of 100 samples", 100, 1000, 10, prof)
self.percentiles_performance(statsd_module, "10 percentiles, 100 vectors of 1000 samples", 1000, 100, 10, prof)
self.percentiles_performance(statsd_module, "10 percentiles, 10 vectors of 10000 samples", 10000, 10, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 100))
def test_datadog_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gorm:2.0|c|#a:b")
statsd_module.handle_line(0, "gorm:2.5|c|#a:b,c:5")
statsd_module.handle_line(0, "gorm:3.0|c|#a:z,c:5")
statsd_module.handle_line(0, "gorm:3.5|c|#c:5,a:b")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='b')),
('stats_counters', dict(rate=6.0, count=6.0), 1, dict(name='gorm', a='b', c='5')),
('stats_counters', dict(rate=3.0, count=3.0), 1, dict(name='gorm', a='z', c='5')),
])
statsd_module.handle_line(1, "gorm:4.0|c|#c:5,a:z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=4.0, count=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_escaped_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gorm:2.0|c|#a=bcd")
statsd_module.handle_line(0, r"gorm:2.5|c|#a=b\c\d")
statsd_module.handle_line(0, r"gorm:3.5|c|#a=b\,c=d")
statsd_module.handle_line(0, r"gorm:5.5|c|#a=b\,,c=d")
statsd_module.handle_line(0, r"gorm:7.5|c|#a=b\nc,d=e")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
('stats_counters', dict(rate=4.5, count=4.5), 1, dict(name='gorm', a='bcd')),
('stats_counters', dict(rate=3.5, count=3.5), 1, dict(name='gorm', a='b,c=d')),
('stats_counters', dict(rate=5.5, count=5.5), 1, dict(name='gorm', a='b,', c='d')),
('stats_counters', dict(rate=7.5, count=7.5), 1, dict(name='gorm', a='b\nc', d='e')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_case_sensitivity(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:2.0|c|#a=bcd")
statsd_module.handle_line(0, "goRM:1.0|c|#a=BCD")
statsd_module.handle_line(0, "gorm:2.5|c|#A=bcd")
statsd_module.handle_line(0, "gorm:3.5|c|#a=Bcd")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='bcd')),
('stats_counters', dict(rate=1.0, count=1.0), 1, dict(name='goRM', a='BCD')),
('stats_counters', dict(rate=2.5, count=2.5), 1, dict(name='gorm', A='bcd')),
('stats_counters', dict(rate=3.5, count=3.5), 1, dict(name='gorm', a='Bcd')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
if __name__ == '__main__':
unittest.main()
|
python
|
import os
import cv2
import numpy as np
import torch
# from utils import cropping as fp
from csl_common.utils import nn, cropping
from csl_common import utils
from landmarks import fabrec
from torchvision import transforms as tf
from landmarks import lmvis
snapshot_dir = os.path.join('.')
INPUT_SIZE = 256
transforms = [utils.transforms.CenterCrop(INPUT_SIZE)]
transforms += [utils.transforms.ToTensor()]
transforms += [utils.transforms.Normalize([0.518, 0.418, 0.361], [1, 1, 1])]
crop_to_tensor = tf.Compose(transforms)
def load_image(im_dir, fname):
from skimage import io
img_path = os.path.join(im_dir, fname)
img = io.imread(img_path)
if img is None:
raise IOError("\tError: Could not load image {}!".format(img_path))
if len(img.shape) == 2 or img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
if img.shape[2] == 4:
print(fname, "converting RGBA to RGB...")
img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
assert img.shape[2] == 3, "{}, invalid format: {}".format(img_path, img.shape)
return img
def detect_in_crop(net, crop):
with torch.no_grad():
X_recon, lms_in_crop, X_lm_hm = net.detect_landmarks(crop)
lms_in_crop = utils.nn.to_numpy(lms_in_crop.reshape(1, -1, 2))
return X_recon, lms_in_crop, X_lm_hm
def test_crop(net, input_image, gt_landmarks, bb_for_crop=None, lms_for_crop=None, align=False, scale=1.0):
assert bb_for_crop is not None or lms_for_crop is not None
cropper = cropping.FaceCrop(input_image, bbox=bb_for_crop, landmarks=lms_for_crop,
align_face_orientation=align, scale=scale,
output_size=(INPUT_SIZE, INPUT_SIZE))
crop = cropper.apply_to_image()
landmarks = cropper.apply_to_landmarks(gt_landmarks)[0]
item = {'image': crop, 'landmarks': landmarks, 'pose': None}
item = crop_to_tensor(item)
images = nn.atleast4d(item['image']).cuda()
X_recon, lms, X_lm_hm = detect_in_crop(net, images)
# lmvis.visualize_batch(images, landmarks, X_recon, X_lm_hm, lms, wait=0, clean=True)
lmvis.visualize_batch_CVPR(images, landmarks, X_recon, X_lm_hm, lms, wait=0,
horizontal=True, show_recon=True, radius=2, draw_wireframes=True)
if __name__ == '__main__':
model = './data/models/snapshots/demo'
net = fabrec.load_net(model, num_landmarks=98)
net.eval()
im_dir = './images'
img0 = 'ada.jpg'
with torch.no_grad():
img = load_image(im_dir, img0)
scalef = 0.65
bb0 = [0,0] + list(img.shape[:2][::-1])
bb = utils.geometry.scaleBB(bb0, scalef, scalef, typeBB=2)
test_crop(net, img, gt_landmarks=None, bb_for_crop=bb)
|
python
|
from django.shortcuts import render
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
class NewFatpercentageView(LoginRequiredMixin, TemplateView):
template_name = "new_fatpercentage.html"
class FatpercentageView(LoginRequiredMixin, TemplateView):
template_name = "fatpercentage_stats.html"
|
python
|
import sys
from io import StringIO
def io_sys_stdin():
    """Standard input stream.
    Ctrl + D ends the input.
    """
    for line in sys.stdin:  # iterate over the input line by line
        s = line.split()  # split on whitespace; returns a list of tokens
print(s)
def io_input():
    """Read input with input(); in Python 3 it returns whatever was typed as a string.
    A newline ends the input.
    Python 3 has no raw_input(); input() takes its place.
"""
s = input()
print(s)
def redirectStdin():
    """Redirect standard input with StringIO.
    """
    sys.stdin = StringIO("line1\nline2\nline3")
    for line in sys.stdin:
        s = line.split()  # each line contains no spaces, so this returns a one-element list
print(s)
if __name__ == "__main__":
print("\ninput()")
io_input()
print("\nredirect stdin")
redirectStdin()
|
python
|
#
# Copyright 2014 Thomas Rabaix <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from element.node import NodeHandler
class IndexView(object):
def __init__(self, container):
self.container = container
def execute(self, request_handler, context):
return 200, 'ioc.extra.stats:index.html', {}
class ParametersView(NodeHandler):
def __init__(self, container):
self.container = container
def execute(self, request_handler, context, type=None):
params = {
'parameters': self.container.parameters,
'context': context
}
return self.render(request_handler, self.container.get('ioc.extra.jinja2'), 'ioc.extra.stats:parameters.html', params)
class ServicesView(NodeHandler):
def __init__(self, container):
self.container = container
def execute(self, request_handler, context):
context.node.title = "Services"
return 200, 'ioc.extra.stats:services.html', {
'services': self.container.services,
'context': context,
}
|
python
|
from Jumpscale import j
def test():
"""
to run:
kosmos 'j.data.rivine.test(name="sia_basic")'
"""
e = j.data.rivine.encoder_sia_get()
    # you can add integers, booleans, iterable objects, strings,
# bytes and byte arrays. Dictionaries and objects are not supported.
e.add(False)
e.add("a")
e.add([1, True, "foo"])
e.add(b"123")
# the result is a single bytearray
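    # Reading the assertion below: booleans encode as a single byte, integers as
    # 8-byte little-endian values, and strings/byte slices (and the list itself)
    # are prefixed with an 8-byte little-endian length.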
assert (
e.data
== b"\x00\x01\x00\x00\x00\x00\x00\x00\x00a\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00\x00\x00\x00\x00\x00\x00foo\x03\x00\x00\x00\x00\x00\x00\x00123"
)
|
python
|
from nose.tools import *
from ..app import address_parts
@raises(TypeError)
def test_address_parts_no_address():
expected = []
actual = address_parts()
def test_address_parts_with_address():
expected = ['AddressNumber', 'StreetName']
actual = address_parts('123 main')
assert actual == expected
|
python
|
from sys import exit
def parse_weights(numberExpectedParams, filename):
    f = open(filename, 'r')
    contents = f.readlines()
    params = []
    linenumber = 0
    for i in contents:
        linenumber = linenumber + 1
        i = i.strip()
        if i == "":
            continue
        try:
            paramVal = float(i)
            params.append(paramVal)
        except ValueError:
            print("While reading the parameter file (%s), expected a real number on line %d but found '%s'. Please check it." % (filename, linenumber, i))
            exit()
    if len(params) != numberExpectedParams:
        print("Wrong number of weights in the given file! Found %d weights, but your controller uses %d" % (len(params), numberExpectedParams))
        exit()
    print("Successfully read %d weights: %s" % (numberExpectedParams, params))
    return params
|
python
|
from flask import current_app as app
from flask import jsonify, request
from director.api import api_bp
from director.builder import WorkflowBuilder
from director.exceptions import WorkflowNotFound
from director.extensions import cel_workflows, schema
from director.models.workflows import Workflow
@api_bp.route("/workflows", methods=["POST"])
@schema.validate(
{
"required": ["project", "name", "payload"],
"additionalProperties": False,
"properties": {
"project": {"type": "string"},
"name": {"type": "string"},
"payload": {"type": "object"},
},
}
)
def create_workflow():
data = request.get_json()
project = data["project"]
name = data["name"]
fullname = f"{project}.{name}"
# Check if the workflow exists
try:
cel_workflows.get_by_name(fullname)
except WorkflowNotFound:
return jsonify({"error": f"Workflow {fullname} not found"}), 404
# Create the workflow in DB
obj = Workflow(project=project, name=name, payload=data["payload"])
obj.save()
# Build the workflow and execute it
data = obj.to_dict()
workflow = WorkflowBuilder(obj.id)
workflow.run()
app.logger.info(f"Workflow ready : {workflow.canvas}")
return jsonify(data), 201
@api_bp.route("/workflows")
def list_workflows():
workflows = Workflow.query.all()
return jsonify([w.to_dict() for w in workflows])
@api_bp.route("/workflows/<workflow_id>")
def get_workflow(workflow_id):
workflow = Workflow.query.filter_by(id=workflow_id).first()
if not workflow:
return jsonify({"error": f"Workflow {workflow_id} not found"}), 404
tasks = [t.to_dict() for t in workflow.tasks]
resp = workflow.to_dict()
resp.update({"tasks": tasks})
return jsonify(resp)
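# Hedged usage sketch, not part of the blueprint above: how a client could exercise
# these endpoints with the requests library. The host, port and the "/api" prefix
# are assumptions about the deployment, and the project/workflow names are made up.
if __name__ == "__main__":
    import requests
    base = "http://localhost:8000/api"  # assumed Director host and API prefix
    created = requests.post(
        base + "/workflows",
        json={"project": "demo", "name": "example_workflow", "payload": {}},
    )
    print(created.status_code, created.json())
    print(requests.get(base + "/workflows").json())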
|
python
|
nome = input('Enter your name: ')
if nome == 'Cristiano':
    print("It's me")
else:
    print("It's not me")
|
python
|
from django.db import models
from bitoptions import BitOptions, BitOptionsField
TOPPINGS = BitOptions(
('pepperoni', 'mushrooms', 'onions', 'sausage', 'bacon', 'black olives',
'green olives', 'green peppers', 'pineapple', 'spinach', 'tomatoes',
'broccoli', 'jalapeno peppers', 'anchovies', 'chicken', 'beef', 'ham',
'salami')
)
CHEESES = BitOptions(('feta', 'parmesan', 'provolone', 'goat', 'mozzarella'))
COLORS = BitOptions(('red', 'green', 'blue'))
class Box(models.Model):
"""
Test model with nullable BitOptionsField.
"""
colors = BitOptionsField(options=COLORS, null=True, blank=True)
class Pizza(models.Model):
"""
Test model with small and medium size list of options.
"""
toppings = BitOptionsField(options=TOPPINGS)
cheeses = BitOptionsField(options=CHEESES)
|
python
|
print(["ABC","ARC","AGC"][int(input())//50+8>>5])
|
python
|
from __future__ import print_function
import sys
if sys.version_info < (3, 8):
print(file=sys.stderr)
print('This game needs Python 3.8 or later; preferably 3.9.', file=sys.stderr)
exit(1)
try:
import moderngl
import pyglet
import png
except ImportError:
print(file=sys.stderr)
print('You need to install dependencies for this game:', file=sys.stderr)
print(file=sys.stderr)
print(' python -m pip install -r requirements.txt', file=sys.stderr)
print(file=sys.stderr)
exit(1)
import keypad_racer.__main__
|
python
|
#!/usr/bin/python2
#Dasporal
import swiftclient
import os, sys, mimetypes
import requests
import json
import pprint
data = file(os.path.join(sys.path[0], "../tokens.json"))
tokens = json.load(data)
if len(sys.argv) not in (2, 3):
print ("Usage: podcast_upload.py [audio file name]")
exit(1)
# Fetching infos on the file path
file_path = os.path.abspath(sys.argv[1])
file_name = os.path.basename(sys.argv[1])
# Opening file
try:
episode = open(file_path)
except IOError:
print ("File ", file_path, " not found.")
exit(1)
# Uploading to Mixcloud
print ("Uploading of ", file_name, " on Mixcloud started...")
# Filling the requests parameters
files = {"mp3": episode}
url = "https://api.mixcloud.com/upload/"
params = {"access_token": tokens["mixcloud"]["test_token"]}
data = {"name": "Test API"}
# API request
r = requests.post(url, data=data, params=params, files=files)
# Error handling
if (r.status_code == 200):
print ("Upload to Mixcloud succeeded!")
else:
print ("Upload to Mixcloud failed with error code ", str(r.status_code), " (", r.reason, ")")
exit(1)
# OpenStack
# Setting options
options = {}
options['tenant_id'] = tokens["openstack"]["tenant_id"]
options['region_name'] = tokens["openstack"]["region_name"]
# Opening connection
client = swiftclient.client.Connection(tokens["openstack"]["auth_url"], tokens["openstack"]["username"], tokens["openstack"]["password"], 5, None, None, False, 1, 64, tokens["openstack"]["tenant_name"], options, '2')
# Getting infos on the file
episode_size = os.stat(file_path).st_size
episode_content = episode.read(episode_size)
# Uploading
print ("Uploading of ", file_name, " on OpenStack started...")
try:
client.put_object("podcasts", file_name, episode_content, episode_size, None, None, "audio/mpeg")
except swiftclient.exceptions.ClientException as e:
print ("Error: Server responded to the PUT request on ", e.http_path, " with ", str(e.http_status), " ", e.http_reason)
exit(1)
print ("Upload to OpenStack succeeded!")
|
python
|
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
SPECIAL_CASES = {
'ee': 'et',
}
LANGUAGES = {
'af': 'afrikaans',
'sq': 'albanian',
'ar': 'arabic',
'be': 'belarusian',
'bg': 'bulgarian',
'ca': 'catalan',
'zh-CN': 'chinese_simplified',
'zh-TW': 'chinese_traditional',
'hr': 'croatian',
'cs': 'czech',
'da': 'danish',
'nl': 'dutch',
'en': 'english',
'eo': 'esperanto',
'et': 'estonian',
'tl': 'filipino',
'fi': 'finnish',
'fr': 'french',
'gl': 'galician',
'de': 'german',
'el': 'greek',
'iw': 'hebrew',
'hi': 'hindi',
'hu': 'hungarian',
'is': 'icelandic',
'id': 'indonesian',
'ga': 'irish',
'it': 'italian',
'ja': 'japanese',
'ko': 'korean',
'la': 'latin',
'lv': 'latvian',
'lt': 'lithuanian',
'mk': 'macedonian',
'ms': 'malay',
'mt': 'maltese',
'no': 'norwegian',
'fa': 'persian',
'pl': 'polish',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'sr': 'serbian',
'sk': 'slovak',
'sl': 'slovenian',
'es': 'spanish',
'sw': 'swahili',
'sv': 'swedish',
'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnamese',
'cy': 'welsh',
'yi': 'yiddish',
}
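# A small helper sketch (not part of the original constants): resolve a code
# through SPECIAL_CASES before looking up its language name, so that e.g.
# 'ee' maps to 'et' and then to 'estonian'.
def language_name(code, default=None):
    """Return the language name for a code, honouring SPECIAL_CASES."""
    return LANGUAGES.get(SPECIAL_CASES.get(code, code), default)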
|
python
|
from keras.engine import InputSpec
from keras.layers import Dense
from keras.layers.wrappers import Wrapper, TimeDistributed
class Highway(Wrapper):
def __init__(self, layer, gate=None, **kwargs):
self.supports_masking = True
self.gate = gate
super(Highway, self).__init__(layer, **kwargs)
def build(self, input_shape=None):
assert len(input_shape) in [2, 3]
self.input_spec = [InputSpec(shape=input_shape)]
nb_output_dims = input_shape[-1]
if self.gate is None:
gate = Dense(nb_output_dims, activation='sigmoid')
if len(input_shape) == 3:
gate = TimeDistributed(gate)
self.gate = gate
super(Highway, self).build(input_shape)
def get_output_shape_for(self, input_shape):
assert self.layer.get_output_shape_for(input_shape) == input_shape
assert self.gate.get_output_shape_for(input_shape) == input_shape
return input_shape
def call(self, x, mask=None):
return self.layer(x) * self.gate(x) + x * (1 - self.gate(x))
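# A rough usage sketch, assuming the Keras 1.x-era API that the imports above
# target; the wrapped layer must preserve the input shape, so a Dense layer of
# the same width is used here.
#
#   from keras.layers import Input, Dense
#   from keras.models import Model
#   inputs = Input(shape=(128,))
#   outputs = Highway(Dense(128, activation='relu'))(inputs)
#   model = Model(inputs, outputs)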
|
python
|
import tensorflow as tf
import tqdm
from one_shot_learning_network import MatchingNetwork
class ExperimentBuilder:
def __init__(self, data):
"""
Initializes an ExperimentBuilder object. The ExperimentBuilder object takes care of setting up our experiment
        and provides helper functions such as run_training_epoch and run_validation_epoch to simplify our training
and evaluation procedures.
:param data: A data provider class
"""
self.data = data
def build_experiment(self, batch_size, classes_per_set, samples_per_class, fce):
"""
:param batch_size: The experiment batch size
:param classes_per_set: An integer indicating the number of classes per support set
:param samples_per_class: An integer indicating the number of samples per class
:param fce: Whether to use full context embeddings or not
:return: a matching_network object, along with the losses, the training ops and the init op
"""
height, width, channels = self.data.x.shape[2], self.data.x.shape[3], self.data.x.shape[4]
self.support_set_images = tf.placeholder(tf.float32, [batch_size, classes_per_set, samples_per_class, height, width,
channels], 'support_set_images')
self.support_set_labels = tf.placeholder(tf.int32, [batch_size, classes_per_set, samples_per_class], 'support_set_labels')
self.target_image = tf.placeholder(tf.float32, [batch_size, height, width, channels], 'target_image')
self.target_label = tf.placeholder(tf.int32, [batch_size], 'target_label')
self.training_phase = tf.placeholder(tf.bool, name='training-flag')
self.rotate_flag = tf.placeholder(tf.bool, name='rotate-flag')
self.keep_prob = tf.placeholder(tf.float32, name='dropout-prob')
self.current_learning_rate = 1e-03
self.learning_rate = tf.placeholder(tf.float32, name='learning-rate-set')
self.one_shot_omniglot = MatchingNetwork(batch_size=batch_size, support_set_images=self.support_set_images,
support_set_labels=self.support_set_labels,
target_image=self.target_image, target_label=self.target_label,
keep_prob=self.keep_prob, num_channels=channels,
is_training=self.training_phase, fce=fce, rotate_flag=self.rotate_flag,
num_classes_per_set=classes_per_set,
num_samples_per_class=samples_per_class, learning_rate=self.learning_rate)
summary, self.losses, self.c_error_opt_op = self.one_shot_omniglot.init_train()
init = tf.global_variables_initializer()
self.total_train_iter = 0
return self.one_shot_omniglot, self.losses, self.c_error_opt_op, init
def run_training_epoch(self, total_train_batches, sess):
"""
Runs one training epoch
:param total_train_batches: Number of batches to train on
:param sess: Session object
:return: mean_training_categorical_crossentropy_loss and mean_training_accuracy
"""
total_c_loss = 0.
total_accuracy = 0.
with tqdm.tqdm(total=total_train_batches) as pbar:
for i in range(total_train_batches): # train epoch
x_support_set, y_support_set, x_target, y_target = self.data.get_train_batch(augment=True)
_, c_loss_value, acc = sess.run(
[self.c_error_opt_op, self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],
feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,
self.support_set_labels: y_support_set, self.target_image: x_target, self.target_label: y_target,
self.training_phase: True, self.rotate_flag: False, self.learning_rate: self.current_learning_rate})
iter_out = "train_loss: {}, train_accuracy: {}".format(c_loss_value, acc)
pbar.set_description(iter_out)
pbar.update(1)
total_c_loss += c_loss_value
total_accuracy += acc
self.total_train_iter += 1
if self.total_train_iter % 2000 == 0:
self.current_learning_rate /= 2
print("change learning rate", self.current_learning_rate)
total_c_loss = total_c_loss / total_train_batches
total_accuracy = total_accuracy / total_train_batches
return total_c_loss, total_accuracy
def run_validation_epoch(self, total_val_batches, sess):
"""
Runs one validation epoch
:param total_val_batches: Number of batches to train on
:param sess: Session object
:return: mean_validation_categorical_crossentropy_loss and mean_validation_accuracy
"""
total_val_c_loss = 0.
total_val_accuracy = 0.
with tqdm.tqdm(total=total_val_batches) as pbar:
for i in range(total_val_batches): # validation epoch
x_support_set, y_support_set, x_target, y_target = self.data.get_val_batch(augment=True)
c_loss_value, acc = sess.run(
[self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],
feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,
self.support_set_labels: y_support_set, self.target_image: x_target, self.target_label: y_target,
self.training_phase: False, self.rotate_flag: False})
iter_out = "val_loss: {}, val_accuracy: {}".format(c_loss_value, acc)
pbar.set_description(iter_out)
pbar.update(1)
total_val_c_loss += c_loss_value
total_val_accuracy += acc
total_val_c_loss = total_val_c_loss / total_val_batches
total_val_accuracy = total_val_accuracy / total_val_batches
return total_val_c_loss, total_val_accuracy
def run_testing_epoch(self, total_test_batches, sess):
"""
Runs one testing epoch
:param total_test_batches: Number of batches to train on
:param sess: Session object
:return: mean_testing_categorical_crossentropy_loss and mean_testing_accuracy
"""
total_test_c_loss = 0.
total_test_accuracy = 0.
with tqdm.tqdm(total=total_test_batches) as pbar:
for i in range(total_test_batches):
x_support_set, y_support_set, x_target, y_target = self.data.get_test_batch(augment=True)
c_loss_value, acc = sess.run(
[self.losses[self.one_shot_omniglot.classify], self.losses[self.one_shot_omniglot.dn]],
feed_dict={self.keep_prob: 1.0, self.support_set_images: x_support_set,
self.support_set_labels: y_support_set, self.target_image: x_target,
self.target_label: y_target,
self.training_phase: False, self.rotate_flag: False})
iter_out = "test_loss: {}, test_accuracy: {}".format(c_loss_value, acc)
pbar.set_description(iter_out)
pbar.update(1)
total_test_c_loss += c_loss_value
total_test_accuracy += acc
total_test_c_loss = total_test_c_loss / total_test_batches
total_test_accuracy = total_test_accuracy / total_test_batches
return total_test_c_loss, total_test_accuracy
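# A rough usage sketch, assuming a TF1-style session and a data provider that
# exposes get_train_batch / get_val_batch / get_test_batch as used above:
#
#   builder = ExperimentBuilder(data)
#   _, losses, train_op, init = builder.build_experiment(
#       batch_size=32, classes_per_set=5, samples_per_class=1, fce=True)
#   with tf.Session() as sess:
#       sess.run(init)
#       for epoch in range(epochs):
#           train_loss, train_acc = builder.run_training_epoch(total_train_batches, sess)
#           val_loss, val_acc = builder.run_validation_epoch(total_val_batches, sess)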
|
python
|
from __future__ import annotations
from typing import TYPE_CHECKING
from os import chdir, path
from asdfy import ASDFProcessor, ASDFAccessor
if TYPE_CHECKING:
from obspy import Trace, Stream
if not path.exists('traces.h5') and path.exists('tests/traces.h5'):
chdir('tests')
def func1(stream: Stream):
# save waveform by returning a Stream
return stream
def func2(acc: ASDFAccessor):
    # save waveform by returning a dict of traces keyed by channel
assert acc.fellows and len(acc.fellows) == 9, f'incorrect station number'
for acc2 in acc.fellows:
assert acc2.component == acc.component
assert acc2.ds is acc.ds
output = {}
for trace in acc.stream:
output[trace.stats.channel] = trace
return output
def func3(trace: Trace):
trace.filter('lowpass', freq=1/17)
# save waveform by returning a Trace
return trace
def func4(syn_acc, obs_acc):
syn = syn_acc.trace
obs = obs_acc.trace
data = syn.data - obs.data # type: ignore
stats = syn.stats
assert len(syn_acc.fellows) == 27, f'incorrect station number {len(syn_acc.fellows)}'
assert len(obs_acc.fellows) == 27, f'incorrect station number {len(obs_acc.fellows)}'
for acc in syn_acc.fellows:
assert acc.ds is syn_acc.ds
for acc in obs_acc.fellows:
assert acc.ds is obs_acc.ds
# save as auxiliary data by returning a tuple
return data, {
'misfit': data.std(),
'network': stats.network,
'station': stats.station,
'component': stats.component}
def func5(acc):
from asdfy import ASDFAuxiliary
# save as auxiliary data by returning namedtuple `ASDFAuxiliary`
return ASDFAuxiliary(acc.data, acc.auxiliary.parameters)
def func6(aux_group):
from obspy import Trace, Stream
# save waveform by returning a Trace
traces = []
for cha, aux in aux_group.items():
assert cha[-1] == aux.parameters['component']
traces.append(Trace(aux.data, header=aux.parameters))
return Stream(traces)
def reset():
from subprocess import check_call
check_call('rm -f proc1.h5', shell=True)
check_call('rm -f proc2.h5', shell=True)
check_call('rm -f proc3.h5', shell=True)
check_call('rm -f proc4.h5', shell=True)
check_call('rm -f proc5.h5', shell=True)
check_call('rm -f proc6.h5', shell=True)
def verify():
from numpy.linalg import norm
from pyasdf import ASDFDataSet
with ASDFDataSet('proc1.h5', mode='r', mpi=False) as ds:
assert len(ds.events) == 1
assert hasattr(ds.waveforms['II.BFO'], 'StationXML')
with ASDFDataSet('proc6.h5', mode='r', mpi=False) as ds:
data_proc = ds.waveforms['II.BFO'].test[0].data # type: ignore
with ASDFDataSet('traces_proc.h5', mode='r', mpi=False) as ds:
data_ref = ds.waveforms['II.BFO'].test[0].data # type: ignore
assert norm(data_proc - data_ref) / norm(data_ref) < 1e-4
print('pass')
reset()
def verify_mpi():
from mpi4py.MPI import COMM_WORLD as comm
rank = comm.Get_rank()
if rank == 0:
verify()
def test():
from mpi4py.MPI import COMM_WORLD as comm
rank = comm.Get_rank()
if rank == 0:
reset()
# process stream data
ap = ASDFProcessor('traces.h5', 'proc1.h5', func1, input_type='stream', input_tag='synthetic')
if rank == 0:
print('test1: stream -> stream')
assert len(ap.access()) == 9
ap.run()
# process stream data with more info passed
if rank == 0:
print('test2: accessor -> stream')
ASDFProcessor('traces.h5', 'proc2.h5', func2, input_type='stream', accessor=True).run()
# process trace data
if rank == 0:
print('test3: trace -> trace')
ASDFProcessor('proc2.h5', 'proc3.h5', func3).run()
# process trace data (save with a different tag)
if rank == 0:
print('test4: (trace, trace) -> auxiliary')
ASDFProcessor(('proc1.h5', 'proc3.h5'), 'proc4.h5', func4, accessor=True, output_tag='test').run()
# process auxiliary data with more info passed
if rank == 0:
print('test5: accessor -> auxiliary')
ASDFProcessor('proc4.h5', 'proc5.h5', func5, input_type='auxiliary', accessor=True, input_tag='test').run()
# process auxiliary data
if rank == 0:
print('test6: auxiliary_group -> stream')
ASDFProcessor('proc5.h5', 'proc6.h5', func6, input_type='auxiliary_group').run()
if rank == 0:
verify()
if __name__ == '__main__':
test()
|
python
|
import numpy as np
from starfish.core.expression_matrix.concatenate import concatenate
from starfish.core.expression_matrix.expression_matrix import ExpressionMatrix
from starfish.types import Features
def test_concatenate_two_expression_matrices():
a_data = np.array(
[[0, 1],
[1, 0]]
)
b_data = np.array(
[[0],
[1]]
)
dims = [Features.CELLS, Features.GENES]
a_coords = [(Features.CELLS, [0, 1]), (Features.GENES, ["x", "y"])]
b_coords = [(Features.CELLS, [0, 1]), (Features.GENES, ["x"])]
a = ExpressionMatrix(a_data, dims=dims, coords=a_coords)
b = ExpressionMatrix(b_data, dims=dims, coords=b_coords)
concatenated = concatenate([a, b])
expected = np.array(
[[0, 1],
[1, 0],
[0, np.nan],
[1, np.nan]]
)
np.testing.assert_equal(concatenated.values, expected)
|
python
|
"""The plugin module implements various plugins to extend the behaviour
of community app.
The plugins provided by this module are:
SketchesTab - additional tab to show on the profile pages
LiveCodeExtension - injecting livecode css/js into lesson page
"""
import frappe
from community.plugins import PageExtension, ProfileTab
from community.widgets import Widgets
from .overrides import Sketch
class SketchesTab(ProfileTab):
def get_title(self):
return "Sketches"
def render(self):
sketches = Sketch.get_recent_sketches(owner=self.user.name, limit=16)
context = dict(sketches=sketches, widgets=Widgets())
return frappe.render_template(
"templates/profile/sketches.html",
context)
class LiveCodeExtension(PageExtension):
def render_header(self):
livecode_url = frappe.get_value("LMS Settings", None, "livecode_url")
context = {
"livecode_url": livecode_url
}
return frappe.render_template(
"templates/livecode/extension_header.html",
context)
def render_footer(self):
livecode_url = frappe.get_value("LMS Settings", None, "livecode_url")
context = {
"livecode_url": livecode_url
}
return frappe.render_template(
"templates/livecode/extension_footer.html",
context)
def exercise_renderer(argument):
exercise = frappe.get_doc("Exercise", argument)
context = dict(exercise=exercise)
return frappe.render_template("templates/exercise.html", context)
def image_renderer(argument):
"""Markdown macro for Image.
    Renders the image of an exercise.
    This is a hack to extend the already existing exercise infrastructure
to use for showing images. To distinguish between real exercises and
the exercises used for showing images, the latter ones are prefixed
with `image-`.
usage:
{{ Image("image-flag-of-germany") }}
"""
exercise = frappe.get_doc("Exercise", argument)
context = dict(exercise=exercise)
return frappe.render_template("templates/image.html", context)
def youtube_video_renderer(video_id):
return f"""
<iframe width="560" height="315"
src="https://www.youtube.com/embed/{video_id}"
title="YouTube video player"
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allowfullscreen>
</iframe>
"""
|
python
|
# -*- coding: utf-8 -*-
"""
@description: dictionary-based sentiment classifier
@author: XuMing
"""
import re
import jieba
from jieba import posseg
class DictClassifier:
def __init__(self):
self.__root_path = "data/dict/"
        jieba.load_userdict("data/dict/user.dict")  # custom user dictionary for word segmentation
        # sentiment and auxiliary dictionaries
self.__phrase_dict = self.__get_phrase_dict()
self.__positive_dict = self.__get_dict(self.__root_path + "positive_dict.txt")
self.__negative_dict = self.__get_dict(self.__root_path + "negative_dict.txt")
self.__conjunction_dict = self.__get_dict(self.__root_path + "conjunction_dict.txt")
self.__punctuation_dict = self.__get_dict(self.__root_path + "punctuation_dict.txt")
self.__adverb_dict = self.__get_dict(self.__root_path + "adverb_dict.txt")
self.__denial_dict = self.__get_dict(self.__root_path + "denial_dict.txt")
def classify(self, sentence):
return self.analyse_sentence(sentence)
def analysis_file(self, file_path_in, file_path_out, encoding='utf-8', print_show=False, start=0, end=-1):
results = []
with open(file_path_in, 'r', encoding=encoding) as f:
num_line = 0
for line in f:
                # start position in the corpus
num_line += 1
if num_line < start:
continue
                results.append(self.analyse_sentence(line.strip(), file_path_out, print_show))
                # end position in the corpus
if 0 < end <= num_line:
break
return results
def analyse_sentence(self, sentence, run_out_file_path=None, print_show=False):
        # data structure holding the sentiment analysis result
comment_analysis = {"score": 0}
        # split the comment into clauses
clauses = self.__divide_sentence_to_clause(sentence + '%')
        # run sentiment analysis on each clause
for i in range(len(clauses)):
            # analyse this clause into its own result structure
sub_clause = self.__analyse_clause(clauses[i].replace("。", "."), run_out_file_path, print_show)
            # merge the clause result into the overall structure
comment_analysis["su-clause" + str(i)] = sub_clause
comment_analysis["score"] += sub_clause["score"]
if run_out_file_path is not None:
            # write the full sentence to the output file
self.__write_out_file(run_out_file_path, "\n" + sentence + "\n")
self.__output_analysis(comment_analysis, run_out_file_path)
self.__write_out_file(run_out_file_path, str(comment_analysis) + "\n\n")
if print_show:
print("\n" + sentence)
self.__output_analysis(comment_analysis)
print(comment_analysis)
if comment_analysis["score"] > 0:
return 1
else:
return 0
def __divide_sentence_to_clause(self, sentence):
clauses = self.__split_sentence(sentence)
clauses[-1] = clauses[-1][:-1]
return clauses
def __analyse_clause(self, clauses, run_out_file_path, print_show):
sub_clause = {"score": 0, "positive": [], "negative": [], "conjunction": [], "punctuation": [], "pattern": []}
seg_result = posseg.lcut(clauses)
        # log the word segmentation result
if run_out_file_path is not None:
self.__write_out_file(run_out_file_path, clauses + "\n")
self.__write_out_file(run_out_file_path, str(seg_result) + "\n")
if print_show:
print(clauses)
print(seg_result)
        # sentence pattern: "if only ... it would have been good"
judgement = self.__is_clause_pattern_if_good(clauses)
if judgement != "":
sub_clause["pattern"].append(judgement)
sub_clause["score"] -= judgement["value"]
return sub_clause
        # sentence pattern: "is ... not ..."
judgement = self.__is_clause_pattern_is_not(clauses)
if judgement != "":
sub_clause["pattern"].append(clauses)
sub_clause["score"] -= judgement["value"]
        # sentence pattern: phrase match
judgement = self.__is_clause_pattern_phrase(clauses, seg_result)
if judgement != "":
sub_clause["score"] += judgement["score"]
if judgement["score"] >= 0:
sub_clause["positive"].append(judgement)
elif judgement["score"] < 0:
sub_clause["negative"].append(judgement)
match_result = judgement["key"].split(":")[-1]
i = 0
while i < len(seg_result):
if seg_result[i].word in match_result:
if i + 1 == len(seg_result) or seg_result[i + 1].word in match_result:
del (seg_result[i])
continue
i += 1
        # analyse the remaining segmented words one by one
for i in range(len(seg_result)):
mark, result = self.__analyse_word(seg_result[i].word, seg_result, i)
if mark == 0:
continue
elif mark == 1:
sub_clause["conjunction"].append(result)
elif mark == 2:
sub_clause["punctuation"].append(result)
elif mark == 3:
sub_clause["positive"].append(result)
sub_clause["score"] += result["score"]
elif mark == 4:
sub_clause["negative"].append(result)
sub_clause["score"] -= result["score"]
        # fold the conjunction weights into the score
for conj in sub_clause["conjunction"]:
sub_clause["score"] *= conj["value"]
        # fold the punctuation weights into the score
for punc in sub_clause["punctuation"]:
sub_clause["score"] *= punc["value"]
return sub_clause
@staticmethod
def __is_clause_pattern_if_good(clauses):
re_pattern = re.compile(r".*(要|选)的.+(送|给).*")
match = re_pattern.match(clauses)
if match is not None:
pattern = {"key": "要的是...给的是...", "value": 1}
return pattern
return ""
@staticmethod
def __is_clause_pattern_is_not(clauses):
re_pattern = re.compile(r".*(如果|要是|希望).+就[\u4e00-\u9fa5]+(好|完美)了")
match = re_pattern.match(clauses)
if match is not None:
pattern = {"key": "如果...就好了", "value": 1.0}
return pattern
return ""
def __is_clause_pattern_phrase(self, clauses, seg_result):
for phrase in self.__phrase_dict:
keys = phrase.keys()
to_compile = phrase["key"].replace("……", "[\u4e00-\u9fa5]*")
if "start" in keys:
to_compile = to_compile.replace("*", "{" + phrase["start"] + "," + phrase["end"] + "}")
if "head" in keys:
to_compile = phrase["head"] + to_compile
match = re.compile(to_compile).search(clauses)
if match is not None:
is_continue = True
pos = [flag for word, flag in posseg.cut(match.group())]
if "between_tag" in keys:
if phrase["between_tag"] not in pos and len(pos) > 2:
is_continue = False
if is_continue:
for i in range(len(seg_result)):
if seg_result[i].word in match.group():
try:
if seg_result[i + 1].word in match.group():
return self.__emotional_word_analysis(
phrase["key"] + ":" + match.group(), phrase["value"],
[x for x, y in seg_result], i)
except IndexError:
return self.__emotional_word_analysis(
phrase["key"] + ":" + match.group(), phrase["value"],
[x for x, y in seg_result], i)
return ""
def __emotional_word_analysis(self, core_word, value, segments, index):
        # the word is in a sentiment dictionary, so build a result dict centred on it
orientation = {"key": core_word, "adverb": [], "denial": [], "value": value}
orientation_score = orientation["value"]
        # look at a window of up to three preceding words for negations and adverbs
view_window = index - 1
if view_window > -1:
            # is the preceding word itself a sentiment word?
if segments[view_window] in self.__negative_dict or segments[view_window] in self.__positive_dict:
orientation["score"] = orientation_score
return orientation
            # is the preceding word an adverb?
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 1,
"value": self.__adverb_dict[segments[view_window]]}
orientation["adverb"].append(adverb)
orientation_score *= self.__adverb_dict[segments[view_window]]
            # is the preceding word a negation?
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 1,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].append(denial)
orientation_score *= -1
view_window = index - 2
if view_window > -1:
            # is the word two positions back a sentiment word?
if segments[view_window] in self.__negative_dict or \
segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 2,
"value": self.__adverb_dict[segments[view_window]]}
orientation_score *= self.__adverb_dict[segments[view_window]]
orientation["adverb"].insert(0, adverb)
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 2,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].insert(0, denial)
orientation_score *= -1
            # check for the "not very good" pattern (as opposed to "very not good")
if len(orientation["adverb"]) > 0:
                # if so, apply a damping factor of 0.3
orientation_score *= 0.3
view_window = index - 3
if view_window > -1:
            # is the word three positions back a sentiment word?
if segments[view_window] in self.__negative_dict or segments[view_window] in self.__positive_dict:
orientation['score'] = orientation_score
return orientation
if segments[view_window] in self.__adverb_dict:
adverb = {"key": segments[view_window], "position": 3,
"value": self.__adverb_dict[segments[view_window]]}
orientation_score *= self.__adverb_dict[segments[view_window]]
orientation["adverb"].insert(0, adverb)
elif segments[view_window] in self.__denial_dict:
denial = {"key": segments[view_window], "position": 3,
"value": self.__denial_dict[segments[view_window]]}
orientation["denial"].insert(0, denial)
orientation_score *= -1
            # check for the "not very good" pattern (as opposed to "very not good")
if len(orientation["adverb"]) > 0 and len(orientation["denial"]) == 0:
orientation_score *= 0.3
        # store the final sentiment score
orientation['score'] = orientation_score
        # return the result structure
return orientation
def __analyse_word(self, word, seg_result=None, index=-1):
        # conjunction?
judgement = self.__is_word_conjunction(word)
if judgement != "":
return 1, judgement
        # punctuation?
judgement = self.__is_word_punctuation(word)
if judgement != "":
return 2, judgement
        # positive sentiment word?
judgement = self.__is_word_positive(word, seg_result, index)
if judgement != "":
return 3, judgement
        # negative sentiment word?
judgement = self.__is_word_negative(word, seg_result, index)
if judgement != "":
return 4, judgement
return 0, ""
def __is_word_conjunction(self, word):
if word in self.__conjunction_dict:
conjunction = {"key": word, "value": self.__conjunction_dict[word]}
return conjunction
return ""
def __is_word_punctuation(self, word):
if word in self.__punctuation_dict:
punctuation = {"key": word, "value": self.__punctuation_dict[word]}
return punctuation
return ""
def __is_word_positive(self, word, seg_result, index):
"""
        Check whether the word is in the positive sentiment dictionary.
:param word:
:param seg_result:
:param index:
:return:
"""
if word in self.__positive_dict:
return self.__emotional_word_analysis(word, self.__positive_dict[word],
[x for x, y in seg_result], index)
return ""
def __is_word_negative(self, word, seg_result, index):
"""
        Check whether the word is in the negative sentiment dictionary.
:param word:
:param seg_result:
:param index:
:return:
"""
if word in self.__negative_dict:
return self.__emotional_word_analysis(word, self.__negative_dict[word],
[x for x, y in seg_result], index)
return ""
def __output_analysis(self, comment_analysis, run_out_file_path=None):
output = "Score:" + str(comment_analysis["score"]) + "\n"
for i in range(len(comment_analysis) - 1):
output += "Sub-clause" + str(i) + ": "
clause = comment_analysis["su-clause" + str(i)]
if len(clause["conjunction"]) > 0:
output += "conjunction:"
for punctuation in clause["conjunction"]:
output += punctuation["key"] + " "
if len(clause["positive"]) > 0:
output += "positive:"
for positive in clause["positive"]:
if len(positive["denial"]) > 0:
for denial in positive["denial"]:
output += denial["key"] + str(denial["position"]) + "-"
if len(positive['adverb']) > 0:
for adverb in positive["adverb"]:
output += adverb["key"] + str(adverb["position"]) + "-"
output += positive["key"] + " "
if len(clause["negative"]) > 0:
output += "negative:"
for negative in clause["negative"]:
if len(negative["denial"]) > 0:
for denial in negative["denial"]:
output += denial["key"] + str(denial["position"]) + "-"
if len(negative["adverb"]) > 0:
for adverb in negative["adverb"]:
output += adverb["key"] + str(adverb["position"]) + "-"
output += negative["key"] + " "
if len(clause["punctuation"]) > 0:
output += "punctuation:"
for pattern in clause["pattern"]:
output += pattern["key"] + " "
output += "\n"
if run_out_file_path is not None:
self.__write_out_file(run_out_file_path, output)
else:
print(output)
@staticmethod
def __write_out_file(path, info, encoding="utf-8"):
with open(path, "a", encoding=encoding) as f:
f.write("%s" % info)
@staticmethod
def __split_sentence(sentence):
pattern = re.compile("[,,。.%、!!??;;~~…….… ]+")
split_clauses = pattern.split(sentence.strip())
punctuations = pattern.findall(sentence.strip())
try:
split_clauses.remove("")
except ValueError:
pass
punctuations.append("")
clauses = ["".join(x) for x in zip(split_clauses, punctuations)]
return clauses
def __get_phrase_dict(self):
"""
        Load the phrase dictionary.
:return:
"""
sentiment_dict = []
pattern = re.compile(r"\s+")
with open(self.__root_path + "phrase_dict.txt", "r", encoding="utf-8") as f:
for line in f:
phrase = {}
result = pattern.split(line.strip())
if len(result) >= 2:
phrase["key"] = result[0]
phrase["value"] = float(result[1])
for i, temp_split in enumerate(result):
if i < 2:
continue
else:
a, b = temp_split.split(":")
phrase[a] = b
sentiment_dict.append(phrase)
return sentiment_dict
@staticmethod
def __get_dict(path, encoding="utf-8"):
"""
        Build a sentiment dictionary from a file.
:param path:
:param encoding:
:return:
"""
sentiment_dict = {}
pattern = re.compile(r"\s+")
with open(path, encoding=encoding) as f:
for line in f:
result = pattern.split(line.strip())
if len(result) == 2:
sentiment_dict[result[0]] = float(result[1])
return sentiment_dict
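# A minimal usage sketch, assuming the dictionary files under data/dict/ are
# available; classify() returns 1 for a positive sentence and 0 otherwise.
if __name__ == '__main__':
    classifier = DictClassifier()
    print(classifier.classify("这个手机很好用"))  # "this phone works really well"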
|
python
|
import discord
import logging
import os
class ValorantBot(discord.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.BOT_LOG = os.getenv('BOT_LOG')
        if self.BOT_LOG in (None, '', 'INFO'):
            logging.getLogger().setLevel(logging.INFO)
        elif self.BOT_LOG == 'DEBUG':
            logging.getLogger().setLevel(logging.DEBUG)
        else:
            logging.fatal('Neither INFO nor DEBUG specified for log level, refusing to start.')
            raise SystemExit(1)
self.BOT_TOKEN = os.getenv('BOT_TOKEN')
logging.info("ValorantBot initialized.")
def run(self, *args, **kwargs):
        if self.BOT_TOKEN is None or self.BOT_TOKEN == '':
            logging.fatal('Supply environment variable BOT_TOKEN to authenticate.')
            raise SystemExit(1)
        super().run(self.BOT_TOKEN)
async def on_ready(self):
logging.info(f'{self.user} has connected to server')
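# A minimal launch sketch (assumptions: default intents are sufficient for this
# bot, and BOT_TOKEN is set in the environment).
if __name__ == '__main__':
    ValorantBot(intents=discord.Intents.default()).run()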
|
python
|
import unittest
from typing import List, Text
INPUT_FILE = "input.txt"
TEST_INPUT_SHORT = "test_input_short.txt"
TEST_INPUT_LONG = "test_input_long.txt"
def getJolts(inputFile: Text):
jolts: List[int] = []
with open(inputFile, "r") as inputFile:
lines = inputFile.readlines()
for line in lines:
line = line.strip("\n")
jolts.append(int(line))
return jolts
def getJoltageDifferenceCountersProduct(jolts: List[int]):
if not jolts:
raise ValueError("No adapters found.")
jolts.append(max(jolts) + 3)
numAdaptersWithOneJoltageDifference = 0
numAdaptersWithThreeJoltageDifference = 0
currentJoltage = 0
joltsSet = set(jolts)
while currentJoltage != max(jolts):
if currentJoltage + 1 in joltsSet:
numAdaptersWithOneJoltageDifference += 1
currentJoltage += 1
elif currentJoltage + 2 in joltsSet:
currentJoltage += 2
elif currentJoltage + 3 in joltsSet:
numAdaptersWithThreeJoltageDifference += 1
currentJoltage += 3
else:
raise ValueError("Connecting adapters is not possible.")
return numAdaptersWithOneJoltageDifference * numAdaptersWithThreeJoltageDifference
def countDistictWaysToArrangeAdapters(jolts: List[int]):
jolts.sort()
maxJoltage: int = max(jolts)
memo: List[int] = [0] * (maxJoltage + 1)
memo[0] = 1
for jolt in jolts:
memo[jolt] = memo[jolt - 1] + memo[jolt - 2] + memo[jolt - 3]
return memo[maxJoltage]
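# Worked example for the recurrence above (not part of the puzzle input): for
# jolts = [1, 2, 3] the table fills as memo = [1, 1, 2, 4], since
# memo[3] = memo[2] + memo[1] + memo[0], i.e. 4 distinct arrangements.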
def main():
jolts: List[int] = getJolts(INPUT_FILE)
print(getJoltageDifferenceCountersProduct(jolts)) # 2414
print(countDistictWaysToArrangeAdapters(jolts)) # 21156911906816
class JoltsTester(unittest.TestCase):
def test_getJoltageDifferenceCountersProduct_shortInput_correctProductReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_SHORT)
self.assertEqual(35, getJoltageDifferenceCountersProduct(jolts))
def test_getJoltageDifferenceCountersProduct_longInput_correctProductReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_LONG)
self.assertEqual(220, getJoltageDifferenceCountersProduct(jolts))
def test_countDistinctWaysToArrangeAdapters_shortInput_correctCountReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_SHORT)
self.assertEqual(8, countDistictWaysToArrangeAdapters(jolts))
def test_countDistinctWaysToArrangeAdapters_longInput_correctCountReturned(self):
jolts: List[int] = getJolts(TEST_INPUT_LONG)
self.assertEqual(19208, countDistictWaysToArrangeAdapters(jolts))
if __name__ == '__main__':
# main()
unittest.main()
|
python
|
from resolwe.process import IntegerField, Process, StringField
class PythonProcessDataIdBySlug(Process):
"""The process is used for testing get_data_id_by_slug."""
slug = "test-python-process-data-id-by-slug"
name = "Test Python Process Data ID by Slug"
version = "1.0.0"
process_type = "data:python:dataidbyslug"
requirements = {
"resources": {
"network": True,
},
}
class Input:
"""Input fields."""
slug = StringField(label="Slug")
class Output:
data_id = IntegerField(label="Data ID")
def run(self, inputs, outputs):
data_id = self.get_data_id_by_slug(inputs.slug)
outputs.data_id = data_id
|
python
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import pandas as pd
style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
ax4 = fig.add_subplot(2,2,4)
def animate(i):
df = pd.read_csv('real time stock data.csv')
ys = df.iloc[1:, 2].values
xs = list(range(1, len(ys)+1))
ax1.clear()
ax1.plot(xs, ys)
ax1.set_title('BSE', fontsize=12 )
ys = df.iloc[1:, 3].values
ax2.clear()
ax2.plot(xs, ys)
ax2.set_title('Nifty', fontsize=12 )
ys = df.iloc[1:, 4].values
ax3.clear()
ax3.plot(xs, ys)
ax3.set_title('DJI', fontsize=12 )
ys = df.iloc[1:, 5].values
ax4.clear()
ax4.plot(xs, ys)
ax4.set_title('S&P', fontsize=12 )
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.tight_layout()
plt.show()
|
python
|
import os
from src.MarkdownFile import MarkdownFile
class Parser:
def __init__(self, folderPath='.', ignoredDirectories=['.obsidian', '.git']):
self._folderPath = folderPath
self._ignoredDirectories = ignoredDirectories
        self.mdFiles: list[MarkdownFile] = []
self._retrieveMarkdownFiles()
def _retrieveMarkdownFiles(self):
"""Directory traversal to find all .md files and stores them in _mdFiles
Full credit goes to: https://github.com/archelpeg
"""
self.mdFiles = []
for dirpath, _, files in os.walk(self._folderPath):
# print(f'Found directory: {dirpath}, and ignored={self._isDirectoryIgnored(dirpath)}')
if not self._isDirectoryIgnored(dirpath):
for file_name in files:
if file_name.endswith('.md'):
normalised_path = os.path.normpath(dirpath + "/" + file_name) # normalises path for current file system
file = MarkdownFile(file_name, normalised_path)
self.mdFiles.append(file)
def _isDirectoryIgnored(self, directory: str):
"""Returns a boolean indicating if the directory specified is in self._ignoredDirectories"""
directory = directory.replace('\\', '/')
normalisedFolderPath = self._folderPath.replace('\\', '/')
splitDirectory = directory.split('/')
splitFolderPath = normalisedFolderPath.split('/')
# Remove folderPath in order to search uniquely in subdirectories
for el in splitFolderPath:
splitDirectory.remove(el)
# Return if the subdirectory starts with a element in ignoredDirectories
if len(splitDirectory) != 0:
return splitDirectory[0] in self._ignoredDirectories
else:
return False
def searchFilesWithTag(self, tag=None):
"""Find all files containing a specific tag
"""
files = set()
if tag == None:
return files
for file in self.mdFiles:
if tag in file.tags:
files.add(file)
return files
def findSubFilesForFiles(self, files: set):
"""Iteration to grow files while it can"""
while not self._growSubFiles(files):
pass
return files
def _growSubFiles(self, files):
"""Add new files found following links in files and stores them in files"""
addedFiles = set()
for file in files:
linkedFiles = file.links
            linkedFiles = [link for link in linkedFiles]  # plain list of link names from this file
linkedFiles = [file # Get the full links
for file in self.mdFiles
for link in linkedFiles
if link in file.fileName
]
linkedFiles = set(linkedFiles) - files # Only keep not added files
for link in linkedFiles:
addedFiles.add(link)
for file in addedFiles:
files.add(file)
return len(addedFiles) == 0
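# A minimal usage sketch (assumptions: an Obsidian-style vault at ./vault and a
# MarkdownFile class exposing tags/links/fileName as used above; the exact tag
# string format depends on how MarkdownFile parses tags):
#
#   parser = Parser(folderPath='vault')
#   tagged = parser.searchFilesWithTag('#project')
#   related = parser.findSubFilesForFiles(tagged)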
|
python
|
# Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
import pero
class DrawTest(pero.Graphics):
"""Test case for text properties drawing."""
def draw(self, canvas, *args, **kwargs):
"""Draws the test."""
# clear canvas
canvas.fill(pero.colors.White)
# set scaling
canvas.draw_scale = 1
canvas.line_scale = 3
canvas.font_scale = 1.5
# init glyphs
origin = pero.Plus(
size = 20,
line_width = 1,
line_color = pero.colors.Red)
label = pero.Text(
angle = pero.rads(20),
font_size = 12,
font_name = "Arial",
text_spacing = .5,
text_bgr_color = pero.colors.Grey.opaque(.3))
rect = pero.Rect(
line_color = pero.colors.Green,
fill_color = None)
# init coords
padding = 40
# test alignment and baseline
y = padding
for base in (pero.TEXT_BASE_TOP, pero.TEXT_BASE_MIDDLE, pero.TEXT_BASE_BOTTOM):
x = padding
for align in (pero.TEXT_ALIGN_LEFT, pero.TEXT_ALIGN_CENTER, pero.TEXT_ALIGN_RIGHT):
text = "%s\n%s" % (base.upper(), align.upper())
label.draw(canvas, x=x, y=y, text=text, text_align=align, text_base=base)
bbox = canvas.get_text_bbox(text, x, y, label.angle)
rect.draw(canvas, x=bbox.x, y=bbox.y, width=bbox.width, height=bbox.height)
origin.draw(canvas, x=x, y=y)
x += 250/canvas.draw_scale
y += 150/canvas.draw_scale
# run test
if __name__ == '__main__':
pero.debug(DrawTest(), 'show', "Text", 700, 370)
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
import yara
from warehouse.malware.checks.setup_patterns import check as c
from warehouse.malware.models import (
MalwareCheckState,
VerdictClassification,
VerdictConfidence,
)
from .....common.db.malware import MalwareCheckFactory
from .....common.db.packaging import FileFactory
def test_initializes(db_session):
check_model = MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
assert check.id == check_model.id
assert isinstance(check._yara_rules, yara.Rules)
@pytest.mark.parametrize(
("obj", "file_url"), [(None, pretend.stub()), (pretend.stub(), None)]
)
def test_scan_missing_kwargs(db_session, obj, file_url):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
with pytest.raises(c.FatalCheckException):
check.scan(obj=obj, file_url=file_url)
def test_scan_non_sdist(db_session):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="bdist_wheel")
check.scan(obj=file, file_url=pretend.stub())
assert check._verdicts == []
def test_scan_no_setup_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c, "extract_file_content", pretend.call_recorder(lambda *a: None)
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Indeterminate
assert check._verdicts[0].confidence == VerdictConfidence.High
assert (
check._verdicts[0].message
== "sdist does not contain a suitable setup.py for analysis"
)
def test_scan_benign_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(lambda *a: b"this is a benign string"),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Benign
assert check._verdicts[0].confidence == VerdictConfidence.Low
assert check._verdicts[0].message == "No malicious patterns found in setup.py"
def test_scan_matched_content(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(
lambda *a: b"this looks suspicious: os.system('cat /etc/passwd')"
),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Threat
assert check._verdicts[0].confidence == VerdictConfidence.High
assert check._verdicts[0].message == "process_spawn_in_setup"
|
python
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
AutoStartWaitHeartbeatSetting = Enum(
'no',
'systemDefault',
'yes',
)
|
python
|
"""
Vulnerability service interfaces and implementations for `pip-audit`.
"""
from .interface import (
Dependency,
ResolvedDependency,
ServiceError,
SkippedDependency,
VulnerabilityResult,
VulnerabilityService,
)
from .osv import OsvService
from .pypi import PyPIService
__all__ = [
"Dependency",
"ResolvedDependency",
"ServiceError",
"SkippedDependency",
"VulnerabilityResult",
"VulnerabilityService",
"OsvService",
"PyPIService",
]
|
python
|
#!/usr/bin/env python
import sys
from shutil import rmtree
from os.path import abspath, dirname, join
import django
from django.conf import settings
sys.path.insert(0, abspath(dirname(__file__)))
if not settings.configured:
media_root = join(abspath(dirname(__file__)), 'test_files')
rmtree(media_root, ignore_errors=True)
installed_apps = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
'simple_history',
'simple_history.tests',
'simple_history.tests.external',
)
auth_user_model = 'auth.User'
if django.VERSION >= (1, 5):
installed_apps += ('simple_history.tests.custom_user', )
auth_user_model = 'custom_user.CustomUser'
settings.configure(
ROOT_URLCONF='simple_history.tests.urls',
MEDIA_ROOT=media_root,
STATIC_URL='/static/',
INSTALLED_APPS=installed_apps,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
AUTH_USER_MODEL=auth_user_model
)
def main():
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(
verbosity=1, interactive=True, failfast=False).run_tests(['tests'])
sys.exit(failures)
if __name__ == "__main__":
main()
|
python
|
import lightgbm as lgbm
from sklearn.model_selection import StratifiedKFold
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
def load_data():
real_df = pd.read_csv(
"feat/real.txt",
delimiter=" ",
header=None,
names=["a", "b", "c", "d", "e", "f"],
index_col=False,
float_precision="high",
)
real_df["target"] = 1
fake_df = pd.read_csv(
"feat/fake.txt",
delimiter=" ",
header=None,
names=["a", "b", "c", "d", "e", "f"],
index_col=False,
float_precision="high",
)
fake_df["target"] = 0
real_df.head()
df = pd.concat([real_df, fake_df], ignore_index=True, sort=False)
del real_df, fake_df
y = df.target.values
df = df.drop("target", axis="columns").values
return df, y
data, target = load_data()
NFOLDS = 5
kfold = StratifiedKFold(n_splits=NFOLDS, shuffle=True, random_state=218)
learning_rate = 0.1
num_leaves = 15
min_data_in_leaf = 2000
feature_fraction = 0.6
num_boost_round = 100
params = {"objective": "binary",
"boosting_type": "gbdt",
"learning_rate": learning_rate,
"num_leaves": num_leaves,
"max_bin": 256,
"feature_fraction": feature_fraction,
"drop_rate": 0.1,
"is_unbalance": False,
"max_drop": 50,
"min_child_samples": 10,
"min_child_weight": 150,
"min_split_gain": 0,
"subsample": 0.9,
"metric": 'binary',
"verbose": 5,
"n_jobs": -1
}
x_score = []
final_cv_train = np.zeros(len(data))
kf = kfold.split(data, target)
for i, (train_fold, validate) in enumerate(kf):
X_train, X_validate, label_train, label_validate = \
data[train_fold, :], data[validate, :], target[train_fold], target[validate]
dtrain = lgbm.Dataset(X_train, label_train)
dvalid = lgbm.Dataset(X_validate, label_validate, reference=dtrain)
bst = lgbm.train(params, dtrain, num_boost_round, valid_sets=dvalid,
early_stopping_rounds=100)
preds = bst.predict(X_validate)
print(preds.shape)
print(preds)
print(f"Fold {i+1} score {accuracy_score(label_validate, preds)}")
|
python
|
import datetime as dt
import pandas as pd

class BudgetFiscalYear():
'''Class to describe the federal fiscal year'''
__base = None
__bfy = None
__efy = None
__today = None
__date = None
__startdate = None
__enddate = None
__expiration = None
__weekends = 0
__workdays = 0
__year = None
__month = None
__day = None
__holidays = None
__data = None
__dataframe = None
@property
def firstyear( self ):
if self.__base is not None:
return self.__bfy
@firstyear.setter
def firstyear( self, yr ):
if yr is not None:
self.__bfy = str( yr )
self.__data[ 'firstyear' ] = self.__bfy
@property
def lastyear( self ):
if self.__efy is not None:
return self.__efy
@lastyear.setter
def lastyear( self, yr ):
if yr is not None:
self.__efy = str( yr )
self.__data[ 'lastyear' ] = self.__efy
@property
def calendaryear( self ):
if self.__year:
return self.__year
@calendaryear.setter
def calendaryear( self, yr ):
if yr is not None:
self.__year = str( yr )
self.__data[ 'calendaryear' ] = self.__year
@property
def startdate( self ):
if isinstance( self.__startdate, dt.date ):
return self.__startdate
@startdate.setter
def startdate( self, start ):
if isinstance( start, dt.date ):
self.__startdate = start
self.__data[ 'startdate' ] = self.__startdate
@property
def enddate( self ):
if isinstance( self.__enddate, dt.date ):
return self.__enddate
@enddate.setter
def enddate( self, end ):
if isinstance( end, dt.date ):
self.__enddate = end
self.__data[ 'enddate' ] = self.__enddate
@property
def expiration( self ):
if isinstance( self.__expiration, dt.date ):
return self.__expiration
@expiration.setter
def expiration( self, exp ):
if isinstance( exp, dt.date ):
self.__expiration = exp
self.__data[ 'expiration' ] = self.__expiration
@property
def weekends( self ):
if self.__weekends is not None:
return self.__weekends
@weekends.setter
def weekends( self, end ):
if isinstance( end, int ):
self.__weekends = end
self.__data[ 'weekends' ] = self.__weekends
@property
def workdays( self ):
if self.__workdays is not None:
return float( self.__workdays )
@workdays.setter
def workdays( self, work ):
if isinstance( work, int ):
self.__workdays = work
self.__data[ 'workdays' ] = self.__workdays
@property
def date( self ):
if isinstance( self.__date, dt.date ):
return self.__date
@date.setter
def date( self, today ):
if isinstance( today, dt.date ):
self.__date = today
self.__data[ 'date' ] = self.__date
@property
def day( self ):
if self.__day is not None:
return self.__day
@day.setter
def day( self, today ):
if isinstance( today, dt.date ):
self.__day = today
self.__data[ 'day' ] = self.__day
@property
def month( self ):
if self.__month is not None:
return self.__month
@property
def holidays( self ):
if self.__holidays is not None:
return self.__holidays
@property
def data( self ):
if self.__data is not None:
return self.__data
@data.setter
def data( self, src ):
if isinstance( src, pd.DataFrame ):
self.__data = src
@property
def table( self ):
if self.__dataframe is not None:
return self.__dataframe
def __init__( self, bfy ):
self.__today = dt.date.today()
self.__base = str( bfy )
self.__date = self.__today
self.__year = int( self.__base )
self.__day = self.__date.day
self.__month = self.__date.month
self.__startdate = dt.date( self.__year, 10, 1 )
self.__bfy = str( self.__startdate.year )
self.__enddate = dt.date( self.__year + 1, 9, 30 )
self.__efy = str( self.__enddate.year )
self.__data = { 'base': self.__base,
'date': self.__date,
'calendaryear': self.__year,
'day': self.__day,
'month': self.__month,
'startdate': self.__startdate,
'enddate': self.__enddate }
self.__dataframe = pd.DataFrame
def __str__( self ):
return str( self.__year )
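# A minimal usage sketch: the federal fiscal year runs from 1 October of the
# base year through 30 September of the following calendar year.
if __name__ == '__main__':
    fy = BudgetFiscalYear('2023')
    print(fy.firstyear, fy.lastyear)   # 2023 2024
    print(fy.startdate, fy.enddate)    # 2023-10-01 2024-09-30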
|
python
|
# import os
# import shutil
# from django.test import TestCase
# from django_dicom.data_import.local_import import LocalImport
# from django_dicom.models.image import Image
# from tests.fixtures import TEST_FILES_PATH, TEST_IMAGE_PATH, TEST_ZIP_PATH
# TESTS_DIR = os.path.normpath("./tests")
# TEMP_FILES = os.path.join(TESTS_DIR, "tmp*.dcm")
# IMPORTED_DIR = os.path.join(TESTS_DIR, "MRI")
# class LocalImportTestCase(TestCase):
# """
# Tests for the :class:`~django_dicom.data_import.local_import.LocalImport` class,
# which is meant to provide methods to facilitate data import.
# """
# def tearDown(self):
# """
# Tries to remove the :class:`~django_dicom.models.image.Image` instances
# that may have been created during each test, as well as the destination
# directory.
# For more information see unittest's :meth:`~unittest.TestCase.tearDown` method.
# """
# Image.objects.all().delete()
# try:
# shutil.rmtree(IMPORTED_DIR)
# except FileNotFoundError:
# pass
# def test_initialization(self):
# """
# Tests that the :class:`~django_dicom.data_import.local_import.LocalImport`
# class is initialized properly.
# """
# instance = LocalImport(TEST_IMAGE_PATH)
# self.assertEqual(instance.path, TEST_IMAGE_PATH)
# def test_import_local_dcm(self):
# """
# Tests importing a single DICOM image from some path using
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_local_dcm`.
# """
# image, created = LocalImport.import_local_dcm(TEST_IMAGE_PATH)
# self.assertTrue(created)
# self.assertIsInstance(image, Image)
# # Also check that the created instance is updated
# self.assertIsNotNone(image.uid)
# self.assertIsNotNone(image.series)
# self.assertIsNotNone(image.series.study)
# self.assertIsNotNone(image.series.patient)
# def test_import_local_zip_archive(self):
# """
# Tests importing DICOM images from a single ZIP archive using
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_local_zip_archive`.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport.import_local_zip_archive(TEST_ZIP_PATH, verbose=False)
# # The ZIP archive contains 3 images
# self.assertEqual(Image.objects.count(), 3)
# def test_path_generator_without_extension(self):
# """
# Tests the :meth:`~django_dicom.data_import.local_import.LocalImport.path_generator`
# method with no *extension* parameter setting.
# """
# counter = 0
# for path in LocalImport(TEST_FILES_PATH).path_generator():
# is_valid_path = os.path.isfile(path)
# self.assertTrue(is_valid_path)
# is_under_base_dir = path.startswith(TEST_FILES_PATH)
# self.assertTrue(is_under_base_dir)
# counter += 1
# # There are 6 files in the given path
# self.assertEqual(counter, 6)
# def test_path_generator_with_extension(self):
# """
# Tests the :meth:`~django_dicom.data_import.local_import.LocalImport.path_generator`
# method with the *extension* parameter set.
# """
# # A dictionary of extensions and the number of files we expect
# extensions = {"zip": 2, "dcm": 4}
# for extension in extensions:
# counter = 0
# generator = LocalImport(TEST_FILES_PATH).path_generator(extension=extension)
# for path in generator:
# is_valid_path = os.path.isfile(path)
# self.assertTrue(is_valid_path)
# is_under_base_dir = path.startswith(TEST_FILES_PATH)
# self.assertTrue(is_under_base_dir)
# counter += 1
# self.assertEqual(counter, extensions.get(extension))
# def test_import_dcm_files(self):
# """
# Tests importing multiple DICOM images at once using the
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_dcm_files`
# method.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).import_dcm_files(verbose=False)
# # There are 4 DICOM images in the test files directory.
# self.assertEqual(Image.objects.count(), 4)
# def test_import_zip_archives(self):
# """
# Tests importing DICOM images from multiple ZIP archives at once using the
# :meth:`~django_dicom.data_import.local_import.LocalImport.import_zip_archives`
# method.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).import_zip_archives(verbose=False)
# # The ZIP archives contain a total of 4 (unique) DICOM images.
# self.assertEqual(Image.objects.count(), 4)
# def test_run_with_zip_archives(self):
# """
# Tests the :class:`~django_dicom.data_import.local_import.LocalImport` class's
# :meth:`~django_dicom.data_import.local_import.LocalImport.run` method when
# set to include ZIP archives.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).run(import_zip=True, verbose=False)
# # The test files directory contains a total of 8 (unique) DICOM images.
# self.assertEqual(Image.objects.count(), 8)
# def test_run_without_zip_archives(self):
# """
# Tests the :class:`~django_dicom.data_import.local_import.LocalImport` class's
# :meth:`~django_dicom.data_import.local_import.LocalImport.run` method when
# set to exclude ZIP archives.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).run(import_zip=False, verbose=False)
# # There are 4 DICOM images in the test files directory.
# self.assertEqual(Image.objects.count(), 4)
# def test_run_default_configuration(self):
# """
# Tests the :class:`~django_dicom.data_import.local_import.LocalImport` class's
# :meth:`~django_dicom.data_import.local_import.LocalImport.run` method's
# default configuration is to include ZIP archives.
# """
# self.assertEqual(Image.objects.count(), 0)
# LocalImport(TEST_FILES_PATH).run(verbose=False)
# # The test files directory contains a total of 8 (unique) DICOM images.
# self.assertEqual(Image.objects.count(), 8)
|
python
|
import os
import joblib
import pandas as pd
import numpy as np
from dataclasses import dataclass
from sklearn.preprocessing import RobustScaler
from sklearn.feature_selection import VarianceThreshold
from rdkit import Chem
from rdkit.Chem import MACCSkeys
from rdkit.Chem import MolFromSmarts
from mordred import Calculator, descriptors
# VARIABLES
PATH = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = os.path.abspath(os.path.join(PATH, "..", "data"))
# PROCESSING FUNCTIONS
MAX_NA = 0.2
class NanFilter(object):
def __init__(self):
self._name = "nan_filter"
def fit(self, X):
max_na = int((1 - MAX_NA) * X.shape[0])
idxs = []
for j in range(X.shape[1]):
c = np.sum(np.isnan(X[:, j]))
if c > max_na:
continue
else:
idxs += [j]
self.col_idxs = idxs
def transform(self, X):
return X[:, self.col_idxs]
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
class Scaler(object):
def __init__(self):
self._name = "scaler"
self.abs_limit = 10
self.skip = False
def set_skip(self):
self.skip = True
def fit(self, X):
if self.skip:
return
self.scaler = RobustScaler()
self.scaler.fit(X)
def transform(self, X):
if self.skip:
return X
X = self.scaler.transform(X)
X = np.clip(X, -self.abs_limit, self.abs_limit)
return X
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
class Imputer(object):
def __init__(self):
self._name = "imputer"
self._fallback = 0
def fit(self, X):
ms = []
for j in range(X.shape[1]):
vals = X[:, j]
mask = ~np.isnan(vals)
vals = vals[mask]
if len(vals) == 0:
m = self._fallback
else:
m = np.median(vals)
ms += [m]
self.impute_values = np.array(ms)
def transform(self, X):
for j in range(X.shape[1]):
mask = np.isnan(X[:, j])
X[mask, j] = self.impute_values[j]
return X
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
class VarianceFilter(object):
def __init__(self):
self._name = "variance_filter"
def fit(self, X):
self.sel = VarianceThreshold()
self.sel.fit(X)
self.col_idxs = self.sel.transform([[i for i in range(X.shape[1])]]).ravel()
def transform(self, X):
return self.sel.transform(X)
def save(self, file_name):
joblib.dump(self, file_name)
def load(self, file_name):
return joblib.load(file_name)
# MORDRED DESCRIPTORS
def mordred_featurizer(smiles):
calc = Calculator(descriptors, ignore_3D=True)
df = calc.pandas([Chem.MolFromSmiles(smi) for smi in smiles])
return df
class MordredDescriptor(object):
def __init__(self):
self.nan_filter = NanFilter()
self.imputer = Imputer()
self.variance_filter = VarianceFilter()
self.scaler = Scaler()
def fit(self, smiles):
df = mordred_featurizer(smiles)
X = np.array(df, dtype=np.float32)
self.nan_filter.fit(X)
X = self.nan_filter.transform(X)
self.imputer.fit(X)
X = self.imputer.transform(X)
self.variance_filter.fit(X)
X = self.variance_filter.transform(X)
self.scaler.fit(X)
X = self.scaler.transform(X)
self.features = list(df.columns)
self.features = [self.features[i] for i in self.nan_filter.col_idxs]
self.features = [self.features[i] for i in self.variance_filter.col_idxs]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
df = mordred_featurizer(smiles)
X = np.array(df, dtype=np.float32)
X = self.nan_filter.transform(X)
X = self.imputer.transform(X)
X = self.variance_filter.transform(X)
X = self.scaler.transform(X)
return pd.DataFrame(X, columns=self.features)
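# Usage sketch (SMILES are illustrative; mordred can be slow on large sets).
# fit() learns the NaN filter, median imputer, variance filter and robust scaler,
# and transform() reuses the fitted pipeline:
#   desc = MordredDescriptor()
#   train_df = desc.fit(["CCO", "c1ccccc1", "CC(=O)O"])
#   test_df = desc.transform(["CCN"])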
# CLASSIC DESCRIPTORS
@dataclass
class Descriptors:
"""Molecular descriptors"""
#: Descriptor type
descriptor_type: str
#: Descriptor values
descriptors: tuple
    #: Descriptor names
    descriptor_names: tuple
    #: t-statistics for each molecule
    tstats: tuple = ()
def _calculate_rdkit_descriptors(mol):
from rdkit.ML.Descriptors import MoleculeDescriptors # type: ignore
dlist = [
"NumHDonors",
"NumHAcceptors",
"MolLogP",
"NumHeteroatoms",
"RingCount",
"NumRotatableBonds",
]
c = MoleculeDescriptors.MolecularDescriptorCalculator(dlist)
d = c.CalcDescriptors(mol)
def calc_aromatic_bonds(mol):
return sum(1 for b in mol.GetBonds() if b.GetIsAromatic())
def _create_smarts(SMARTS):
s = ",".join("$(" + s + ")" for s in SMARTS)
_mol = MolFromSmarts("[" + s + "]")
return _mol
def calc_acid_groups(mol):
acid_smarts = (
"[O;H1]-[C,S,P]=O",
"[*;-;!$(*~[*;+])]",
"[NH](S(=O)=O)C(F)(F)F",
"n1nnnc1",
)
pat = _create_smarts(acid_smarts)
return len(mol.GetSubstructMatches(pat))
def calc_basic_groups(mol):
basic_smarts = (
"[NH2]-[CX4]",
"[NH](-[CX4])-[CX4]",
"N(-[CX4])(-[CX4])-[CX4]",
"[*;+;!$(*~[*;-])]",
"N=C-N",
"N-C=N",
)
pat = _create_smarts(basic_smarts)
return len(mol.GetSubstructMatches(pat))
def calc_apol(mol, includeImplicitHs=True):
# atomic polarizabilities available here:
# https://github.com/mordred-descriptor/mordred/blob/develop/mordred/data/polarizalibity78.txt
ap = os.path.join(DATA_PATH, "atom_pols.txt")
with open(ap, "r") as f:
atom_pols = [float(x) for x in next(f).split(",")]
res = 0.0
for atom in mol.GetAtoms():
anum = atom.GetAtomicNum()
            if anum < len(atom_pols):
apol = atom_pols[anum]
if includeImplicitHs:
apol += atom_pols[1] * atom.GetTotalNumHs(includeNeighbors=False)
res += apol
else:
raise ValueError(f"atomic number {anum} not found")
return res
d = d + (
calc_aromatic_bonds(mol),
calc_acid_groups(mol),
calc_basic_groups(mol),
calc_apol(mol),
)
return d
def classic_featurizer(smiles):
names = tuple(
[
"number of hydrogen bond donor",
"number of hydrogen bond acceptor",
"Wildman-Crippen LogP",
"number of heteroatoms",
"ring count",
"number of rotatable bonds",
"aromatic bonds count",
"acidic group count",
"basic group count",
"atomic polarizability",
]
)
mols = [Chem.MolFromSmiles(smi) for smi in smiles]
R = []
cols = None
for m in mols:
descriptors = _calculate_rdkit_descriptors(m)
descriptor_names = names
descriptors = Descriptors(
descriptor_type="Classic",
descriptors=descriptors,
descriptor_names=descriptor_names,
)
R += [list(descriptors.descriptors)]
if cols is None:
cols = list(descriptors.descriptor_names)
data = pd.DataFrame(R, columns=cols)
return data
class ClassicDescriptor(object):
def __init__(self):
self.nan_filter = NanFilter()
self.imputer = Imputer()
self.variance_filter = VarianceFilter()
self.scaler = Scaler()
def fit(self, smiles):
df = classic_featurizer(smiles)
X = np.array(df, dtype=np.float32)
self.nan_filter.fit(X)
X = self.nan_filter.transform(X)
self.imputer.fit(X)
X = self.imputer.transform(X)
self.variance_filter.fit(X)
X = self.variance_filter.transform(X)
self.scaler.fit(X)
X = self.scaler.transform(X)
self.features = list(df.columns)
self.features = [self.features[i] for i in self.nan_filter.col_idxs]
self.features = [self.features[i] for i in self.variance_filter.col_idxs]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
df = classic_featurizer(smiles)
X = np.array(df, dtype=np.float32)
X = self.nan_filter.transform(X)
X = self.imputer.transform(X)
X = self.variance_filter.transform(X)
X = self.scaler.transform(X)
return pd.DataFrame(X, columns=self.features)
# MORGAN FINGERPRINTS
from rdkit.Chem import rdMolDescriptors as rd
from rdkit import Chem
RADIUS = 3
NBITS = 2048
DTYPE = np.uint8
def clip_sparse(vect, nbits):
l = [0]*nbits
for i,v in vect.GetNonzeroElements().items():
l[i] = v if v < 255 else 255
return l
class _MorganDescriptor(object):
def __init__(self):
self.nbits = NBITS
self.radius = RADIUS
def calc(self, mol):
v = rd.GetHashedMorganFingerprint(mol, radius=self.radius, nBits=self.nbits)
return clip_sparse(v, self.nbits)
def morgan_featurizer(smiles):
d = _MorganDescriptor()
    X = np.zeros((len(smiles), NBITS), dtype=DTYPE)
for i, smi in enumerate(smiles):
mol = Chem.MolFromSmiles(smi)
X[i,:] = d.calc(mol)
return X
class MorganDescriptor(object):
def __init__(self):
pass
def fit(self, smiles):
X = morgan_featurizer(smiles)
self.features = ["fp-{0}".format(i) for i in range(X.shape[1])]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
X = morgan_featurizer(smiles)
return pd.DataFrame(X, columns=self.features)
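# Example: 2048-dimensional Morgan count fingerprints (radius 3), counts clipped
# at 255, returned as a DataFrame with columns fp-0 ... fp-2047 (SMILES are illustrative):
#   fps = MorganDescriptor().fit(["CCO", "c1ccccc1"])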
# RDKIT 200 Descriptors
from rdkit.Chem import Descriptors as RdkitDescriptors
from rdkit import Chem
RDKIT_PROPS = {"1.0.0": ['BalabanJ', 'BertzCT', 'Chi0', 'Chi0n', 'Chi0v', 'Chi1', 'Chi1n',
'Chi1v', 'Chi2n', 'Chi2v', 'Chi3n', 'Chi3v', 'Chi4n', 'Chi4v',
'EState_VSA1', 'EState_VSA10', 'EState_VSA11', 'EState_VSA2',
'EState_VSA3', 'EState_VSA4', 'EState_VSA5', 'EState_VSA6',
'EState_VSA7', 'EState_VSA8', 'EState_VSA9', 'ExactMolWt',
'FpDensityMorgan1', 'FpDensityMorgan2', 'FpDensityMorgan3',
'FractionCSP3', 'HallKierAlpha', 'HeavyAtomCount', 'HeavyAtomMolWt',
'Ipc', 'Kappa1', 'Kappa2', 'Kappa3', 'LabuteASA', 'MaxAbsEStateIndex',
'MaxAbsPartialCharge', 'MaxEStateIndex', 'MaxPartialCharge',
'MinAbsEStateIndex', 'MinAbsPartialCharge', 'MinEStateIndex',
'MinPartialCharge', 'MolLogP', 'MolMR', 'MolWt', 'NHOHCount',
'NOCount', 'NumAliphaticCarbocycles', 'NumAliphaticHeterocycles',
'NumAliphaticRings', 'NumAromaticCarbocycles', 'NumAromaticHeterocycles',
'NumAromaticRings', 'NumHAcceptors', 'NumHDonors', 'NumHeteroatoms',
'NumRadicalElectrons', 'NumRotatableBonds', 'NumSaturatedCarbocycles',
'NumSaturatedHeterocycles', 'NumSaturatedRings', 'NumValenceElectrons',
'PEOE_VSA1', 'PEOE_VSA10', 'PEOE_VSA11', 'PEOE_VSA12', 'PEOE_VSA13',
'PEOE_VSA14', 'PEOE_VSA2', 'PEOE_VSA3', 'PEOE_VSA4', 'PEOE_VSA5',
'PEOE_VSA6', 'PEOE_VSA7', 'PEOE_VSA8', 'PEOE_VSA9', 'RingCount',
'SMR_VSA1', 'SMR_VSA10', 'SMR_VSA2', 'SMR_VSA3', 'SMR_VSA4', 'SMR_VSA5',
'SMR_VSA6', 'SMR_VSA7', 'SMR_VSA8', 'SMR_VSA9', 'SlogP_VSA1', 'SlogP_VSA10',
'SlogP_VSA11', 'SlogP_VSA12', 'SlogP_VSA2', 'SlogP_VSA3', 'SlogP_VSA4',
'SlogP_VSA5', 'SlogP_VSA6', 'SlogP_VSA7', 'SlogP_VSA8', 'SlogP_VSA9',
'TPSA', 'VSA_EState1', 'VSA_EState10', 'VSA_EState2', 'VSA_EState3',
'VSA_EState4', 'VSA_EState5', 'VSA_EState6', 'VSA_EState7', 'VSA_EState8',
'VSA_EState9', 'fr_Al_COO', 'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN',
'fr_Ar_COO', 'fr_Ar_N', 'fr_Ar_NH', 'fr_Ar_OH', 'fr_COO', 'fr_COO2',
'fr_C_O', 'fr_C_O_noCOO', 'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0',
'fr_NH1', 'fr_NH2', 'fr_N_O', 'fr_Ndealkylation1', 'fr_Ndealkylation2',
'fr_Nhpyrrole', 'fr_SH', 'fr_aldehyde', 'fr_alkyl_carbamate', 'fr_alkyl_halide',
'fr_allylic_oxid', 'fr_amide', 'fr_amidine', 'fr_aniline', 'fr_aryl_methyl',
'fr_azide', 'fr_azo', 'fr_barbitur', 'fr_benzene', 'fr_benzodiazepine',
'fr_bicyclic', 'fr_diazo', 'fr_dihydropyridine', 'fr_epoxide', 'fr_ester',
'fr_ether', 'fr_furan', 'fr_guanido', 'fr_halogen', 'fr_hdrzine', 'fr_hdrzone',
'fr_imidazole', 'fr_imide', 'fr_isocyan', 'fr_isothiocyan', 'fr_ketone',
'fr_ketone_Topliss', 'fr_lactam', 'fr_lactone', 'fr_methoxy', 'fr_morpholine',
'fr_nitrile', 'fr_nitro', 'fr_nitro_arom', 'fr_nitro_arom_nonortho',
'fr_nitroso', 'fr_oxazole', 'fr_oxime', 'fr_para_hydroxylation', 'fr_phenol',
'fr_phenol_noOrthoHbond', 'fr_phos_acid', 'fr_phos_ester', 'fr_piperdine',
'fr_piperzine', 'fr_priamide', 'fr_prisulfonamd', 'fr_pyridine', 'fr_quatN',
'fr_sulfide', 'fr_sulfonamd', 'fr_sulfone', 'fr_term_acetylene', 'fr_tetrazole',
'fr_thiazole', 'fr_thiocyan', 'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea', 'qed']
}
CURRENT_VERSION = "1.0.0"
class _RdkitDescriptor(object):
def __init__(self):
self.properties = RDKIT_PROPS[CURRENT_VERSION]
self._funcs = {name: func for name, func in RdkitDescriptors.descList}
def calc(self, mols):
R = []
for mol in mols:
if mol is None:
r = [np.nan]*len(self.properties)
else:
r = []
for prop in self.properties:
r += [self._funcs[prop](mol)]
R += [r]
return np.array(R)
def rdkit_featurizer(smiles):
d = _RdkitDescriptor()
mols = [Chem.MolFromSmiles(smi) for smi in smiles]
X = d.calc(mols)
data = pd.DataFrame(X, columns=d.properties)
return data
class RdkitDescriptor(object):
def __init__(self):
self.nan_filter = NanFilter()
self.imputer = Imputer()
self.variance_filter = VarianceFilter()
self.scaler = Scaler()
def fit(self, smiles):
df = rdkit_featurizer(smiles)
X = np.array(df, dtype=np.float32)
self.nan_filter.fit(X)
X = self.nan_filter.transform(X)
self.imputer.fit(X)
X = self.imputer.transform(X)
self.variance_filter.fit(X)
X = self.variance_filter.transform(X)
self.scaler.fit(X)
X = self.scaler.transform(X)
self.features = list(df.columns)
self.features = [self.features[i] for i in self.nan_filter.col_idxs]
self.features = [self.features[i] for i in self.variance_filter.col_idxs]
return pd.DataFrame(X, columns=self.features)
def transform(self, smiles):
df = rdkit_featurizer(smiles)
X = np.array(df, dtype=np.float32)
X = self.nan_filter.transform(X)
X = self.imputer.transform(X)
X = self.variance_filter.transform(X)
X = self.scaler.transform(X)
return pd.DataFrame(X, columns=self.features)
# MACCS DESCRIPTORS
def maccs_featurizer(smiles):
mols = [Chem.MolFromSmiles(smi) for smi in smiles]
mk = os.path.join(DATA_PATH, "MACCSkeys.txt")
with open(str(mk), "r") as f:
names = tuple([x.strip().split("\t")[-1] for x in f.readlines()[1:]])
R = []
cols = None
for m in mols:
# rdkit sets fps[0] to 0 and starts keys at 1!
fps = list(MACCSkeys.GenMACCSKeys(m).ToBitString())[1:] # ersilia edit
descriptors = tuple(int(i) for i in fps)
descriptor_names = names
descriptors = Descriptors(
descriptor_type="MACCS",
descriptors=descriptors,
descriptor_names=descriptor_names,
)
R += [list(descriptors.descriptors)]
if cols is None:
cols = list(descriptors.descriptor_names)
data = pd.DataFrame(R, columns=cols)
return data
class MaccsDescriptor(object):
def __init__(self):
pass
def fit(self, smiles):
return maccs_featurizer(smiles)
def transform(self, smiles):
return maccs_featurizer(smiles)
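# MACCS featurization above yields the 166 standard keys as 0/1 columns, named
# after the entries in the data/MACCSkeys.txt file (RDKit's unused bit 0 is dropped):
#   maccs_df = MaccsDescriptor().fit(["CCO"])  # SMILES is illustrative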
|
python
|
import torch.nn as nn
import functools
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import math
import torchvision
class Bottleneck(nn.Module):
# expansion = 4
def __init__(self, inplanes, outplanes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, int(inplanes/4), kernel_size=1, bias=False)
self.bn1 = nn.InstanceNorm2d(int(inplanes/4))
self.conv2 = nn.Conv2d(int(inplanes/4), int(inplanes/4), kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.InstanceNorm2d(int(inplanes/4))
self.conv3 = nn.Conv2d(int(inplanes/4), outplanes, kernel_size=1, bias=False)
self.bn3 = nn.InstanceNorm2d(outplanes)
self.relu = nn.LeakyReLU(inplace=True)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class ASBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False):
super(ASBlock, self).__init__()
self.conv_block_stream1 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=False)
self.conv_block_stream2 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=True, cated_stream2=cated_stream2)
self.channel_switch = nn.Conv2d(dim * 2, dim, kernel_size=1, padding=0, bias=False)
self.channel_switch_N = nn.InstanceNorm2d(dim)
        self.channel_switch_A = nn.LeakyReLU(inplace=True)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False, cal_att=False):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim*2, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim*2),
nn.ReLU(True)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cal_att:
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x1, x2):
x1_out = self.conv_block_stream1(x1)
x2_out = self.conv_block_stream2(x2)
        att = torch.sigmoid(x2_out)
x1_out = torch.cat([x1_out ,att],1)
x1_out = self.channel_switch(x1_out)
x1_out = self.channel_switch_N(x1_out)
x1_out_after = self.channel_switch_A(x1_out)
out = x1 + x1_out_after # residual connection
# stream2 receive feedback from stream1
x2_out = torch.cat((x2_out, out), 1)
return out, x2_out, x1, x1_out_after
class ASNModel(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.InstanceNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
assert(n_blocks >= 0 and type(input_nc) == list)
super(ASNModel, self).__init__()
self.input_nc_s1 = input_nc[0]
self.input_nc_s2 = input_nc[1]
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
self.model_stream1_down_Reflect = nn.ReflectionPad2d(3)
self.model_stream1_down_Con1 = nn.Conv2d(self.input_nc_s1, 64, kernel_size=7, padding=0, bias=False)
self.model_stream1_down_N1 = nn.InstanceNorm2d(64)
        self.model_stream1_down_A1 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Reflect = nn.ReflectionPad2d(3)
self.model_stream2_down_Con1 = nn.Conv2d(self.input_nc_s2, 64, kernel_size=7, padding=0,
bias=False)
self.model_stream2_down_N1 = nn.InstanceNorm2d(64)
        self.model_stream2_down_A1 = nn.LeakyReLU(inplace=True)
self.model_stream1_down_Con2 = nn.Conv2d(64 ,128, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream1_down_N2 = nn.InstanceNorm2d(128)
        self.model_stream1_down_A2 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Con2 = nn.Conv2d(64 , 128, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream2_down_N2 = nn.InstanceNorm2d(128)
        self.model_stream2_down_A2 = nn.LeakyReLU(inplace=True)
self.model_stream1_down_Con3 = nn.Conv2d(128, 256, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream1_down_N3 = nn.InstanceNorm2d(256)
        self.model_stream1_down_A3 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Con3 = nn.Conv2d(128, 256, kernel_size=3,
stride=2, padding=1, bias=False)
self.model_stream2_down_N3 = nn.InstanceNorm2d(256)
        self.model_stream2_down_A3 = nn.LeakyReLU(inplace=True)
self.model_stream1_down_Con4 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
self.model_stream1_down_N4 = nn.InstanceNorm2d(512)
        self.model_stream1_down_A4 = nn.LeakyReLU(inplace=True)
self.model_stream2_down_Con4 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
self.model_stream2_down_N4 = nn.InstanceNorm2d(512)
        self.model_stream2_down_A4 = nn.LeakyReLU(inplace=True)
cated_stream2 = [True for i in range(4)]
cated_stream2[0] = False
asBlock = nn.ModuleList()
for i in range(4):
asBlock.append(ASBlock(512, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=False, cated_stream2=cated_stream2[i]))
self.layer0 = self._make_layer(2, 1024, 1024)
self.model_stream1_up_Con0_rgb = nn.ConvTranspose2d(1024, 3, kernel_size=3, stride=1, padding=1, bias=False)
self.model_stream1_up_A0_rgb = nn.Tanh()
self.model_stream1_up_Con0 = nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.model_stream1_up_N0 = nn.InstanceNorm2d(512)
self.model_stream1_up_A0 = nn.ReLU(True)
self.layer1 = self._make_layer(2, 771, 771)
self.model_stream1_up_Con1_rgb = nn.ConvTranspose2d(771, 3, kernel_size=3, stride=1, padding=1, bias=False)
self.model_stream1_up_A1_rgb = nn.Tanh()
self.model_stream1_up_Con1 = nn.ConvTranspose2d(771, 256, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.model_stream1_up_N1 = nn.InstanceNorm2d(256)
self.model_stream1_up_A1 = nn.ReLU(True)
self.layer2 = self._make_layer(2, 387, 387)
self.model_stream1_up_Con2_rgb = nn.ConvTranspose2d(387, 3, kernel_size=3, stride=1, padding=1, bias=False)
        self.model_stream1_up_A2_rgb = nn.Tanh()
self.model_stream1_up_Con2 = nn.ConvTranspose2d(387 , 128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.model_stream1_up_N2 = nn.InstanceNorm2d(128)
self.model_stream1_up_A2 = nn.ReLU(True)
self.model_stream1_up_Reflect = nn.ReflectionPad2d(1)
self.model_stream1_up_Con3 = nn.Conv2d(128 , 3, kernel_size=3, padding=0, bias=False)
self.model_stream1_up_A3 = nn.Tanh()
self.model_stream1_up_Con5 = nn.Conv2d(6 , 3, kernel_size=1, padding=0, bias=False)
self.model_stream1_up_A5 = nn.Tanh()
self.asBlock = asBlock
def _make_layer(self, block, planes, outplanes):
layers = []
layers.append(Bottleneck(planes, outplanes))
for i in range(1, block):
layers.append(Bottleneck(outplanes, outplanes))
return nn.Sequential(*layers)
def weights_init_kaiming(self, m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(self, m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
            if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
def forward(self, input):
x1, x2 = input
# eye, nose, mouth are for TP-GAN
# Down Sampling
x1 = self.model_stream1_down_Reflect(x1)
x1 = self.model_stream1_down_Con1(x1)
x1 = self.model_stream1_down_N1(x1)
x1 = self.model_stream1_down_A1(x1)
x2 = self.model_stream2_down_Reflect(x2)
x2 = self.model_stream2_down_Con1(x2)
x2 = self.model_stream2_down_N1(x2)
x2 = self.model_stream2_down_A1(x2)
x1 = self.model_stream1_down_Con2(x1)
x1 = self.model_stream1_down_N2(x1)
x1 = self.model_stream1_down_A2(x1)
x2 = self.model_stream2_down_Con2(x2)
x2 = self.model_stream2_down_N2(x2)
x2 = self.model_stream2_down_A2(x2)
x_64 = x1
x1 = self.model_stream1_down_Con3(x1)
x1 = self.model_stream1_down_N3(x1)
x1 = self.model_stream1_down_A3(x1)
x2 = self.model_stream2_down_Con3(x2)
x2 = self.model_stream2_down_N3(x2)
x2 = self.model_stream2_down_A3(x2)
x_32 = x1
x1 = self.model_stream1_down_Con4(x1)
x1 = self.model_stream1_down_N4(x1)
x1 = self.model_stream1_down_A4(x1)
x_16 = x1
x2 = self.model_stream2_down_Con4(x2)
x2 = self.model_stream2_down_N4(x2)
x2 = self.model_stream2_down_A4(x2)
# AS-Block
att = torch.sigmoid(x2)
x1_out = x1 * att
x1 = x1 + x1_out
before_list = []
after_list =[]
for model in self.asBlock:
x1, x2, x1_before, x1_after = model(x1, x2)
before_list.append(x1_before)
after_list.append(x1_after)
x1 = torch.cat([x1 ,x_16],1)
x1 = self.layer0(x1)
fake_16 = self.model_stream1_up_Con0_rgb(x1)
fake_16 = self.model_stream1_up_A0_rgb(fake_16)
        fake_16_32 = F.interpolate(fake_16, (32, 32), mode='bilinear', align_corners=False)
x1 = self.model_stream1_up_Con0(x1)
x1 = self.model_stream1_up_N0(x1)
x1 = self.model_stream1_up_A0(x1)
x1 = torch.cat([x1 ,x_32],1)
x1 = torch.cat([x1 ,fake_16_32],1)
x1 = self.layer1(x1)
fake_32 = self.model_stream1_up_Con1_rgb(x1)
fake_32 = self.model_stream1_up_A0_rgb(fake_32)
        fake_32_64 = F.interpolate(fake_32, (64, 64), mode='bilinear', align_corners=False)
x1 = self.model_stream1_up_Con1(x1)
x1 = self.model_stream1_up_N1(x1)
x1 = self.model_stream1_up_A1(x1)
x1 = torch.cat([x1 ,x_64],1)
x1 = torch.cat([x1 ,fake_32_64],1)
x1 = self.layer2(x1)
fake_64 = self.model_stream1_up_Con2_rgb(x1)
fake_64 = self.model_stream1_up_A0_rgb(fake_64)
        fake_64_128 = F.interpolate(fake_64, (128, 128), mode='bilinear', align_corners=False)
x1 = self.model_stream1_up_Con2(x1)
x1 = self.model_stream1_up_N2(x1)
x1 = self.model_stream1_up_A2(x1)
x1 = self.model_stream1_up_Reflect(x1)
x1 = self.model_stream1_up_Con3(x1)
x1 = self.model_stream1_up_A3(x1)
x1 = torch.cat([x1 ,fake_64_128],1)
x1 = self.model_stream1_up_Con5(x1)
x1 = self.model_stream1_up_A5(x1)
return x1, fake_64, fake_32, fake_16, before_list, after_list
class ASNetwork(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
super(ASNetwork, self).__init__()
        assert type(input_nc) == list and len(input_nc) == 2, 'ASNetwork takes input_nc as a list of two elements, one per stream!'
self.gpu_ids = gpu_ids
self.model = ASNModel(input_nc, output_nc, ngf, norm_layer, use_dropout, n_blocks, gpu_ids, padding_type, n_downsampling=n_downsampling)
def forward(self, input):
if self.gpu_ids and isinstance(input[0].data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
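# Minimal usage sketch (channel counts and spatial size are illustrative
# assumptions, e.g. an RGB image plus an 18-channel conditioning map at 128x128):
#   net = ASNetwork(input_nc=[3, 18], output_nc=3)
#   img = torch.randn(1, 3, 128, 128)
#   cond = torch.randn(1, 18, 128, 128)
#   out, fake_64, fake_32, fake_16, before_list, after_list = net([img, cond])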
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.kraken
~~~~~~~~~~~~~
Command line drivers for recognition functionality.
"""
import os
import warnings
import logging
from typing import Dict, Union, List, cast, Any, IO
from functools import partial
from PIL import Image
import click
from click import open_file
from kraken.lib import log
warnings.simplefilter('ignore', UserWarning)
logger = logging.getLogger('kraken')
APP_NAME = 'kraken'
DEFAULT_MODEL = ['en-default.mlmodel']
LEGACY_MODEL_DIR = '/usr/local/share/ocropus'
def message(msg: str, **styles) -> None:
if logger.getEffectiveLevel() >= 30:
click.secho(msg, **styles)
def binarizer(threshold, zoom, escale, border, perc, range, low, high, base_image, input, output) -> None:
from kraken import binarization
try:
im = Image.open(input)
except IOError as e:
raise click.BadParameter(str(e))
message('Binarizing\t', nl=False)
try:
res = binarization.nlbin(im, threshold, zoom, escale, border, perc, range,
low, high)
form = None
ext = os.path.splitext(output)[1]
if ext in ['.jpg', '.jpeg', '.JPG', '.JPEG', '']:
form = 'png'
if ext:
logger.warning('jpeg does not support 1bpp images. Forcing to png.')
res.save(output, format=form)
except Exception:
message('\u2717', fg='red')
raise
message('\u2713', fg='green')
def segmenter(text_direction, script_detect, allowed_scripts, scale,
maxcolseps, black_colseps, remove_hlines, pad, mask, base_image, input,
output) -> None:
import json
from kraken import pageseg
try:
im = Image.open(input)
except IOError as e:
raise click.BadParameter(str(e))
if mask:
try:
mask = Image.open(mask)
except IOError as e:
raise click.BadParameter(str(e))
message('Segmenting\t', nl=False)
try:
res = pageseg.segment(im, text_direction, scale, maxcolseps, black_colseps, no_hlines=remove_hlines, pad=pad, mask=mask)
if script_detect:
res = pageseg.detect_scripts(im, res, valid_scripts=allowed_scripts)
except Exception:
message('\u2717', fg='red')
raise
with open_file(output, 'w') as fp:
fp = cast(IO[Any], fp)
json.dump(res, fp)
message('\u2713', fg='green')
def recognizer(model, pad, no_segmentation, bidi_reordering, script_ignore, base_image, input, output, lines) -> None:
import json
import tempfile
from kraken import rpred
try:
im = Image.open(base_image)
except IOError as e:
raise click.BadParameter(str(e))
ctx = click.get_current_context()
    # `input` is either the segmenter's JSON output or an image file when the
    # ocr subcommand is run on its own; it might also come from some other
    # subcommand in the pipeline.
scripts = set()
if not lines and base_image != input:
lines = input
    temp_seg = None
    if not lines:
        if no_segmentation:
            temp_seg = tempfile.NamedTemporaryFile(mode='w', delete=False)
            logger.info('Running in no_segmentation mode. Creating temporary segmentation {}.'.format(temp_seg.name))
            json.dump({'script_detection': False,
                       'text_direction': 'horizontal-lr',
                       'boxes': [(0, 0) + im.size]}, temp_seg)
            temp_seg.close()
            lines = temp_seg.name
else:
raise click.UsageError('No line segmentation given. Add one with `-l` or run `segment` first.')
elif no_segmentation:
logger.warning('no_segmentation mode enabled but segmentation defined. Ignoring --no-segmentation option.')
with open_file(lines, 'r') as fp:
try:
fp = cast(IO[Any], fp)
bounds = json.load(fp)
except ValueError as e:
raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))
# script detection
if bounds['script_detection']:
for l in bounds['boxes']:
for t in l:
scripts.add(t[0])
it = rpred.mm_rpred(model, im, bounds, pad,
bidi_reordering=bidi_reordering,
script_ignore=script_ignore)
else:
it = rpred.rpred(model['default'], im, bounds, pad,
bidi_reordering=bidi_reordering)
    if temp_seg is not None:
        logger.debug('Removing temporary segmentation file.')
        os.unlink(temp_seg.name)
preds = []
with log.progressbar(it, label='Processing', length=len(bounds['boxes'])) as bar:
for pred in bar:
preds.append(pred)
with open_file(output, 'w', encoding='utf-8') as fp:
fp = cast(IO[Any], fp)
message('Writing recognition results for {}\t'.format(base_image), nl=False)
logger.info('Serializing as {} into {}'.format(ctx.meta['mode'], output))
if ctx.meta['mode'] != 'text':
from kraken import serialization
fp.write(serialization.serialize(preds, base_image,
Image.open(base_image).size,
ctx.meta['text_direction'],
scripts,
ctx.meta['mode']))
else:
fp.write('\n'.join(s.prediction for s in preds))
message('\u2713', fg='green')
@click.group(chain=True)
@click.version_option()
@click.option('-i', '--input', type=(click.Path(exists=True), # type: ignore
click.Path(writable=True)), multiple=True,
help='Input-output file pairs. Each input file (first argument) is mapped to one '
'output file (second argument), e.g. `-i input.png output.txt`')
@click.option('-I', '--batch-input', multiple=True, help='Glob expression to add multiple files at once.')
@click.option('-o', '--suffix', help='Suffix for output files from batch inputs.')
@click.option('-v', '--verbose', default=0, count=True, show_default=True)
@click.option('-d', '--device', default='cpu', show_default=True, help='Select device to use (cpu, cuda:0, cuda:1, ...)')
def cli(input, batch_input, suffix, verbose, device):
"""
Base command for recognition functionality.
Inputs are defined as one or more pairs `-i input_file output_file`
followed by one or more chainable processing commands. Likewise, verbosity
is set on all subcommands with the `-v` switch.
"""
ctx = click.get_current_context()
ctx.meta['device'] = device
log.set_logger(logger, level=30-min(10*verbose, 20))
@cli.resultcallback()
def process_pipeline(subcommands, input, batch_input, suffix, **args):
"""
Helper function calling the partials returned by each subcommand and
placing their respective outputs in temporary files.
"""
import glob
import tempfile
input = list(input)
if batch_input and suffix:
for batch_expr in batch_input:
for in_file in glob.glob(batch_expr, recursive=True):
input.append((in_file, '{}{}'.format(os.path.splitext(in_file)[0], suffix)))
for io_pair in input:
try:
base_image = io_pair[0]
fc = [io_pair[0]] + [tempfile.mkstemp()[1] for cmd in subcommands[1:]] + [io_pair[1]]
for task, input, output in zip(subcommands, fc, fc[1:]):
task(base_image=base_image, input=input, output=output)
base_image = input
finally:
for f in fc[1:-1]:
os.unlink(f)
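# Illustrative chained invocation handled by process_pipeline (file names are
# hypothetical; en-default.mlmodel is the built-in default). Each stage writes
# to a temporary file that is fed to the next stage:
#   kraken -i page.png page.txt binarize segment ocr -m en-default.mlmodel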
@cli.command('binarize')
@click.option('--threshold', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--zoom', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--escale', show_default=True, default=1.0, type=click.FLOAT)
@click.option('--border', show_default=True, default=0.1, type=click.FLOAT)
@click.option('--perc', show_default=True, default=80, type=click.IntRange(1, 100))
@click.option('--range', show_default=True, default=20, type=click.INT)
@click.option('--low', show_default=True, default=5, type=click.IntRange(1, 100))
@click.option('--high', show_default=True, default=90, type=click.IntRange(1, 100))
def binarize(threshold, zoom, escale, border, perc, range, low, high):
"""
Binarizes page images.
"""
return partial(binarizer, threshold, zoom, escale, border, perc, range, low, high)
@cli.command('segment')
@click.option('-d', '--text-direction', default='horizontal-lr',
show_default=True,
type=click.Choice(['horizontal-lr', 'horizontal-rl',
'vertical-lr', 'vertical-rl']),
help='Sets principal text direction')
@click.option('-s/-n', '--script-detect/--no-script-detect', default=False,
show_default=True,
help='Enable script detection on segmenter output')
@click.option('-a', '--allowed-scripts', default=None, multiple=True,
show_default=True,
help='List of allowed scripts in script detection output. Ignored if disabled.')
@click.option('--scale', show_default=True, default=None, type=click.FLOAT)
@click.option('-m', '--maxcolseps', show_default=True, default=2, type=click.INT)
@click.option('-b/-w', '--black-colseps/--white_colseps', show_default=True, default=False)
@click.option('-r/-l', '--remove_hlines/--hlines', show_default=True, default=True)
@click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0),
help='Left and right padding around lines')
@click.option('-m', '--mask', show_default=True, default=None,
type=click.File(mode='rb', lazy=True), help='Segmentation mask '
'suppressing page areas for line detection. 0-valued image '
'regions are ignored for segmentation purposes. Disables column '
'detection.')
def segment(text_direction, script_detect, allowed_scripts, scale, maxcolseps,
black_colseps, remove_hlines, pad, mask):
"""
Segments page images into text lines.
"""
return partial(segmenter, text_direction, script_detect, allowed_scripts,
scale, maxcolseps, black_colseps, remove_hlines, pad, mask)
def _validate_mm(ctx, param, value):
model_dict = {'ignore': []} # type: Dict[str, Union[str, List[str]]]
if len(value) == 1 and len(value[0].split(':')) == 1:
model_dict['default'] = value[0]
return model_dict
try:
for m in value:
k, v = m.split(':')
if v == 'ignore':
model_dict['ignore'].append(k) # type: ignore
else:
model_dict[k] = os.path.expanduser(v)
except Exception as e:
raise click.BadParameter('Mappings must be in format script:model')
return model_dict
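# Mapping formats accepted by -m/--model and the dicts _validate_mm builds from
# them (model file and script names are hypothetical):
#   -m en-default.mlmodel                  -> {'ignore': [], 'default': 'en-default.mlmodel'}
#   -m Latin:latin.mlmodel -m Greek:ignore -> {'ignore': ['Greek'], 'Latin': 'latin.mlmodel'}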
@cli.command('ocr')
@click.pass_context
@click.option('-m', '--model', default=DEFAULT_MODEL, multiple=True,
show_default=True, callback=_validate_mm,
              help='Path to a recognition model or mapping of the form '
'$script1:$model1. Add multiple mappings to run multi-model '
'recognition based on detected scripts. Use the default keyword '
'for adding a catch-all model. Recognition on scripts can be '
'ignored with the model value ignore.')
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
'padding around lines')
@click.option('-n', '--reorder/--no-reorder', show_default=True, default=True,
help='Reorder code points to logical order')
@click.option('-s', '--no-segmentation', default=False, show_default=True, is_flag=True,
help='Enables non-segmentation mode treating each input image as a whole line.')
@click.option('-h', '--hocr', 'serializer', help='Switch between hOCR, '
'ALTO, and plain text output', flag_value='hocr')
@click.option('-a', '--alto', 'serializer', flag_value='alto')
@click.option('-y', '--abbyy', 'serializer', flag_value='abbyyxml')
@click.option('-t', '--text', 'serializer', flag_value='text', default=True,
show_default=True)
@click.option('-d', '--text-direction', default='horizontal-tb',
show_default=True,
type=click.Choice(['horizontal-tb', 'vertical-lr', 'vertical-rl']),
help='Sets principal text direction in serialization output')
@click.option('-l', '--lines', type=click.Path(exists=True), show_default=True,
help='JSON file containing line coordinates')
@click.option('--threads', default=1, show_default=True,
help='Number of threads to use for OpenMP parallelization.')
def ocr(ctx, model, pad, reorder, no_segmentation, serializer, text_direction, lines, threads):
"""
Recognizes text in line images.
"""
from kraken.lib import models
    # first we try to find the model at the absolute path, then in ~/.kraken, then
# LEGACY_MODEL_DIR
nm = {} # type: Dict[str, models.TorchSeqRecognizer]
ign_scripts = model.pop('ignore')
for k, v in model.items():
search = [v,
os.path.join(click.get_app_dir(APP_NAME), v),
os.path.join(LEGACY_MODEL_DIR, v)]
location = None
for loc in search:
if os.path.isfile(loc):
location = loc
break
if not location:
raise click.BadParameter('No model for {} found'.format(k))
message('Loading RNN {}\t'.format(k), nl=False)
try:
rnn = models.load_any(location, device=ctx.meta['device'])
nm[k] = rnn
        except Exception:
            message('\u2717', fg='red')
            raise
message('\u2713', fg='green')
if 'default' in nm:
from collections import defaultdict
nn = defaultdict(lambda: nm['default']) # type: Dict[str, models.TorchSeqRecognizer]
nn.update(nm)
nm = nn
# thread count is global so setting it once is sufficient
    nm[k].nn.set_num_threads(threads)
# set output mode
ctx.meta['mode'] = serializer
ctx.meta['text_direction'] = text_direction
return partial(recognizer,
model=nm,
pad=pad,
no_segmentation=no_segmentation,
bidi_reordering=reorder,
script_ignore=ign_scripts,
lines=lines)
@cli.command('show')
@click.pass_context
@click.argument('model_id')
def show(ctx, model_id):
"""
Retrieves model metadata from the repository.
"""
import unicodedata
from kraken import repo
from kraken.lib.util import make_printable, is_printable
desc = repo.get_description(model_id)
chars = []
combining = []
for char in sorted(desc['graphemes']):
if not is_printable(char):
combining.append(make_printable(char))
else:
chars.append(char)
message('name: {}\n\n{}\n\n{}\nscripts: {}\nalphabet: {} {}\naccuracy: {:.2f}%\nlicense: {}\nauthor(s): {}\ndate: {}'.format(model_id,
desc['summary'],
desc['description'],
' '.join(desc['script']),
''.join(chars),
', '.join(combining),
desc['accuracy'],
desc['license']['id'],
'; '.join(x['name'] for x in desc['creators']),
desc['publication_date']))
ctx.exit(0)
@cli.command('list')
@click.pass_context
def list_models(ctx):
"""
Lists models in the repository.
"""
from kraken import repo
message('Retrieving model list ', nl=False)
model_list = repo.get_listing(partial(message, '.', nl=False))
message('\b\u2713', fg='green', nl=False)
message('\033[?25h\n', nl=False)
for id, metadata in model_list.items():
message('{} ({}) - {}'.format(id, ', '.join(metadata['type']), metadata['summary']))
ctx.exit(0)
@cli.command('get')
@click.pass_context
@click.argument('model_id')
def get(ctx, model_id):
"""
Retrieves a model from the repository.
"""
from kraken import repo
try:
os.makedirs(click.get_app_dir(APP_NAME))
except OSError:
pass
message('Retrieving model ', nl=False)
filename = repo.get_model(model_id, click.get_app_dir(APP_NAME),
partial(message, '.', nl=False))
message('\b\u2713', fg='green', nl=False)
message('\033[?25h')
message('Model name: {}'.format(filename))
ctx.exit(0)
if __name__ == '__main__':
cli()
|
python
|
hello = 'hello world'
print(hello)
|
python
|
# -*- coding: utf-8 -*-
# Reads two grades and prints their average; the user-facing prompts are in Portuguese.
nota1 = float(input('Digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
media = (nota1 + nota2) / 2
print('A média foi de {:.2f}!'.format(media))
|
python
|
import unittest
import sys
undertest = __import__(sys.argv[-1].split(".py")[0])
maioridade_penal = getattr(undertest, 'maioridade_penal', None)
class PublicTests(unittest.TestCase):
def test_basico_1(self):
assert maioridade_penal("Jansen Italo Ana","14 21 60") == "Italo Ana"
if __name__ == '__main__':
loader = unittest.TestLoader()
runner = unittest.TextTestRunner()
runner.run(loader.loadTestsFromModule(sys.modules[__name__]))
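# The module under test is taken from the last CLI argument, so a typical run
# looks like (file names are illustrative):
#   python test_maioridade.py maioridade.py
# where maioridade.py defines maioridade_penal(names, ages), which, per the test
# above, should return the names whose age is 18 or over ("Italo Ana" for "14 21 60").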
|
python
|
#!/usr/bin/python3
import json
def stringToInt(origin_string):
result = 0
temp_string = origin_string.strip()
for c in temp_string:
if c >= '0' and c <= '9':
result = result * 10 + (ord(c) - ord('0'))
else:
return -1
return result
def getString(hint, default_value_hint, default_value):
temp_input = input("%s(%s): " % (hint, default_value_hint))
if temp_input != "":
return temp_input
else:
return default_value
def getNumber(hint, default_value_hint, default_value):
temp_input = input("%s(%s): " % (hint, default_value_hint))
if temp_input == "" or stringToInt(temp_input) < 0:
return default_value
else:
return stringToInt(temp_input)
def createNewSetting(a):
new_setting = dict(a["settings"][0])
new_setting["translator"]["url"] = getString(
"What server url would you like to use?",
new_setting["translator"]["url"],
new_setting["translator"]["url"]
)
new_setting["translator"]["delay_milliseconds"] = getNumber(
"Wait time between requests. It aims to limit the request rate for the access to translation server via this api may be canceled if the rate is too high.",
"700",
700
)
new_setting["IO"]["input_file"]["path"] = getString(
"Read from which file?",
"DESIDE AT RUNTIME",
None
)
new_setting["IO"]["input_file"]["encode"] = getString(
"Encodeing of input file?",
"utf-8",
"utf-8"
)
new_setting["IO"]["input_file"]["language"] = getString(
"Language of input file.",
"auto",
"auto"
)
new_setting["IO"]["output_file"]["path"] = getString(
"Write to which file?",
"ADD .out AFTER INPUT PATH",
None
)
new_setting["IO"]["output_file"]["encode"] = getString(
"Encodeing of output file?",
"utf-8",
"utf-8"
)
temp = input("Now please tell me the route of translation language, one each line. End with a empty line?(ja zh-cn):\n")
temp_list = list()
while True:
if temp == "":
break
temp_list.append(temp)
temp = input()
if len(temp_list) >= 2:
new_setting["translation"]["steps"] = temp_list
new_setting["translation"]["rounds"] = getNumber(
"Translate for how many rounds. Set to 0 to translate until the result no longer changes.",
"0",
0
)
new_setting["name"] = input("Finally, give this setting a name: ")
a["settings"].append(new_setting)
temp = input("Set this setting default?[Y]/n: ")
if temp == "" or temp == "y" or temp == "Y":
a["default"] = len(a["settings"]) - 1
try:
file = open("settings.json", "w")
        if not file.writable():
print("Oops, can't save setting! Terminate......")
exit()
file.write(json.dumps(a, indent="\t"))
finally:
if file:
file.close()
def getSetting():
try:
file = open("settings.json", "r")
        if not file.readable():
print("Can't read setting file. Terminate.")
exit()
a = json.loads(file.read())
finally:
if file:
file.close()
current_index = 0
for setting in a["settings"]:
print("%d. %s%s" % (current_index, setting["name"], "(DEFAULT)" if(current_index == a["default"]) else ""))
current_index += 1
print("%d. CREATE A NEW SETTING" % current_index)
selection = input("Please select a setting by its index, or <ENTER> for default: ")
if selection == "":
selected_setting = a["default"]
else:
selected_setting = stringToInt(selection)
if selected_setting > current_index or selected_setting < 0:
print("Invalid index. Use default.")
selected_setting = a["default"]
elif selected_setting == current_index:
createNewSetting(a)
return a["settings"][selected_setting]
|
python
|
import sys
import zipfile
import shutil
import commands
import os
import hashlib
import re
import traceback
import json
from lib.dexparser import Dexparser
from lib.CreateReport import HTMLReport
from lib.Certification import CERTParser
import lib.dynamic as dynamic
dexList = [] #dexfile list
#program usage
def usage():
print "androtools : no file specified"
print "./androtools <APK_FILE_PATH> <HTML_OUTPUT_FILENAME>"
#program information
def about(apkfile):
print "Androtools - Android APK Static & Dynamic Analyzer"
print "Developed by Kim Namjun (Sejong University, Department of Information Security)"
print "Target APK Path : %s" %apkfile
#filehash extractor
def filehash(apkfile, mode):
    hashers = {"md5": hashlib.md5, "sha1": hashlib.sha1, "sha256": hashlib.sha256}
    if mode not in hashers:
        return ""
    m = hashers[mode]()
    with open(apkfile, 'rb') as f:
        while True:
            data = f.read(8192)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
#delete temp file directory
def delTemp():
commands.getoutput("rm -rf temp")
#check that the target file is a valid apk file
def is_android(zfile):
for fname in zfile.namelist():
if "AndroidManifest.xml" in fname:
return True
elif "resources.arsc" in fname:
return True
else:
pass
return False
#logging error to error_log.txt
def logError(error_msg):
f = open('error_log.txt', 'a+')
f.write('[*] ' + error_msg + '\n')
f.close()
#extract dex file to temp file
def extractDEX(zfile):
global dexList
for fname in zfile.namelist():
if fname[-4:] == ".dex": #if file extension is dex
zfile.extract(fname, "temp")
dexpath = os.path.join("temp", fname)
dexhash = filehash(dexpath, "md5")
shutil.move(dexpath, os.path.join("temp", dexhash + ".dex"))
dexList.append(dexhash + ".dex")
#file resource searching
def fileResource(zfile):
print "[*] Extracting File Resource Data..."
extension = {'.apk' : 0, '.png' : 0, '.jpg' : 0, '.xml' : 0, '.mp3' : 0, '.txt' : 0, '.ini' : 0, '.so' : 0}
keylist = extension.keys()
soEnvironment = []
for fname in zfile.namelist():
if fname[-4:] in keylist:
extension[fname[-4:]] += 1
if fname[:4] == "lib/":
soEnvironment.append(fname.split('/')[1])
extension[fname[-3:]] += 1
statistics = []
for ext in extension.keys():
if extension[ext] == 0:
pass
else:
tempArr = []
tempArr.append(ext)
tempArr.append(str(extension[ext]))
statistics.append(tempArr)
return statistics
#extract string from xml
def extractString(report, apkfile):
print "[*] Extracting All XML String..."
stringCmd = "./lib/aapt dump strings %s" %apkfile
strResult = commands.getoutput(stringCmd).split('\n')
extractedStr = []
for xmlstring in strResult:
if "res/" in xmlstring:
pass
else:
try:
if len(xmlstring.split(':')[1]) == 0:
pass
else:
extractedStr.append(xmlstring.split(': ')[1])
except:
extractedStr.append(xmlstring)
report.stringinfo(extractedStr)
#get method information from dex
def methodAnalysis(report, string, typeid, method):
methodArr = []
for i in range(len(method)):
(class_idx, proto_idx, name_idx) = method[i]
class_str = string[typeid[class_idx]]
name_str = string[name_idx]
data = '%s.%s()' % (class_str, name_str)
methodArr.append(data)
report.dexmethodinfo(methodArr)
#get dex class filename (.java)
def classExtract(report, string):
classArray = []
for dexstr in string:
if ".java" in dexstr:
classArray.append(dexstr)
report.dexclassinfo(classArray)
#get dex adler32 checksum
def checksum(dexmodule):
return dexmodule.checksum()
#check similarity using ssdeep
def simcheck(apkfile, fuzzyhash):
print "[*] Checking Similarity..."
simdata = []
match = []
if os.path.exists('sim.txt') == False: #if sim.txt not found?
print "[*] Creating similarity storage DB.."
f = open('sim.txt', 'a+')
f.write('ssdeep,1.1--blocksize:hash:hash,filename\n' + fuzzyhash + '\n')
else:
searchQuery = commands.getoutput("ssdeep -m sim.txt " + apkfile).split('\n')
#print searchQuery
for query in searchQuery:
tempArr = []
try:
                percent = query.split(':')[1].split(' ')[1].replace(')', '%)')
                filename = os.path.basename(query.split(':')[1].split(' ')[0])
                tempArr.append(filename)
                tempArr.append(percent)
match.append(tempArr)
except:
pass
f = open('sim.txt', 'a+')
f.write(fuzzyhash + '\n')
return match
#find suspicious string in dex and replace if highlight
def findSuspicious(report, stringlist):
dexstrlist = []
for i in range(len(stringlist)):
email = re.findall(r'([\w.-]+)@([\w.-]+)', stringlist[i])
url = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', stringlist[i])
ip = re.findall(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', stringlist[i])
if email:
dexstrlist.append(str(email[0][0] + "@" + email[0][1]))
if url:
dexstrlist.append(str(url[0]))
if ip:
dexstrlist.append(str(ip[0]))
report.dexstringinfo(dexstrlist)
#parse information from DEX list
def parseDEX(report):
global dexList
report.dexinfoHeader()
for dexfile in dexList:
parse = Dexparser(os.path.join("temp", dexfile))
string = parse.string_list()
typeid = parse.typeid_list()
method = parse.method_list()
adler32 = checksum(parse)
report.dexBasicinfo(dexfile, adler32)
findSuspicious(report, string)
#classExtract(report, string)
#methodAnalysis(report, string, typeid, method)
#get permission information
def permission(report, apkfile):
print "[*] Extracting Permission Data..."
permlist = []
permcmd = "./lib/aapt dump permissions %s" %apkfile
getperm = commands.getoutput(permcmd).split('\n')
for perm in getperm:
if "uses-permission" in perm:
permlist.append(perm.split(': ')[1])
report.writePerminfo(permlist)
def nativeparser(solist, report):
filterList = []
for sofile in solist:
with open(os.path.join("temp", sofile[1] + ".so"), 'rb') as f:
data = f.read()
email = re.findall(r'([\w.-]+)@([\w.-]+)', data)
url = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', data)
ip = re.findall(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', data)
if email:
if str(email[0][0] + "@" + email[0][1]) not in filterList:
filterList.append(str(email[0][0] + "@" + email[0][1]))
if url:
if str(url[0]) not in filterList:
filterList.append(str(url[0]))
if ip:
if str(ip[0]) not in filterList:
filterList.append(str(ip[0]))
report.nativeStringinfo(filterList)
#native file information
def nativefile(zfile, report):
print "[*] Extracting Native File Data..."
solist = []
for fname in zfile.namelist():
if fname[-3:] == ".so":
tempArr = []
sofile = os.path.basename(fname)
source = zfile.open(fname)
target = file(os.path.join("temp", sofile), "wb")
with source, target:
shutil.copyfileobj(source, target)
sohash = filehash(os.path.join("temp", sofile), "sha1")
shutil.move(os.path.join("temp", sofile), os.path.join("temp", sohash + ".so"))
tempArr.append(fname)
tempArr.append(sohash)
solist.append(tempArr)
report.nativeFileinfo(solist)
nativeparser(solist, report)
#get apk file basic information
def getbasic(apkfile, report):
print "[*] Extracting Basic APK File Data..."
filename = os.path.basename(apkfile)
md5hash = filehash(apkfile, "md5")
sha1hash = filehash(apkfile, "sha1")
sha256hash = filehash(apkfile, "sha256")
filesize = str(os.path.getsize(apkfile) / 1024)
try:
fuzzy = commands.getoutput("ssdeep -s " + apkfile).split('\n')[1]
except:
print "[*] Fuzzyhash Command not found. Please <brew install ssdeep> to install"
fuzzy = ""
report.writeBaseinfo(filename, md5hash, sha1hash, sha256hash, fuzzy.split(',')[0], filesize)
return fuzzy
#get Certification information
def getCert(zfile, report):
print "[*] Extracting Certification Data..."
certlist = []
certdata = []
for fname in zfile.namelist():
if fname[-4:] == ".RSA":
certfile = os.path.basename(fname)
source = zfile.open(fname)
target = file(os.path.join("temp", certfile), "wb")
with source, target:
shutil.copyfileobj(source, target)
certlist.append(certfile)
for cert in certlist:
tempArr = []
c = CERTParser(os.path.join("temp", cert))
tempArr.append(cert)
tempArr.append(c.fingerprint())
tempArr.append(c.issuer())
tempArr.append(c.starttime())
certdata.append(tempArr)
report.writeCERTinfo(certdata)
#get AndroidManifest.xml information
def getManifest(apkfile, report):
print "[*] Extracting AndroidManifest Data..."
infocmd = "./lib/aapt dump badging %s" %apkfile
getinfo = commands.getoutput(infocmd).split('\n')
apiver = ""
cputype = ""
entry = ""
targetver = ""
appname = ""
packname = ""
entry = ""
for info in getinfo:
data = info.split(':')
if data[0] == "sdkVersion":
apiver = data[1].replace('\'', '')
if data[0] == "targetSdkVersion":
targetver = data[1].replace('\'', '')
if data[0] == "application-label":
try:
appname = data[1].replace('\'', '')
except:
appname = data[1]
if data[0] == "package":
packname = data[1].split('\'')[1]
if data[0] == "launchable-activity":
entry = data[1].split('\'')[1]
if data[0] == "native-code":
for cpu in data[1].split('\''):
cputype += cpu + " "
report.writeManifestinfo(apiver, cputype, targetver, appname, packname, entry)
return [packname, entry]
#dynamic analysis
def dynamicAnalysis(report, apkfile, packname, entry):
print "[*] Dynamic Analysis start!"
anal_result = dynamic.main(apkfile, packname, entry)
result = json.loads(anal_result)
try:
report.datasectioninfo(result['filetag']['startCreate'], result['filetag']['endCreate'])
except:
pass
try:
report.logcatinfo(result['logtag'])
except:
pass
try:
report.packetinfo(result['packettag']['packet'], result['timeline']['ipList'])
except:
pass
print "[*] Dynamic Analysis end!"
#program entry point
def main(apkfile, output):
try:
about(apkfile) #program information
        isValid = zipfile.is_zipfile(apkfile) #check valid zip container
        if isValid:
zfile = zipfile.ZipFile(apkfile)
            isAndroid = is_android(zfile) #check valid android apk file
if isAndroid:
print "[*] Analysis start!"
#setting HTML Report
report = HTMLReport(output)
report.header()
report.style()
report.bodystart()
fuzzy = getbasic(apkfile, report)
extractDEX(zfile) #extract dex file
filetype = fileResource(zfile) #analyze file resources
simresult = simcheck(apkfile, fuzzy) #similarity check
report.writeFileinfo(filetype, simresult)
xmlinfo = getManifest(apkfile, report)
permission(report, apkfile)
getCert(zfile, report)
parseDEX(report)
extractString(report, apkfile)
nativefile(zfile, report)
dynamicAnalysis(report, apkfile, xmlinfo[0], xmlinfo[1])
report.endbody()
del report
else:
print "[*] Sorry, We can\'t analyze this file"
else:
print "[*] Sorry, We can\'t analyze this file"
delTemp()
print "[*] Analysis complete!"
except Exception, e:
logError(str(traceback.format_exc()))
print "[*] Androtools Exception - Error logged!"
if __name__ == '__main__':
try:
main(sys.argv[1], sys.argv[2])
except:
usage()
|
python
|
#!/usr/bin/env python
import setuptools
setuptools.setup(
author='Bryan Stitt',
author_email='[email protected]',
description='Mark shows unwatched on a schedule.',
long_description=__doc__,
entry_points={
'console_scripts': [
'plex-schedule = plex_schedule.cli:cli',
],
},
install_requires=[
'click',
'plexapi',
'sqlalchemy',
'PyYAML',
], # keep this in sync with requirements.in
name='plex_schedule',
packages=setuptools.find_packages(),
version='0.0.1.dev0',
)
|
python
|
"""A class using all the slightly different ways a function could be defined
and called. Used for testing appmap instrumentation.
"""
# pylint: disable=missing-function-docstring
from functools import lru_cache, wraps
import time
import appmap
class ClassMethodMixin:
@classmethod
def class_method(cls):
return 'ClassMethodMixin#class_method, cls %s' % (cls.__name__)
class Super:
def instance_method(self):
return self.method_not_called_directly()
def method_not_called_directly(self):
return 'Super#instance_method'
def wrap_fn(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
try:
print('calling %s' % (fn.__name__))
return fn(*args, **kwargs)
finally:
print('called %s' % (fn.__name__))
return wrapped_fn
class ExampleClass(Super, ClassMethodMixin):
def __repr__(self):
return 'ExampleClass and %s' % (self.another_method())
# Include some lines so the line numbers in the expected appmap
# don't change:
# <blank>
def another_method(self):
return "ExampleClass#another_method"
def test_exception(self):
raise Exception('test exception')
what_time_is_it = time.gmtime
@appmap.labels('super', 'important')
def labeled_method(self):
return 'super important'
@staticmethod
@wrap_fn
def wrapped_static_method():
return 'wrapped_static_method'
@classmethod
@wrap_fn
def wrapped_class_method(cls):
return 'wrapped_class_method'
@wrap_fn
def wrapped_instance_method(self):
return 'wrapped_instance_method'
@staticmethod
@lru_cache(maxsize=1)
def static_cached(value):
return value + 1
def instance_with_param(self, p):
return p
@staticmethod
def static_method():
import yaml, io # Formatting is funky to minimize changes to expected appmap
yaml.Dumper(io.StringIO()).open(); return 'ExampleClass.static_method\n...\n'
@staticmethod
def call_yaml():
return ExampleClass.dump_yaml('ExampleClass.call_yaml')
@staticmethod
def dump_yaml(data):
import yaml
# Call twice, to make sure both show up in the recording
yaml.dump(data)
yaml.dump(data)
def with_docstring(self):
"""
docstrings can have
multiple lines
"""
return True
# comments can have
# multiple lines
def with_comment(self):
return True
|
python
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import csek_utils
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute import zone_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
DETAILED_HELP = {
'DESCRIPTION': """\
*{command}* facilitates the creation of Google Compute Engine
virtual machines. For example, running:
$ {command} example-instance-1 example-instance-2 example-instance-3 --zone us-central1-a
will create three instances called `example-instance-1`,
`example-instance-2`, and `example-instance-3` in the
`us-central1-a` zone.
When an instance is in RUNNING state and the system begins to boot,
the instance creation is considered finished, and the command returns
with a list of new virtual machines. Note that you usually cannot log
into a new instance until it finishes booting. Check the progress of an
instance using `gcloud compute instances get-serial-port-output`.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES': """\
To create an instance with the latest ``Red Hat Enterprise Linux
6'' image available, run:
$ {command} example-instance --image rhel-6 --zone us-central1-a
""",
}
def _CommonArgs(parser):
"""Register parser args common to all tracks."""
metadata_utils.AddMetadataArgs(parser)
instances_flags.AddDiskArgs(parser)
instances_flags.AddLocalSsdArgs(parser)
instances_flags.AddCanIpForwardArgs(parser)
instances_flags.AddAddressArgs(parser, instances=True)
instances_flags.AddMachineTypeArgs(parser)
instances_flags.AddMaintenancePolicyArgs(parser)
instances_flags.AddNoRestartOnFailureArgs(parser)
instances_flags.AddPreemptibleVmArgs(parser)
instances_flags.AddScopeArgs(parser)
instances_flags.AddTagsArgs(parser)
instances_flags.AddCustomMachineTypeArgs(parser)
instances_flags.AddNetworkArgs(parser)
instances_flags.AddPrivateNetworkIpArgs(parser)
instances_flags.AddImageArgs(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
parser.add_argument(
'names',
metavar='NAME',
nargs='+',
help='The names of the instances to create.')
flags.AddZoneFlag(
parser,
resource_type='instances',
operation_type='create')
csek_utils.AddCsekKeyArgs(parser)
class Create(base_classes.BaseAsyncCreator,
image_utils.ImageExpander,
zone_utils.ZoneResourceFetcher):
"""Create Google Compute Engine virtual machine instances."""
@staticmethod
def Args(parser):
_CommonArgs(parser)
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'instances'
def CreateRequests(self, args):
instances_flags.ValidateDiskFlags(args)
instances_flags.ValidateLocalSsdFlags(args)
# This feature is only exposed in alpha/beta
allow_rsa_encrypted = self.ReleaseTrack() in [base.ReleaseTrack.ALPHA,
base.ReleaseTrack.BETA]
self.csek_keys = csek_utils.CsekKeyStore.FromArgs(args, allow_rsa_encrypted)
scheduling = instance_utils.CreateSchedulingMessage(
messages=self.messages,
maintenance_policy=args.maintenance_policy,
preemptible=args.preemptible,
restart_on_failure=args.restart_on_failure)
service_accounts = instance_utils.CreateServiceAccountMessages(
messages=self.messages,
scopes=([] if args.no_scopes else args.scopes))
if args.tags:
tags = self.messages.Tags(items=args.tags)
else:
tags = None
metadata = metadata_utils.ConstructMetadataMessage(
self.messages,
metadata=args.metadata,
metadata_from_file=args.metadata_from_file)
# If the user already provided an initial Windows password and
# username through metadata, then there is no need to check
# whether the image or the boot disk is Windows.
boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)
instance_refs = self.CreateZonalReferences(args.names, args.zone)
# Check if the zone is deprecated or has maintenance coming.
self.WarnForZonalCreation(instance_refs)
network_interface = instance_utils.CreateNetworkInterfaceMessage(
scope_prompter=self,
compute_client=self.compute_client,
network=args.network,
subnet=args.subnet,
private_network_ip=args.private_network_ip,
no_address=args.no_address,
address=args.address,
instance_refs=instance_refs)
machine_type_uris = instance_utils.CreateMachineTypeUris(
scope_prompter=self,
compute_client=self.compute_client,
project=self.project,
machine_type=args.machine_type,
custom_cpu=args.custom_cpu,
custom_memory=args.custom_memory,
instance_refs=instance_refs)
create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk or [])
if create_boot_disk:
image_uri, _ = self.ExpandImageFlag(args, return_image_resource=False)
else:
image_uri = None
# A list of lists where the element at index i contains a list of
# disk messages that should be set for the instance at index i.
disks_messages = []
# A mapping of zone to boot disk references for all existing boot
# disks that are being attached.
# TODO(user): Simplify this once resources.Resource becomes
# hashable.
existing_boot_disks = {}
for instance_ref in instance_refs:
persistent_disks, boot_disk_ref = (
instance_utils.CreatePersistentAttachedDiskMessages(
self, self.compute_client, self.csek_keys, args.disk or [],
instance_ref))
local_ssds = [
instance_utils.CreateLocalSsdMessage(
self, x.get('device-name'), x.get('interface'), instance_ref.zone)
for x in args.local_ssd or []]
if create_boot_disk:
boot_disk = instance_utils.CreateDefaultBootAttachedDiskMessage(
self, self.compute_client, self.resources,
disk_type=args.boot_disk_type,
disk_device_name=args.boot_disk_device_name,
disk_auto_delete=args.boot_disk_auto_delete,
disk_size_gb=boot_disk_size_gb,
require_csek_key_create=(
args.require_csek_key_create if self.csek_keys else None),
image_uri=image_uri,
instance_ref=instance_ref,
csek_keys=self.csek_keys)
persistent_disks = [boot_disk] + persistent_disks
else:
existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
disks_messages.append(persistent_disks + local_ssds)
requests = []
for instance_ref, machine_type_uri, disks in zip(
instance_refs, machine_type_uris, disks_messages):
requests.append(self.messages.ComputeInstancesInsertRequest(
instance=self.messages.Instance(
canIpForward=args.can_ip_forward,
disks=disks,
description=args.description,
machineType=machine_type_uri,
metadata=metadata,
name=instance_ref.Name(),
networkInterfaces=[network_interface],
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags,
),
project=self.project,
zone=instance_ref.zone))
return requests
Create.detailed_help = DETAILED_HELP
|
python
|
from unittest import TestCase
from gui_components import parser
from math import tan
class ParserTestCase(TestCase):
def test_ctan(self):
self.assertEqual(parser.ctan(0.5), 1 / tan(0.5))
def test__check_res(self):
self.assertEqual(parser._check_res('2*x+3', 1), (True, 5))
self.assertEqual(parser._check_res('2*x+vasiles(2)+3', 1), (False, 0))
def test_get_integral_inside_expression(self):
self.assertEqual(parser.get_integral_inside_expression('integrala(2+x)'), '2+x')
self.assertEqual(parser.get_integral_inside_expression('5*x**2'), '')
def test_check_expression_validity(self):
self.assertEqual(parser.check_expression_validity(''), False)
self.assertEqual(parser.check_expression_validity('integrala(2+x)'), True)
self.assertEqual(parser.check_expression_validity('2+x'), True)
self.assertEqual(parser.check_expression_validity('integrala('), False)
self.assertEqual(parser.check_expression_validity('integrala22'), False)
self.assertEqual(parser.check_expression_validity('integrala(22+5+x)'), True)
def test_expr_to_lamda(self):
        current_lambda = lambda x: x * 2
        parser_lambda = parser.expr_to_lamda('x*2')
        self.assertEqual(current_lambda(2), parser_lambda(2))
def test_check_expression_is_number(self):
self.assertTrue(parser.check_expression_is_number('2.5'))
self.assertFalse(parser.check_expression_is_number('vasile'))
|
python
|
import random
whi1 = True
while whi1 is True:
try:
print("Selamat Datang Di Game Batu, Gunting, Kertas!")
pilihanAwal = int(input("Apakah Kau Ingin Langsung Bermain?\n1. Mulai Permainan\n2. Tentang Game\n3. Keluar\nPilihan: "))
whi2 = True
while whi2 is True:
if pilihanAwal == 1:
print("=" * 100)
def fungsibgk(pilihan):
                    komputer = random.choice(["Rock", "Scissors", "Paper"])
                    if pilihan == 1:
                        print("You chose Rock")
                        print("The computer chose", komputer)
                        if komputer == "Rock":
                            print("It's a tie")
                        elif komputer == "Scissors":
                            print("You win")
                        elif komputer == "Paper":
                            print("You lose")
                    elif pilihan == 2:
                        print("You chose Scissors")
                        print("The computer chose", komputer)
                        if komputer == "Scissors":
                            print("It's a tie")
                        elif komputer == "Paper":
                            print("You win")
                        elif komputer == "Rock":
                            print("You lose")
                    elif pilihan == 3:
                        print("You chose Paper")
                        print("The computer chose", komputer)
                        if komputer == "Paper":
                            print("It's a tie")
                        elif komputer == "Rock":
                            print("You win")
                        elif komputer == "Scissors":
                            print("You lose")
                    else:
                        print("Sorry, your choice is not in the list")
                pilihan = int(input("Enter your choice: \n1. Rock\n2. Scissors\n3. Paper\nChoice: "))
print("=" * 100)
fungsibgk(pilihan)
elif pilihanAwal == 2:
print("=" * 100)
print("Created By Aswassaw227\nBuild In Python 3.6")
print("=" * 100)
break
elif pilihanAwal == 3:
print("=" * 100)
print("Terima Kasih Karena Telah Bermain, Semoga Harimu Menyenangkan")
print("=" * 100)
exit()
else:
print("=" * 100)
print("Maaf, Pilihan Anda Tidak Ada Dalam Daftar")
print("=" * 100)
break
except Exception as err:
print("=" * 100)
print(err)
print("=" * 100)
|
python
|
# Generated by Django 2.0.13 on 2020-10-10 16:05
import ddcz.models.magic
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ddcz', '0024_skills_name'),
]
operations = [
migrations.AlterField(
model_name='commonarticle',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='gallerypicture',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='monster',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='photo',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='skill',
name='zdrojmail',
field=ddcz.models.magic.MisencodedTextField(blank=True, null=True),
),
]
|
python
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
import six
from patron import keymgr
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VolumeEncryptor(object):
"""Base class to support encrypted volumes.
A VolumeEncryptor provides hooks for attaching and detaching volumes, which
are called immediately prior to attaching the volume to an instance and
immediately following detaching the volume from an instance. This class
performs no actions for either hook.
"""
def __init__(self, connection_info, **kwargs):
self._key_manager = keymgr.API()
self.encryption_key_id = kwargs.get('encryption_key_id')
def _get_key(self, context):
"""Retrieves the encryption key for the specified volume.
        :param context: the context used to retrieve the encryption key
"""
return self._key_manager.get_key(context, self.encryption_key_id)
@abc.abstractmethod
def attach_volume(self, context, **kwargs):
"""Hook called immediately prior to attaching a volume to an instance.
"""
pass
@abc.abstractmethod
def detach_volume(self, **kwargs):
"""Hook called immediately after detaching a volume from an instance.
"""
pass
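# Illustrative sketch only (not part of the original module): a minimal concrete
# subclass just has to implement the two hooks declared above, for example:
#
# class NoOpEncryptor(VolumeEncryptor):
#     """Encryptor that performs no action on attach or detach."""
#
#     def attach_volume(self, context, **kwargs):
#         pass
#
#     def detach_volume(self, **kwargs):
#         pass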
|
python
|
from .calibration import Calibration
from .capture import Capture
from .configuration import Configuration, default_configuration
from .device import Device
from .image import Image
from .imu_sample import ImuSample
from .transformation import Transformation
|
python
|
# some comment
""" doc string """
import math
import sys
class the_class():
# some comment
""" doc string """
import os
import sys
class second_class():
some_statement
import os
import sys
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Pizza Project - main.py - started on 8 November 2021
# Written by Garret Stand licensed under a MIT license for academic use.
# This file contains shell formatting and other output modification/redirections functions for the program. It is a non-executable library.
# Please read the readme if you wish to execute this program locally. Developed on Python 3.9.7
import json
import sys
import os
import random
import uuid
import platform
import argparse
import fpdf
import time
import subprocess
# DEVNOTE: import all external libraries/dependencies above this line, and all internal libraries/dependencies below this line
import argparsing
import dataDriver
import orders
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
print("This is a library. This was probably ran accidentally.\nPlease execute the pizza program from the \"main.py\" program contained in the root of the project (" + dir_path + ") by running \"python3 main.py\", or open it in a text editor/IDE to see its contents and use in the program.")
exit(1)
args = argparsing.returnArgs()
if platform.system() == 'Linux' or platform.system() == 'Darwin': # color initialization for linux/macos, won't work on windows (excerpt from my python library)
red='\033[00;31m'
green='\033[00;32m'
yellow='\033[00;33m'
blue='\033[00;34m'
purple='\033[00;35m'
cyan='\033[00;36m'
lightgray='\033[00;37m'
lred='\033[01;31m'
lgreen='\033[01;32m'
lyellow='\033[01;33m'
lblue='\033[01;34m'
lpurple='\033[01;35m'
lcyan='\033[01;36m'
white='\033[01;37m'
bold='\033[01m'
dim='\033[02m'
blink='\033[05m' # not working/odd behaviour in some terminals but this is known
underlined='\033[04m'
reverse='\033[07m'
passwordhide='\033[08m'
reset='\033[0m'
errorBG='\033[41;30m'
noticeBG='\033[43;30m'
debugBG='\033[47;30m'
else:
red=''
green=''
yellow=''
blue=''
purple=''
cyan=''
lightgray=''
lred=''
lgreen=''
lyellow=''
lblue=''
lpurple=''
lcyan=''
white=''
bold=''
dim=''
blink=''
underlined=''
reverse=''
passwordhide=''
reset=''
errorBG=''
noticeBG=''
indent = u'\U00000009' # unicode tabulation character, for use in printing data structures in debug subroutines and raw data writes when necessary for the data driver (yes i use tabs), or other printing/layout use.
def printError(text, debug=False):
'''
Prints an error to the console with an optional debug check
'''
if debug:
print(errorBG + "[ERROR]" + reset + " " + text) if args.debugFlag else None
else:
print(errorBG + "[ERROR]" + reset + " " + text)
def printNotice(text, debug=False):
'''
Prints a warning to the console with an optional debug check
'''
if debug:
print(noticeBG + "[NOTICE]" + reset + " " + text) if args.debugFlag else None
else:
print(noticeBG + "[NOTICE]" + reset + " " + text)
def printDebug(text):
'''
Prints debug text to the console if the debug flag is set
'''
print(debugBG + "[DEBUG]" + reset + " " + text) if args.debugFlag else None
def clear():
'''
Platform agnostic screen clear
'''
os.system('cls' if os.name == 'nt' else 'clear')
def generateReceipt(order):
'''
Generates a receipt for a given order.
'''
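    # Shape of `order` as used below (shown here for clarity; inferred from this function):
    #   {"time": <unix timestamp>, "name": str, "address": str,
    #    "delivered": bool, "deliveryTip": float or None,
    #    "pizzas": {"<number>": {"size": "small"|"medium"|"large", "toppings": [str, ...]}, ...}}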
dir_path = os.path.dirname(os.path.realpath(__file__))
configData = dataDriver.loadConfig()
header = '*'
for i in range(len(configData['parlorName'])+8):
header += '*'
header += '''*
* ''' + configData['parlorName'] + ''' *
*'''
for i in range(len(configData['parlorName'])+8):
header += '*'
header += '*'
headerLines = header.splitlines()
receipt = fpdf.FPDF()
receipt.add_page()
receipt.add_font('receiptFont', '', dir_path + '/data/receiptFont.ttf', uni=True)
receipt.set_font("receiptFont", size = 10)
for x in headerLines:
receipt.cell(200, 10, txt=x, ln=1, align='C')
receipt.cell(200, 10, txt="Pizza Receipt", ln=1, align="C")
receipt.cell(200, 10, txt="Time ordered: " + time.ctime(order["time"]), ln=1, align="C")
receipt.cell(200, 10, txt="--", ln=1, align="C")
receipt.cell(200, 10, txt="Order Items:", ln=1, align="C")
subTotal = 0
for pizza in order["pizzas"]:
price = float(0)
if order["pizzas"][pizza]["size"] == "small":
price = price + float(configData["sizeCosts"]["small"])
elif order["pizzas"][pizza]["size"] == "medium":
price = price + float(configData["sizeCosts"]["medium"])
elif order["pizzas"][pizza]["size"] == "large":
price = price + float(configData["sizeCosts"]["large"])
if len(order["pizzas"][pizza]["toppings"]) > 3:
for i in range(3):
price = price + float(configData["toppings<=3"])
for i in range(len(order["pizzas"][pizza]["toppings"]) - 3):
price = price + float(configData["toppings>=4"])
else:
for i in range(len(order["pizzas"][pizza]["toppings"])):
price = price + float(configData["toppings<=3"])
line = "Pizza " + pizza + ": " + order["pizzas"][pizza]["size"] + " pizza with " + "cheese, " + str(order["pizzas"][pizza]["toppings"])[1:len(str(order["pizzas"][pizza]["toppings"]))-1].replace("'", "") + " | $" + "{:.2f}".format(price) if order["pizzas"][pizza]["toppings"] != [''] else "Pizza " + pizza + ": " + order["pizzas"][pizza]["size"] + " pizza with cheese | $" + "{:.2f}".format(price)
receipt.cell(200, 10, txt=line, ln=1, align="L")
subTotal = subTotal + price
receipt.cell(200, 10, txt="Subtotal: $" + "{:.2f}".format(subTotal), ln=1, align="L")
tax = subTotal * float(configData["taxRate"]/100)
receipt.cell(200, 10, txt="Tax: $" + "{:.2f}".format(tax), ln=1, align="L")
total = subTotal + tax
receipt.cell(200, 10, txt="Total: $" + "{:.2f}".format(total), ln=1, align="L")
receipt.cell(200, 10, txt="--", ln=1, align="C")
if order["delivered"] == True:
price = float(configData["deliveryFee"])
receipt.cell(200, 10, txt="Delivery Fee: $" + "{:.2f}".format(price), ln=1, align="L")
if order["deliveryTip"] != None:
price = price + order["deliveryTip"]
receipt.cell(200, 10, txt="Tip: $" + "{:.2f}".format(order["deliveryTip"]), ln=1, align="L")
grandTotal = total + price
else:
receipt.cell(200, 10, txt="Delivery Fee: $0.00 (not delivery)", ln=1, align="L")
grandTotal = total
receipt.cell(200, 10, txt="Grand Total: $" + "{:.2f}".format(grandTotal), ln=1, align="L")
receipt.cell(200, 10, txt="--", ln=1, align="C")
receipt.cell(200, 10, txt="Order info:", ln=1, align="C")
receipt.cell(200, 10, txt="Name: " + order["name"], ln=1, align="L")
if order["delivered"] == True:
receipt.cell(200, 10, txt="Delivery: Yes", ln=1, align="L")
receipt.cell(200, 10, txt="Address: " + order["address"], ln=1, align="L")
else:
receipt.cell(200, 10, txt="Delivery: No", ln=1, align="L")
receipt.cell(200, 10, txt="Address: 123 Parlor St. for pickup", ln=1, align="L")
receipt.cell(200, 10, txt="--", ln=1, align="C")
receipt.cell(200, 10, txt="Thank you for your order!", ln=1, align="C")
receipt.output("receipt.pdf")
print("Receipt generated! Openining in system default PDF viewer...")
subprocess.Popen(["open receipt.pdf"], shell=True) # macOS now at the moment, time constratints :/
|
python
|
#!/usr/bin/env python
# encoding: utf-8
# Change the str encoding from ascii to utf8 (or gb18030)
import random
import sys
import time
import requests
from bs4 import BeautifulSoup
file_name = 'book_list.txt'
file_content = '' # the content that will eventually be written to the file
file_content += '生成时间:' + time.asctime()
headers = [
{'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'},
{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'},
{'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'},
{'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/44.0.2403.89 Chrome/44.0.2403.89 Safari/537.36'}
]
def book_spider(book_tag):
global file_content, headers
url = "http://www.douban.com/tag/%s/book" % book_tag
source_code = requests.get(url, headers=random.choice(headers))
    # grab the raw page text from the response
    plain_text = source_code.text
    # BeautifulSoup objects can be sorted through easily
soup = BeautifulSoup(plain_text)
title_divide = '\n' + '--' * 30 + '\n' + '--' * 30 + '\n'
file_content += title_divide + '\t' * 4 + \
book_tag + ':' + title_divide
count = 1
    # get the soup object for the book list
list_soup = soup.find('div', {'class': 'mod book-list'})
for book_info in list_soup.findAll('dd'):
print('tag: %s, count: %d' % (book_tag, count))
title = book_info.find('a', {'class': 'title'}).string.strip()
desc = book_info.find('div', {'class': 'desc'}).string.strip()
desc_list = desc.split('/')
author_info = '作者/译者: ' + '/'.join(desc_list[0:-3])
pub_info = '出版信息: ' + '/'.join(desc_list[-3:])
try:
rating = book_info.find(
'span', {'class': 'rating_nums'}).string.strip()
except AttributeError:
rating = "无"
file_content += "*%d\t《%s》\t评分:%s\n\t%s\n\t%s\n\n" % (
count, title, rating, author_info.strip(), pub_info.strip())
count += 1
def do_spider(book_lists):
for book_tag in book_lists:
book_spider(book_tag)
if __name__ == "__main__":
book_lists = ['心理学', '人物传记', '中国历史', '旅行', '生活', '科普']
do_spider(book_lists)
    # write the final result to the file
f = open(file_name, 'w')
f.write(file_content)
f.close()
|
python
|
import numpy as np
from scipy.ndimage import map_coordinates
import open3d
from PIL import Image
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import functools
from multiprocessing import Pool
from utils_eval import np_coor2xy, np_coory2v
def xyz_2_coorxy(xs, ys, zs, H, W):
us = np.arctan2(xs, -ys)
vs = -np.arctan(zs / np.sqrt(xs**2 + ys**2))
coorx = (us / (2 * np.pi) + 0.5) * W
coory = (vs / np.pi + 0.5) * H
return coorx, coory
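# Worked example for orientation: a point on the -y axis, e.g. xyz_2_coorxy(0, -1, 0, H, W),
# gives u = arctan2(0, 1) = 0 and v = 0, which maps to the centre of the
# equirectangular image (coorx = 0.5 * W, coory = 0.5 * H).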
def pt_in_poly(poly, pt):
return poly.contains(Point(pt))
def warp_walls(xy, floor_z, ceil_z, H, W, ppm, alpha):
all_rgba = []
all_xyz = []
for i in range(len(xy)):
next_i = (i + 1) % len(xy)
xy_a = xy[i]
xy_b = xy[next_i]
xy_w = np.sqrt(((xy_a - xy_b)**2).sum())
t_h = int(round((ceil_z - floor_z) * ppm))
t_w = int(round(xy_w * ppm))
xs = np.linspace(xy_a[0], xy_b[0], t_w)[None].repeat(t_h, 0)
ys = np.linspace(xy_a[1], xy_b[1], t_w)[None].repeat(t_h, 0)
zs = np.linspace(floor_z, ceil_z, t_h)[:, None].repeat(t_w, 1)
coorx, coory = xyz_2_coorxy(xs, ys, zs, H, W)
plane_texture = np.stack([
map_coordinates(equirect_texture[..., 0], [coory, coorx], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 1], [coory, coorx], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 2], [coory, coorx], order=1, mode='wrap'),
np.zeros([t_h, t_w]) + alpha,
], -1)
plane_xyz = np.stack([xs, ys, zs], axis=-1)
all_rgba.extend(plane_texture.reshape(-1, 4))
all_xyz.extend(plane_xyz.reshape(-1, 3))
return all_rgba, all_xyz
def warp_floor_ceiling(xy, z_floor, z_ceiling, H, W, ppm, alpha, n_thread):
min_x = xy[:, 0].min()
max_x = xy[:, 0].max()
min_y = xy[:, 1].min()
max_y = xy[:, 1].max()
t_h = int(round((max_y - min_y) * ppm))
t_w = int(round((max_x - min_x) * ppm))
xs = np.linspace(min_x, max_x, t_w)[None].repeat(t_h, 0)
ys = np.linspace(min_y, max_y, t_h)[:, None].repeat(t_w, 1)
zs_floor = np.zeros_like(xs) + z_floor
zs_ceil = np.zeros_like(xs) + z_ceiling
coorx_floor, coory_floor = xyz_2_coorxy(xs, ys, zs_floor, H, W)
coorx_ceil, coory_ceil = xyz_2_coorxy(xs, ys, zs_ceil, H, W)
floor_texture = np.stack([
map_coordinates(equirect_texture[..., 0], [coory_floor, coorx_floor], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 1], [coory_floor, coorx_floor], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 2], [coory_floor, coorx_floor], order=1, mode='wrap'),
np.zeros([t_h, t_w]) + alpha,
], -1).reshape(-1, 4)
floor_xyz = np.stack([xs, ys, zs_floor], axis=-1).reshape(-1, 3)
ceil_texture = np.stack([
map_coordinates(equirect_texture[..., 0], [coory_ceil, coorx_ceil], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 1], [coory_ceil, coorx_ceil], order=1, mode='wrap'),
map_coordinates(equirect_texture[..., 2], [coory_ceil, coorx_ceil], order=1, mode='wrap'),
np.zeros([t_h, t_w]) + alpha,
], -1).reshape(-1, 4)
ceil_xyz = np.stack([xs, ys, zs_ceil], axis=-1).reshape(-1, 3)
xy_poly = Polygon(xy)
with Pool(n_thread) as p:
sel = p.map(functools.partial(pt_in_poly, xy_poly), floor_xyz[:, :2])
return floor_texture[sel], floor_xyz[sel], ceil_texture[sel], ceil_xyz[sel]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--img', default='assert/output_preprocess/demo_aligned_rgb.png',
help='Image texture in equirectangular format')
parser.add_argument('--layout', default='assert/output/demo_aligned_rgb_cor_id.txt',
help='Txt file containing layout corners (cor_id)')
parser.add_argument('--camera_height', default=1.6, type=float,
help='Camera height in meter (not the viewer camera)')
parser.add_argument('--ppm', default=120, type=int,
help='Points per meter')
    parser.add_argument('--point_size', default=0.0025, type=float,
help='Point size')
parser.add_argument('--alpha', default=1.0, type=float,
help='Opacity of the texture')
parser.add_argument('--threads', default=10, type=int,
help='Number of threads to use')
parser.add_argument('--ignore_floor', action='store_true',
help='Skip rendering floor')
parser.add_argument('--ignore_ceiling', action='store_true',
help='Skip rendering ceiling')
args = parser.parse_args()
# Reading source (texture img, cor_id txt)
equirect_texture = np.array(Image.open(args.img)) / 255.0
with open(args.layout) as f:
cor_id = np.array([line.split() for line in f], np.float32)
# Convert cor_id to 3d xyz
N = len(cor_id) // 2
H, W = equirect_texture.shape[:2]
floor_z = -args.camera_height
floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H)
c = np.sqrt((floor_xy**2).sum(1))
v = np_coory2v(cor_id[0::2, 1], H)
ceil_z = (c * np.tan(v)).mean()
# Warp each wall
all_rgba, all_xyz = warp_walls(floor_xy, floor_z, ceil_z, H, W, args.ppm, args.alpha)
# Warp floor and ceiling
if not args.ignore_floor or not args.ignore_ceiling:
fi, fp, ci, cp = warp_floor_ceiling(floor_xy, floor_z, ceil_z, H, W,
ppm=args.ppm,
alpha=args.alpha,
n_thread=args.threads)
if not args.ignore_floor:
all_rgba.extend(fi)
all_xyz.extend(fp)
if not args.ignore_ceiling:
all_rgba.extend(ci)
all_xyz.extend(cp)
# Launch point cloud viewer
print('# of points:', len(all_rgba))
all_xyz = np.array(all_xyz)
all_rgb = np.array(all_rgba)[:, :3]
pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(all_xyz)
pcd.colors = open3d.Vector3dVector(all_rgb)
open3d.draw_geometries([pcd])
|
python
|
import pytest
from openeye import oechem
from openff.recharge.aromaticity import AromaticityModel, AromaticityModels
from openff.recharge.utilities.openeye import smiles_to_molecule
@pytest.mark.parametrize(
"smiles",
[
"c1ccccc1", # benzene
"c1ccc2ccccc2c1", # napthelene
"c1ccc2c(c1)ccc3ccccc23", # phenanthrene
"c1ccc2c(c1)ccc3c4ccccc4ccc23", # chrysene
"c1cc2ccc3cccc4ccc(c1)c2c34", # pyrene
"c1cc2ccc3ccc4ccc5ccc6ccc1c7c2c3c4c5c67", # coronene
"Cc1ccc2cc3ccc(C)cc3cc2c1", # 2,7-Dimethylanthracene
],
)
def test_am1_bcc_aromaticity_simple(smiles):
"""Checks that the custom AM1BCC aromaticity model behaves as
expected for simple fused hydrocarbons.
"""
oe_molecule = smiles_to_molecule(smiles)
AromaticityModel.assign(oe_molecule, AromaticityModels.AM1BCC)
ring_carbons = [
atom
for atom in oe_molecule.GetAtoms()
if atom.GetAtomicNum() == 6 and oechem.OEAtomIsInRingSize(atom, 6)
]
ring_indices = {atom.GetIdx() for atom in ring_carbons}
assert all(atom.IsAromatic() for atom in ring_carbons)
assert all(
bond.IsAromatic()
for bond in oe_molecule.GetBonds()
if bond.GetBgnIdx() in ring_indices and bond.GetEndIdx() in ring_indices
)
def test_am1_bcc_aromaticity_ring_size():
"""Checks that the custom AM1BCC aromaticity model behaves as
expected fused hydrocarbons with varying ring sizes"""
oe_molecule = smiles_to_molecule("C1CC2=CC=CC3=C2C1=CC=C3")
AromaticityModel.assign(oe_molecule, AromaticityModels.AM1BCC)
atoms = {atom.GetIdx(): atom for atom in oe_molecule.GetAtoms()}
    assert all(not atoms[index].IsAromatic() for index in range(2))
    assert all(atoms[index].IsAromatic() for index in range(2, 12))
@pytest.mark.parametrize(
"aromaticity_model",
[AromaticityModels.AM1BCC, AromaticityModels.MDL],
)
def test_aromaticity_models(aromaticity_model):
oe_molecule = smiles_to_molecule("C")
AromaticityModel.assign(oe_molecule, aromaticity_model)
|
python
|
import pygame, math, os, time
from .Torpedo import Torpedo
from .Explosion import Explosion
FRICTION_COEFF = 1 - 0.015
class Spaceship(pygame.sprite.Sprite):
def __init__(self, colour, img_path, bearing, torpedo_group, explosion_group):
super().__init__()
self.torpedo_group = torpedo_group
self.explosion_group = explosion_group
self.colour = colour
self.last_shoot = 0
self.shoot_delay = 0.9
self.raw_image = pygame.image.load(
os.path.join("assets", "spaceships", img_path)
)
self.raw_image = pygame.transform.scale(self.raw_image, (64, 64))
self.image = self.raw_image
self.rect = self.image.get_rect()
self.rect.x = 0
self.rect.y = 0
self.x_vel = 0
self.y_vel = 0
self.power = {"engines": 0, "shields": 0, "weapons": 0}
self.bearing = {"engines": bearing, "shields": 0, "weapons": 0}
self.active = {"engines": False, "shields": False, "weapons": False}
self.health = 100
def update(self):
# print("{col} health is {health}".format(col=self.colour,health=self.health))
        if self.health < 0:
# print("{me} has now died".format(me=self.colour))
self.kill()
self.explosion_group.add(Explosion(self.colour, self.rect.x, self.rect.y))
return
self.bearing["engines"] = self.bearing["engines"] % 360
self.bearing["shields"] = self.bearing["shields"] % 360
self.bearing["weapons"] = self.bearing["weapons"] % 360
self.rect.x += self.x_vel
self.rect.y += self.y_vel
self.x_vel *= FRICTION_COEFF
self.y_vel *= FRICTION_COEFF
if self.active["engines"]:
self.x_vel -= (
math.sin(math.radians(self.bearing["engines"])) * self.power["engines"]
)
self.y_vel -= (
math.cos(math.radians(self.bearing["engines"])) * self.power["engines"]
)
self.image, self.rect = Spaceship.rotate(
self.raw_image, self.rect, self.bearing["engines"]
)
if (self.rect.y < 0 and self.y_vel < 0) or (
self.rect.y > 1080 - self.image.get_height() and self.y_vel > 0
):
self.y_vel = -self.y_vel
if (self.rect.x < 0 and self.x_vel < 0) or (
self.rect.x > (1920 - 378) - self.image.get_width() and self.x_vel > 0
):
self.x_vel = -self.x_vel
if self.active["weapons"] and time.time() > self.last_shoot + (self.shoot_delay/(self.power["weapons"]/65 + 0.1)):
self.shoot()
self.last_shoot = time.time()
def shoot(self):
self.torpedo_group.add(Torpedo(self.colour,self.bearing["weapons"],self.rect.x,self.rect.y,self.x_vel,self.y_vel))
@staticmethod
def rotate(image, rect, angle):
"""Rotate the image while keeping its center."""
# Rotate the original image without modifying it.
new_image = pygame.transform.rotate(image, angle)
# Get a new rect with the center of the old rect.
rect = new_image.get_rect(center=rect.center)
return new_image, rect
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from datetime import datetime
class SyncModel:
""" Implements common methods used by the models for syncing data into database.
currently following models use this: Companies, Contacts, Departments, Events,
Invoices, Projects, Users."""
    def __init__(self, db_params: dict, table_names: dict):
self.name = 'syncmodel'
pass
def table_name(self):
return self.table
def status(self):
return {
'database_table': self.table_name(),
'synced_entries': self.count(),
'last_modified': self.max_last_modified_timestamp()
}
def truncate_table(self):
self.postgresql_wrapper.execute(
f'TRUNCATE TABLE {self.table};'
)
def count_sql(self):
return f'SELECT COUNT(*) FROM {self.table}'
def max_last_modified_sql(self):
return f'SELECT max(updated_at) FROM {self.table}'
def max_last_modified_timestamp(self) -> datetime:
"""Returns the highest ldap_modifytimestamp"""
return self.postgresql_wrapper.execute(
self.max_last_modified_sql()
)[0][0]
def count(self) -> int:
return self.postgresql_wrapper.execute(self.count_sql())[0][0]
    # Customize the methods below in the classes where we want different or additional columns.
@classmethod
def create_table_sql(cls, table_name):
return f'''CREATE TABLE IF NOT EXISTS {table_name}(
id serial PRIMARY KEY,
tl_uuid uuid NOT NULL,
tl_content jsonb NOT NULL,
tl_type VARCHAR,
created_at timestamp with time zone NOT NULL DEFAULT now(),
updated_at timestamp with time zone NOT NULL DEFAULT now(),
CONSTRAINT {table_name.replace(".","_")}_constraint_key UNIQUE (tl_uuid)
);
'''
# selects a page of data from our models database table
def select_page(self, limit=0, offset=0):
return self.postgresql_wrapper.execute(
f'''
SELECT * from {self.table} ORDER BY id LIMIT %s OFFSET %s
''',
(limit, offset)
)
def upsert_entities_sql(self):
return f'''INSERT INTO {self.table} (
tl_uuid,
tl_type,
tl_content)
VALUES (%s, %s, %s) ON CONFLICT (tl_uuid) DO
UPDATE
SET tl_content = EXCLUDED.tl_content,
tl_type = EXCLUDED.tl_type,
updated_at = now();
'''
def _prepare_vars_upsert(self, teamleader_result, tl_type: str) -> tuple:
"""Transforms teamleader entry to pass to the psycopg2 execute function.
Transform it to a tuple containing the parameters to be able to upsert.
"""
return (
str(teamleader_result['id']),
tl_type,
json.dumps(teamleader_result)
)
def upsert_results(self, teamleader_results: list):
"""Upsert the teamleader entries into PostgreSQL.
Transforms and flattens the teamleader entries to one list,
in order to execute in one transaction.
Arguments:
teamleader_results -- list of Tuple[list[teamleader_entry], str].
"""
vars_list = []
for result_tuple in teamleader_results:
tl_type = result_tuple[1]
            # Parse and flatten the SQL values from the teamleader results as a
            # passable list
vars_list.extend(
[
self._prepare_vars_upsert(tl_result, tl_type)
for tl_result
in result_tuple[0]
]
)
self.postgresql_wrapper.executemany(
self.upsert_entities_sql(), vars_list)
# deprecated/unused
# import uuid
# def insert_entity(self, date_time: datetime = datetime.now(), content='{"key": "value"}'):
# vars = (str(uuid.uuid4()), self.name, content)
# self.postgresql_wrapper.execute(self.upsert_entities_sql(), vars)
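# Illustrative sketch (not part of the original module; names below are hypothetical):
# a concrete model is expected to set `self.table` and `self.postgresql_wrapper`
# before the methods above are used, e.g. roughly:
#
# class Companies(SyncModel):
#     def __init__(self, postgresql_wrapper, table_name='companies'):
#         self.name = 'companies'
#         self.table = table_name  # hypothetical table name
#         self.postgresql_wrapper = postgresql_wrapper  # assumed helper exposing execute()/executemany()
#         self.postgresql_wrapper.execute(self.create_table_sql(self.table))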
|
python
|
class InvalidBrowserException(Exception):
pass
class InvalidURLException(Exception):
pass
|
python
|
import os
import time
import json
import string
import random
import itertools
from datetime import datetime
import numpy as np
import pandas as pd
from numba import jit
from sklearn.metrics import mean_squared_error
from contextlib import contextmanager, redirect_stdout
import matplotlib.pyplot as plt
N_TRAIN = 20216100
N_TEST = 41697600
# load file paths
settings = json.load(open("./settings.json"))
OUTPUT_PATH = settings["OUTPUT_PATH"]
MODEL_PATH = settings["MODEL_PATH"]
DATA_PATH = settings["DATA_PATH"]
PRIMARY_USE_GROUPINGS = [
["Education"],
["Lodging/residential"],
["Office"],
["Entertainment/public assembly"],
["Public services"],
["Other", "Retail", "Parking", "Warehouse/storage",
"Food sales and service", "Religious worship", "Utility", "Technology/science",
"Healthcare", "Manufacturing/industrial", "Services",]
]
def take_first(x): return x.values[0]
def take_last(x): return x.values[-1]
@contextmanager
def timer(name):
print(f'{datetime.now()} - [{name}] ...')
t0 = time.time()
yield
print(f'{datetime.now()} - [{name}] done in {time.time() - t0:.0f} s\n')
def make_dir(dir_name):
"""Create a directory if it doesn"t already exist"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
class Logger(object):
"""Save a string line(s) to a file."""
def __init__(self, file_path, mode="w", verbose=False):
self.file_path = file_path
self.verbose = verbose
open(file_path, mode=mode)
def append(self, line, print_line=None):
if print_line or self.verbose:
print(line)
with open(self.file_path, "a") as f:
with redirect_stdout(f):
print(line)
@jit(nopython=True)
def find_zero_streaks(x):
n = len(x)
streaks = np.zeros(n)
if x[0] == 0:
streaks[0] = 1
for i in range(1,n):
if x[i] == 0:
streaks[i] = streaks[i-1] + 1
return streaks
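# For example, find_zero_streaks(np.array([0, 0, 1, 0])) returns [1., 2., 0., 1.]:
# each position holds the length of the run of zeros ending at that position.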
def find_zero_streaks_wrapper(x):
return find_zero_streaks(x.values)
@jit(nopython=True)
def find_constant_values(x, min_constant_values=6):
i = 0
j = i + 1
n = len(x)
ignore_values = np.zeros(n)
while j < n:
if x[i] == x[j]:
k = j+1
while k < n and x[i] == x[k]:
k += 1
if k-1-i > min_constant_values:
ignore_values[i+1:k] = 1
i = k
else:
i += 1
j = i + 1
return ignore_values==1
def rmsle(x,y):
x = np.log1p(x)
y = np.log1p(y)
return np.sqrt(mean_squared_error(x, y))
def plot_feature_importance(model, feature_cols):
importance_df = pd.DataFrame(
model.feature_importance(),
index=feature_cols,
columns=['importance']).sort_values('importance')
fig, ax = plt.subplots(figsize=(8, 8))
importance_df.plot.barh(ax=ax)
fig.show()
def get_validation_months(n_months):
validation_months_list = [np.arange(i+1,i+2+n_months-1)
for shift in range(n_months)
for i in range(shift,12+shift, n_months)]
validation_months_list = [(x-1) % 12 + 1 for x in validation_months_list]
return validation_months_list
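# For example, get_validation_months(3) returns all twelve 3-month windows of the
# year (wrapping past December), e.g. [1 2 3], [4 5 6], ..., [11 12 1], [12 1 2],
# grouped by their starting offset.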
def reduce_mem_usage(df, skip_cols=[], verbose=False):
""" Reduce memory usage in a pandas dataframe
Based on this great kernel:
https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65
"""
start_mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage of properties dataframe is :",start_mem_usg," MB")
NAlist = [] # Keeps track of columns that have missing values filled in.
for col in np.setdiff1d(df.columns, skip_cols):
if df[col].dtype != object: # Exclude strings
# print column type
if verbose:
print("******************************")
print("Column: ",col)
print("dtype before: ",df[col].dtype)
# make variables for Int, max and min
IsInt = False
mx = df[col].max()
mn = df[col].min()
if verbose:
print("min for this col: ",mn)
print("max for this col: ",mx)
# Integer does not support NA, therefore, NA needs to be filled
if not np.isfinite(df[col]).all():
NAlist.append(col)
df[col].fillna(mn-1,inplace=True)
# test if column can be converted to an integer
asint = df[col].fillna(0).astype(np.int64)
result = (df[col] - asint)
result = result.sum()
if result > -0.01 and result < 0.01:
IsInt = True
# Make Integer/unsigned Integer datatypes
if IsInt:
if mn >= 0:
if mx < 255:
df[col] = df[col].astype(np.uint8)
elif mx < 65535:
df[col] = df[col].astype(np.uint16)
elif mx < 4294967295:
df[col] = df[col].astype(np.uint32)
else:
df[col] = df[col].astype(np.uint64)
else:
if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
# Make float datatypes 32 bit
else:
df[col] = df[col].astype(np.float32)
if verbose:
print("dtype after: ",df[col].dtype)
print("******************************")
# Print final result
if verbose:
print("___MEMORY USAGE AFTER COMPLETION:___")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage is: ",mem_usg," MB")
print("This is ",100*mem_usg/start_mem_usg,"% of the initial size")
return df, NAlist
def load_data(data_name):
"""Loads and formats data"""
# raw
if data_name == "train":
return pd.read_csv(f"{DATA_PATH}/train.csv")
if data_name == "test":
return pd.read_csv(f"{DATA_PATH}/test.csv")
if data_name == "input":
return load_data("train"), load_data("test")
# clean
if data_name == "train_clean":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_clean.pkl")
if data_name == "test_clean":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_clean.pkl")
if data_name == "clean":
return load_data("train_clean"), load_data("test_clean")
# nn meter
if data_name == "train_nn_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_meter.pkl")
if data_name == "test_nn_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_meter.pkl")
if data_name == "nn_meter":
return load_data("train_nn_meter"), load_data("test_nn_meter")
# nn target normalized meter
if data_name == "train_nn_target_normalized_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_target_normalized_meter.pkl")
if data_name == "test_nn_target_normalized_meter":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_target_normalized_meter.pkl")
if data_name == "nn_target_normalized_meter":
return load_data("train_nn_target_normalized_meter"), load_data("test_nn_target_normalized_meter")
# nn site
if data_name == "train_nn_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_site.pkl")
if data_name == "test_nn_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_site.pkl")
if data_name == "nn_site":
return load_data("train_nn_site"), load_data("test_nn_site")
# nn target normalized site
if data_name == "train_nn_target_normalized_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_nn_target_normalized_site.pkl")
if data_name == "test_nn_target_normalized_site":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_nn_target_normalized_site.pkl")
if data_name == "nn_target_normalized_site":
return load_data("train_nn_target_normalized_site"), load_data("test_nn_target_normalized_site")
# debug 1000
if data_name == "train_clean_debug_1000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_clean_debug_1000.pkl")
if data_name == "test_clean_debug_1000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_clean_debug_1000.pkl")
if data_name == "clean_debug_1000":
return load_data("train_clean_debug_1000"), load_data("test_clean_debug_1000")
if data_name == "leak_debug_1000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/leak_debug_1000.pkl")
# debug 10000
if data_name == "train_clean_debug_10000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/train_clean_debug_10000.pkl")
if data_name == "test_clean_debug_10000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/test_clean_debug_10000.pkl")
if data_name == "clean_debug_10000":
return load_data("train_clean_debug_10000"), load_data("test_clean_debug_10000")
if data_name == "leak_debug_10000":
return pd.read_pickle(f"{DATA_PATH}/preprocessed/leak_debug_10000.pkl")
# raw weather
if data_name == "train_weather":
return pd.read_csv(f"{DATA_PATH}/weather_train.csv")
if data_name == "test_weather":
return pd.read_csv(f"{DATA_PATH}/weather_test.csv")
if data_name == "weather":
return load_data("train_weather"), load_data("test_weather")
# leak
if data_name == "leak":
return pd.read_feather(f"{DATA_PATH}/leak.feather")
# leak
if data_name == "is_leak":
return pd.read_feather(f"{DATA_PATH}/is_leak.feather")
# rows to drop
if data_name == "bad_meter_readings":
return pd.read_csv(f"{DATA_PATH}/bad_meter_readings.csv")
# meta
if data_name == "meta":
return pd.read_csv(f"{DATA_PATH}/building_metadata.csv")
# submissions
if data_name == "sample_submission":
return pd.read_csv(f"{DATA_PATH}/sample_submission.csv")
# meta
if data_name == "best_submission":
return pd.read_csv(f"{DATA_PATH}/submissions/final_average_top4.csv")
|
python
|
"""
Configurations for Reserved Virtual Machines simulations:
"""
###################################
### Don't touch this line - import
import numpy
###################################
################################################
### General configurations, for all simulations
START_TIME = 0 # Seconds - NOT IMPLEMENTED -> To simulate starting the experiment at a specific hour
SIMULATION_TIME = 86400 # Total simulation time (in seconds)
#AVERAGE_SERVICE_TIME = 0.008
AVERAGE_SERVICE_TIME = 0.3
#MAX_AVERAGE_LATENCY = 0.33 #Expected response time for the request from client's perspective (in seconds)
MAX_AVERAGE_LATENCY = 60
#####SYNTHETIC RATE FOR EXPERIMENTS
#ARRIVAL_RATE = numpy.array([
#4,5,6,5,4,3,2,1.6,1.4,1.3,1.2,1.1,1,1,1.1,1.1,1.2,1.4,1.6,1.7,1.8,1.9,2,3
#])
#### REAL REQUEST RATE FROM DATIL
ARRIVAL_RATE = numpy.array([
4.745364, 6.063600, 7.923774, 10.608352, 14.594335, 20.014631, 26.161790, 28.412080, 30.432822, 30.187835,
20.620131, 12.936782, 5.346152, 1.807029, 2.229556, 3.186768, 3.543904, 4.126800,
4.330005, 3.319482, 3.371923, 3.806141, 3.396690, 4.052290
])
################################################
### Configurations for Reserved VMs simulations
MAX_CONCURRENT_REQUESTS_PER_VM = 2
VM_HOURLY_COST_RESERVED = 0.034 #Per VM hourly cost in USD
################################################
### Configurations for On-demand VMs simulations
MAX_CONCURRENT_REQUESTS_PER_VM = 2
VM_HOURLY_COST_ONDEMAND = 0.047 #Per VM hourly cost in USD
################################################
### Configurations for Serverless simulations
TIME_TO_SETUP_FUNCTION = 1.4
COST_PER_REQUEST = 0.0000002
FUNCTION_MEMORY = 128 #Compression function requires 15MB, but the minimum for billing is 128MB
COST_PER_EXECUTION = 0.00001667 #Compression function requires 0.008 secs, but usage is rounded to the nearest 100ms for billing.
|
python
|
#!/usr/bin/python
#
## @file
#
# Collection of classes that establish the basic operation of dave
# as it issues various types of commands to HAL and Kilroy
#
# Jeff 3/14
#
# Hazen 09/14
#
from xml.etree import ElementTree
from PyQt4 import QtCore
import sc_library.tcpMessage as tcpMessage
## addField
#
# @param block A ElementTree node.
# @param name The name of the field as a string.
# @param value The value of the field.
#
def addField(block, name, value):
field = ElementTree.SubElement(block, name)
field.set("type", str(type(value).__name__))
field.text = str(value)
## DaveAction
#
# The base class for a dave action (DA for short).
#
class DaveAction(QtCore.QObject):
# Define custom signal
complete_signal = QtCore.pyqtSignal(object)
error_signal = QtCore.pyqtSignal(object)
## __init__
#
# Default initialization.
#
def __init__(self):
# Initialize parent class
QtCore.QObject.__init__(self, None)
self.action_type = "NA"
self.disk_usage = 0
self.duration = 0
self.tcp_client = None
self.message = None
self.valid = True
# Define pause behaviors
self.should_pause = False # Pause after completion
self.should_pause_after_error = True # Pause after error
# Initialize internal timer
self.lost_message_timer = QtCore.QTimer(self)
self.lost_message_timer.setSingleShot(True)
self.lost_message_timer.timeout.connect(self.handleTimerDone)
self.lost_message_delay = 2000 # Wait for a test message to be returned before issuing an error
## abort
#
# Handle an external abort call
#
def abort(self):
self.completeAction(self.message)
## cleanUp
#
# Handle clean up of the action
#
def cleanUp(self):
self.tcp_client.messageReceived.disconnect()
## createETree
#
    # Takes a dictionary that may (or may not) contain the information that is
    # necessary to create the Action. If the information is not present then
    # None is returned. If the information is present then an ElementTree is
    # returned containing the information necessary to create the Action.
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
pass
## completeAction
#
# Handle the completion of an action
#
# @param message A TCP message object
#
def completeAction(self, message):
if message.isTest():
time = message.getResponse("duration")
if time is not None: self.duration = time
space = message.getResponse("disk_usage")
if space is not None: self.disk_usage = space
self.complete_signal.emit(message)
## completeActionWithError
#
# Send an error message if needed
#
# @param message A TCP message object
#
def completeActionWithError(self, message):
if (self.should_pause_after_error == True):
self.should_pause = True
self.error_signal.emit(message)
## getActionType
#
# @return The type of the action (i.e. "hal", "kilroy", ..)
#
def getActionType(self):
return self.action_type
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return type(self).__name__[2:]
## getDuration
#
# @return Duration (in seconds?)
#
def getDuration(self):
return self.duration
## getLongDescriptor
#
    # @return An N x 2 array containing the message data.
#
def getLongDescriptor(self):
if self.message is not None:
mdict = self.message.getMessageData()
data = []
for key in sorted(mdict):
data.append([key, mdict[key]])
return data
else:
return [None,None]
## getUsage
#
# @return Disk usage.
#
def getUsage(self):
return self.disk_usage
## handleReply
#
# handle the return of a message
#
# @param message A TCP message object
#
def handleReply(self, message):
# Stop lost message timer
self.lost_message_timer.stop()
# Check to see if the same message got returned
if not (message.getID() == self.message.getID()):
message.setError(True, "Communication Error: Incorrect Message Returned")
self.completeActionWithError(message)
elif message.hasError():
self.completeActionWithError(message)
else: # Correct message and no error
self.completeAction(message)
## handleTimerDone
#
# Handle a timer done signal
#
def handleTimerDone(self):
error_str = "A message of type " + self.message.getType() + " was never received.\n"
error_str += "Perhaps a module is missing?"
self.message.setError(True, error_str)
self.completeActionWithError(self.message)
## isValid
#
# @return True/False is the command is valid.
#
def isValid(self):
return self.valid
## setProperty
#
# Set object property, throw an error if the property is not recognized.
#
def setProperty(self, pname, pvalue):
if pname in self.properties.keys():
self.properties[pname] = pvalue
else:
raise Exception(pname + " is not a valid property for " + str(type(self)))
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
pass
## setValid
#
    # @param is_valid True/False if this message is valid.
#
def setValid(self, is_valid):
self.valid = is_valid
## shouldPause
#
# Determine if the command engine should pause after this action
#
# @return A boolean determining if the program pauses after this action is complete
def shouldPause(self):
return self.should_pause
## start
#
# Start the action.
#
# @param tcp_client The TCP client to use for communication.
# @param test_mode Send the command in test mode.
#
def start(self, tcp_client, test_mode):
self.tcp_client = tcp_client
self.message.setTestMode(test_mode)
self.tcp_client.messageReceived.connect(self.handleReply)
if self.message.isTest():
self.lost_message_timer.start(self.lost_message_delay)
self.tcp_client.sendMessage(self.message)
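# Typical DaveAction life cycle (summarizing the class above): setup(node) builds
# self.message from an XML node, start() sends it via the TCP client, handleReply()
# checks the returned message ID and any error, and completeAction() /
# completeActionWithError() emit the corresponding Qt signals.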
#
# Specific Actions
#
## DADelay
#
# This action introduces a defined delay.
#
class DADelay(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
## abort
#
# Handle an external abort call
#
def abort(self):
self.delay_timer.stop()
self.completeAction(self.message)
## cleanUp
#
# Handle clean up of the action
#
def cleanUp(self):
pass
## createETree
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
delay = dictionary.get("delay")
if delay is not None:
block = ElementTree.Element(str(type(self).__name__))
addField(block, "delay", delay)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "pause for " + str(self.delay) + "ms"
## handleTimerComplete
#
    # Handle completion of the delay timer
#
def handleTimerComplete(self):
self.completeAction(self.message)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
# Prepare delay timer
self.delay_timer = QtCore.QTimer(self)
self.delay_timer.setSingleShot(True)
self.delay_timer.timeout.connect(self.handleTimerComplete)
self.delay = int(node.find("delay").text)
# Create message and add delay time for accurate dave time estimates
self.message = tcpMessage.TCPMessage(message_type = "Delay",
message_data = {"delay": self.delay});
self.message.addResponse("duration", self.delay)
## start
#
# Start the action.
#
# @param dummy Ignored.
# @param test_mode Send the command in test mode.
#
def start(self, dummy, test_mode):
self.message.setTestMode(test_mode)
if self.message.isTest():
self.completeAction(self.message)
else:
self.delay_timer.start(self.delay)
print "Delaying " + str(self.delay) + " ms"
## DAFindSum
#
# The find sum action.
#
class DAFindSum(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
find_sum = dictionary.get("find_sum")
if find_sum is None:
return
if (find_sum > 0.0):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "min_sum", find_sum)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "find sum (minimum sum = " + str(self.min_sum) + ")"
## handleReply
#
# Overload of default handleReply to allow comparison of min_sum
#
# @param message A TCP message object
#
def handleReply(self, message):
found_sum = message.getResponse("found_sum")
if not (found_sum == None) and (found_sum <= self.min_sum):
message.setError(True, "Found sum " + str(found_sum) + " is smaller than minimum sum " + str(self.min_sum))
DaveAction.handleReply(self, message)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.min_sum = float(node.find("min_sum").text)
self.message = tcpMessage.TCPMessage(message_type = "Find Sum",
message_data = {"min_sum": self.min_sum})
## DAMoveStage
#
# The move stage action.
#
class DAMoveStage(DaveAction):
## __init__
#
# @param tcp_client A tcp communications object.
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
stage_x = dictionary.get("stage_x")
stage_y = dictionary.get("stage_y")
if (stage_x is not None) and (stage_y is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "stage_x", stage_x)
addField(block, "stage_y", stage_y)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "move stage to " + str(self.stage_x) + ", " + str(self.stage_y)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.stage_x = float(node.find("stage_x").text)
self.stage_y = float(node.find("stage_y").text)
self.message = tcpMessage.TCPMessage(message_type = "Move Stage",
message_data = {"stage_x" : self.stage_x,
"stage_y" : self.stage_y})
## DAPause
#
# This action causes Dave to pause.
#
class DAPause(DaveAction):
## __init__
#
# @param tcp_client A tcp communications object.
#
def __init__(self):
DaveAction.__init__(self)
## cleanUp
#
# Handle clean up of the action
#
def cleanUp(self):
pass
## createETree
#
# @param dict A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
pause = dictionary.get("pause")
if (pause is not None):
block = ElementTree.Element(str(type(self).__name__))
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "pause"
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
# Create message and add delay time for accurate dave time estimates
self.message = tcpMessage.TCPMessage(message_type = "Pause");
# Define pause behaviors
self.should_pause = True
## start
#
# Start the action.
#
# @param dummy Ignored.
# @param test_mode Send the command in test mode.
#
def start(self, dummy, test_mode):
self.message.setTestMode(test_mode)
if self.message.isTest():
self.completeAction(self.message)
else:
self.completeAction(self.message)
## DARecenterPiezo
#
# The piezo recentering action. Note that this is only useful if the microscope
# has a motorized Z.
#
class DARecenterPiezo(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
recenter = dictionary.get("recenter")
if (recenter is not None):
block = ElementTree.Element(str(type(self).__name__))
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "recenter piezo"
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.message = tcpMessage.TCPMessage(message_type = "Recenter Piezo")
## DASetDirectory
#
# Change the Hal Directory.
#
class DASetDirectory(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
directory = dictionary.get("directory")
if (directory is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "directory", directory)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "change directory to " + self.directory
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.directory = node.find("directory").text
self.message = tcpMessage.TCPMessage(message_type = "Set Directory",
message_data = {"directory": self.directory})
## DASetFocusLockTarget
#
# The set focus lock target action.
#
class DASetFocusLockTarget(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
lock_target = dictionary.get("lock_target")
if (lock_target is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "lock_target", lock_target)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "set focus lock target to " + str(self.lock_target)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.lock_target = float(node.find("lock_target").text)
self.message = tcpMessage.TCPMessage(message_type = "Set Lock Target",
message_data = {"lock_target" : self.lock_target})
## DASetParameters
#
# The action responsible for setting the movie parameters in Hal.
#
class DASetParameters(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
parameters = dictionary.get("parameters")
if (parameters is not None):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "parameters", parameters)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "set parameters to " + str(self.parameters)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
p_node = node.find("parameters")
if (p_node.attrib["type"] == "int"):
self.parameters = int(node.find("parameters").text)
else:
self.parameters = node.find("parameters").text
self.message = tcpMessage.TCPMessage(message_type = "Set Parameters",
message_data = {"parameters" : self.parameters})
## DASetProgression
#
# The action responsible for setting the illumination progression.
#
class DASetProgression(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
progression = dictionary.get("progression")
if progression is not None:
block = ElementTree.Element(str(type(self).__name__))
for pnode in progression:
# The round trip fixes some white space issues.
block.append(ElementTree.fromstring(ElementTree.tostring(pnode)))
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "set progressions to " + self.type
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.type = node.find("type").text
message_data = {"type" : self.type}
# File progression.
if node.find("filename") is not None:
message_data["filename"] = node.find("filename").text
# Math progression.
elif node.find("channel") is not None:
channels = []
for ch_node in [x for x in node if (x.tag == "channel")]:
channel = int(ch_node.text)
start = float(ch_node.attrib["start"])
if "frames" in ch_node.attrib:
frames = int(ch_node.attrib["frames"])
else:
frames = 100
if "inc" in ch_node.attrib:
inc = float(ch_node.attrib["inc"])
else:
inc = 0.0
channels.append([channel, start, frames, inc])
message_data["channels"] = channels
self.message = tcpMessage.TCPMessage(message_type = "Set Progression",
message_data = message_data)
## DATakeMovie
#
# Send a take movie command to Hal
#
class DATakeMovie(DaveAction):
## __init__
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "hal"
self.properties = {"name" : None,
"length" : None,
"min_spots" : None,
"parameters" : None,
"directory" : None,
"overwrite" : None}
## abort
#
# Send an abort message to Hal
#
def abort(self):
stop_message = tcpMessage.TCPMessage(message_type = "Abort Movie")
self.tcp_client.sendMessage(stop_message)
## createETree
#
# @param dictionary A dictionary.
#
# @return A ElementTree object or None.
#
def createETree(self, dictionary):
name = dictionary.get("name")
length = dictionary.get("length")
min_spots = dictionary.get("min_spots")
parameters = dictionary.get("parameters")
directory = dictionary.get("directory")
overwrite = dictionary.get("overwrite")
if (name is not None) and (length is not None):
if (length > 0):
block = ElementTree.Element(str(type(self).__name__))
addField(block, "name", name)
addField(block, "length", length)
if min_spots is not None:
addField(block, "min_spots", min_spots)
if parameters is not None:
addField(block, "parameters", parameters)
if directory is not None:
addField(block, "directory", directory)
if overwrite is not None:
addField(block, "overwrite", overwrite)
return block
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
if (self.min_spots > 0):
return "take movie " + self.name + ", " + str(self.length) + " frames, " + str(self.min_spots) + " minimum spots"
else:
return "take movie " + self.name + ", " + str(self.length) + " frames"
## handleReply
#
# Overload of default handleReply to allow comparison of min_spots
#
# @param message A TCP message object
#
def handleReply(self, message):
found_spots = message.getResponse("found_spots")
        if (found_spots is not None) and (found_spots < self.min_spots):
err_str = str(found_spots) + " found molecules is less than the target: "
err_str += str(self.min_spots)
message.setError(True, err_str)
DaveAction.handleReply(self,message)
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.name = node.find("name").text
self.length = int(node.find("length").text)
self.min_spots = 0
if node.find("min_spots") is not None:
self.min_spots = int(node.find("min_spots").text)
message_data = {"name" : self.name,
"length" : self.length,
"min_spots" : self.min_spots,
"parameters" : None}
if node.find("parameters") is not None:
message_data["parameters"] = node.find("parameters").text
if node.find("directory") is not None:
message_data["directory"] = node.find("directory").text
if node.find("overwrite") is not None:
message_data["overwrite"] = node.find("overwrite").text
self.message = tcpMessage.TCPMessage(message_type = "Take Movie",
message_data = message_data)
## DAValveProtocol
#
# The fluidics protocol action. Send commands to Kilroy.
#
class DAValveProtocol(DaveAction):
## __init__
#
# Initialize the valve protocol action
#
def __init__(self):
DaveAction.__init__(self)
self.action_type = "kilroy"
self.properties = {"name" : None}
## createETree
#
# Generate a Element Tree for the valve protocol specified.
#
# @param dictionary A dictionary containing the relevant data to create the element tree
#
def createETree(self, dictionary):
name = dictionary.get("name", None)
if (name is not None):
node = ElementTree.Element(str(type(self).__name__))
node.text = name
return node
else:
return None
## getDescriptor
#
# @return A string that describes the action.
#
def getDescriptor(self):
return "valve protocol " + self.protocol_name
## setup
#
# Perform post creation initialization.
#
# @param node The node of an ElementTree.
#
def setup(self, node):
self.protocol_name = node.text
self.protocol_is_running = False
self.message = tcpMessage.TCPMessage(message_type = "Kilroy Protocol",
message_data = {"name": self.protocol_name})
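#
# Illustrative round trip (not part of the original module, and assuming that
# addField() appends a child element whose text is the given value): a block
# produced by DAMoveStage.createETree({"stage_x": 10.0, "stage_y": -5.5})
# is <DAMoveStage><stage_x>10.0</stage_x><stage_y>-5.5</stage_y></DAMoveStage>,
# and feeding that node back into DAMoveStage().setup(node) populates
# self.stage_x / self.stage_y and builds the "Move Stage" TCP message.
#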
#
# The MIT License
#
# Copyright (c) 2014 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
python
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import asyncio
import logging
from typing import Any, List
from magma.common.service import MagmaService
from magma.common.streamer import StreamerClient
from magma.configuration.mconfig_managers import MconfigManager, \
load_service_mconfig
from magma.magmad.service_manager import ServiceManager
from orc8r.protos.mconfig import mconfigs_pb2
from orc8r.protos.mconfig_pb2 import GatewayConfigsDigest
CONFIG_STREAM_NAME = 'configs'
class ConfigManager(StreamerClient.Callback):
"""
Manager for access gateway config. Updates are received as a stream and
are guaranteed to be lossless and in-order. Config is written to file in
JSON format.
"""
def __init__(self, services: List[str], service_manager: ServiceManager,
magmad_service: MagmaService, mconfig_manager: MconfigManager,
allow_unknown_fields: bool = True, loop=None) -> None:
"""
Args:
services: List of services to manage
service_manager: ServiceManager instance
magmad_service: magmad service instance
mconfig_manager: manager class for the mconfig
allow_unknown_fields: set to True to suppress unknown field errors
loop: asyncio event loop to run in
"""
self._services = services
self._service_manager = service_manager
self._magmad_service = magmad_service
self._mconfig_manager = mconfig_manager
self._allow_unknown_fields = allow_unknown_fields
self._loop = loop or asyncio.get_event_loop()
# Load managed config
self._mconfig = self._mconfig_manager.load_mconfig()
def get_request_args(self, stream_name: str) -> Any:
# Include an mconfig digest argument to allow cloud optimization of
# not returning a non-updated mconfig.
digest = getattr(self._mconfig.metadata, 'digest', None)
if digest is None:
return None
mconfig_digest_proto = GatewayConfigsDigest(
md5_hex_digest=digest.md5_hex_digest)
return mconfig_digest_proto
def process_update(self, stream_name, updates, resync):
"""
Handle config updates. Resync is ignored since the entire config
structure is passed in every update.
Inputs:
- updates - list of GatewayConfigs protobuf structures
- resync - boolean indicating whether all database information will be
resent (hence cached data can be discarded). This is ignored
since config is contained in one DB element, hence all
data is sent in every update.
"""
if len(updates) == 0:
logging.info('No config update to process')
return
# We will only take the last update
for update in updates[:-1]:
logging.info('Ignoring config update %s', update.key)
# Deserialize and store the last config update
logging.info('Processing config update %s', updates[-1].key)
mconfig_str = updates[-1].value.decode()
mconfig = self._mconfig_manager.deserialize_mconfig(
mconfig_str,
self._allow_unknown_fields,
)
if 'magmad' not in mconfig.configs_by_key:
logging.error('Invalid config! Magmad service config missing')
return
self._mconfig_manager.update_stored_mconfig(mconfig_str)
self._magmad_service.reload_mconfig()
def did_mconfig_change(serv_name):
return mconfig.configs_by_key.get(serv_name) != \
self._mconfig.configs_by_key.get(serv_name)
# Reload magmad configs locally
if did_mconfig_change('magmad'):
self._loop.create_task(
self._service_manager.update_dynamic_services(
load_service_mconfig('magmad', mconfigs_pb2.MagmaD())
.dynamic_services,
)
)
services_to_restart = [
srv for srv in self._services if did_mconfig_change(srv)
]
if services_to_restart:
self._loop.create_task(
self._service_manager.restart_services(services_to_restart),
)
self._mconfig = mconfig
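# Update flow, summarizing the methods above: get_request_args() sends the
# digest of the currently stored mconfig so the cloud can skip sending an
# unchanged config; process_update() keeps only the newest update, persists
# it, reloads magmad's own config, and restarts any managed service whose
# per-key config changed.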
|
python
|
# Not used
# Author : Satish Palaniappan
__author__ = "Satish Palaniappan"
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from twokenize import *
import re
Code = r"\\[a-zA-Z0-9]+"
List = [
Url_RE,
Timelike,
Code
]
# stoplist = [")","(",".","'",",",";",":","?","/","!","@","$","*","+","-","_","=","&","%","`","~","\"","{","}"]
stopwords = [s.strip() for s in open(cmd_folder + "/stopwords","r").readlines()]
# print(stopwords)
### Not Implemented
def prep (text):
line = text
line = re.sub(r"[@#]", '', line)
for r in List:
line = re.sub(r," ", line)
for w in stopwords:
line = line.replace(" " + w.strip() + " "," ")
return line
def process(text):
# text = prep(text.strip().lower())
text = text.strip().lower()
text = u" ".join(tokenize(text))
return text.encode("utf-8")
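# Example (illustrative): process("  Hello   WORLD ") strips, lowercases and
# tokenizes the text with twokenize, returning UTF-8 bytes such as
# b"hello world". Note that prep() above is marked "Not Implemented" and is
# not applied by process().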
|
python
|
import json
import cv2.aruco as aruco
import numpy as np
with open("config.json", "r") as json_file:
data = json.load(json_file)
arucoDictionary = aruco.Dictionary_get(data["arucoDictionary"])
timeStep = data["timeStep"]
isLogEnabled = bool(data["logEnabled"])
markerWidth = data["markerWidth"]
camera = int(data["camera"])
actuators = data["actuators"]
gameDuration = data["gameDuration"]
device = data["device"]
minDistance = data["minDistance"]
maxDistance = data["maxDistance"]
frequencyClose = data["frequencyClose"]
frequencyOptimal = data["frequencyOptimal"]
frequencyFar = data["frequencyFar"]
actuatorRanges = data["actuatorRanges"]
targetLookAtThreshold = data["targetLookAtThreshold"]
dangerTime = data["dangerTime"]
shoulderMotors = data["shoulderMotors"]
motorInterval = data["motorInterval"]
resolutionX = data["resolutionX"]
resolutionY = data["resolutionY"]
distortCoeffs = np.array(data["distortCoeffs"])
focalLength = data["focalLength"]
camMatrix = np.array(data["camMatrix"])
camCenter = data["camCenter"]
calibrate = data["calibrate"]
useFisheye = data["useFisheye"]
deviceMode = int(data["deviceMode"])
usbPort = data["usbPort"]
catchThiefAfterTime = data["catchThiefAfterTime"]
buttonGpioPort= int(data["button_gpio_pin"])
def get_marker_id(side):
return data["markers"][side]
|
python
|
import unittest
import younit
# @unittest.skip("skipped")
class CommonTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
async def async_setUp(self):
pass
async def async_tearDown(self):
pass
def setUp(self):
pass
def tearDown(self):
pass
def GIVEN_this(self):
pass
def WHEN_that(self):
pass
def THEN_verify(self):
pass
|
python
|
# -*- coding: utf-8 -*-
import os
import pathlib
from .Decorators import Decorators
from ...Exceptions import AsyncyError
def safe_path(story, path):
"""
safe_path resolves a path completely (../../a/../b) completely
and returns an absolute path which can be used safely by prepending
the story's tmp dir. This ensures that the story cannot abuse the system
and write elsewhere, for example, stories.json.
:param story: The story (Stories object)
:param path: A path to be resolved
:return: The absolute path, which can be used to read/write directly
"""
story.create_tmp_dir()
# Adding the leading "/" is important, otherwise the current working
# directory will be used as the base path.
path = f'/{path}'
path = pathlib.Path(path).resolve()
return f'{story.get_tmp_dir()}{os.fspath(path)}'
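# Example (illustrative): for a story whose tmp dir is /tmp/story-1, a call
# like safe_path(story, '../../etc/passwd') first resolves the path against
# '/' (yielding '/etc/passwd') and then prepends the tmp dir, returning
# '/tmp/story-1/etc/passwd', so the traversal cannot escape the sandbox.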
@Decorators.create_service(name='file', command='write', arguments={
'path': {'type': 'string'},
'content': {'type': 'any'}
})
async def file_write(story, line, resolved_args):
path = safe_path(story, resolved_args['path'])
try:
with open(path, 'w') as f:
f.write(resolved_args['content'])
except IOError as e:
raise AsyncyError(message=f'Failed to write to file: {e}',
story=story, line=line)
@Decorators.create_service(name='file', command='read', arguments={
'path': {'type': 'string'}
}, output_type='string')
async def file_read(story, line, resolved_args):
path = safe_path(story, resolved_args['path'])
try:
with open(path, 'r') as f:
return f.read()
except IOError as e:
raise AsyncyError(message=f'Failed to read file: {e}',
story=story, line=line)
@Decorators.create_service(name='file', command='exists', arguments={
'path': {'type': 'string'}
}, output_type='boolean')
async def file_exists(story, line, resolved_args):
path = safe_path(story, resolved_args['path'])
return os.path.exists(path)
def init():
pass
|
python
|
# Generated by Django 3.1.2 on 2020-10-30 18:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0002_auto_20201030_1417'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='language',
),
migrations.AddField(
model_name='book',
name='language',
field=models.ForeignKey(help_text='Select the language the book is written in.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.language'),
),
]
|
python
|
import logging
from application.utils import globals
from application.utils.helpers import Singleton
from pymongo import MongoClient, ASCENDING, DESCENDING
@Singleton
class Connection:
_client = None
db = None
def __init__(self):
try:
self._client = MongoClient(globals.configuration.mongo['uri'])
self.db = self._client[globals.configuration.mongo['db']]
self.generate_structure()
except Exception, error:
logging.error('DB error: %s' % error.message)
raise error
def generate_structure(self):
"""
Create indexes
:return:
"""
try:
self.db.twitter.ensure_index([('created', DESCENDING)], name='_date_index1', background=True)
self.db.twitter.ensure_index([('source', ASCENDING)], name='_source_index1', background=True)
self.db.twitter.ensure_index([('hashtags', ASCENDING)], name='_hashtags_index1', background=True)
self.db.twitter.ensure_index([('user', ASCENDING)], name='_user_index1', background=True)
except Exception, error:
logging.error('Error during index creation: %s' % error.message)
raise error
|
python
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLBase
from .size import Size
from .normal import Normal
class Plane(XMLBase):
_NAME = 'plane'
_TYPE = 'sdf'
_CHILDREN_CREATORS = dict(
size=dict(creator=Size, default=[2]),
normal=dict(creator=Normal)
)
def __init__(self):
XMLBase.__init__(self)
self.reset()
@property
def size(self):
return self._get_child_element('size')
@size.setter
def size(self, vec):
self._add_child_element('size', vec)
@property
def normal(self):
return self._get_child_element('normal')
@normal.setter
def normal(self, vec):
self._add_child_element('normal', vec)
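# Example usage (illustrative, relying only on the properties defined above):
#   plane = Plane()
#   plane.size = [5, 5]       # stored in the child <size> element
#   plane.normal = [0, 0, 1]  # stored in the child <normal> element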
|
python
|
# Generated by Django 2.2.5 on 2019-11-21 01:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('query', models.CharField(max_length=100)),
('search_count', models.PositiveIntegerField(default=1)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'Search',
'verbose_name_plural': 'searches',
},
),
]
|
python
|
import os
import sys
import tempfile
import mimetypes
import webbrowser
# Import the email modules we'll need
from email import policy
from email.parser import BytesParser
# An imaginary module that would make this work and be safe.
from imaginary import magic_html_parser
# In a real program you'd get the filename from the arguments.
msg = BytesParser(policy=policy.default).parse(open('outgoing.msg', 'rb'))
# Now the header items can be accessed as a dictionary, and any non-ASCII will
# be converted to unicode:
print('To:', msg['to'])
print('From:', msg['from'])
print('Subject:', msg['subject'])
# If we want to print a preview of the message content, we can extract whatever
# the least formatted payload is and print the first three lines. Of course,
# if the message has no plain text part printing the first three lines of html
# is probably useless, but this is just a conceptual example.
simplest = msg.get_body(preferencelist=('plain', 'html'))
print()
print(''.join(simplest.get_content().splitlines(keepends=True)[:3]))
ans = input("View full message?")
if ans.lower()[0] == 'n':
sys.exit()
# We can extract the richest alternative in order to display it:
richest = msg.get_body()
partfiles = {}
if richest['content-type'].maintype == 'text':
if richest['content-type'].subtype == 'plain':
for line in richest.get_content().splitlines():
print(line)
sys.exit()
elif richest['content-type'].subtype == 'html':
body = richest
else:
print("Don't know how to display {}".format(richest.get_content_type()))
sys.exit()
elif richest['content-type'].content_type == 'multipart/related':
body = richest.get_body(preferencelist=('html'))
for part in richest.iter_attachments():
fn = part.get_filename()
if fn:
extension = os.path.splitext(part.get_filename())[1]
else:
extension = mimetypes.guess_extension(part.get_content_type())
with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as f:
f.write(part.get_content())
# again strip the <> to go from email form of cid to html form.
partfiles[part['content-id'][1:-1]] = f.name
else:
print("Don't know how to display {}".format(richest.get_content_type()))
sys.exit()
with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
# The magic_html_parser has to rewrite the href="cid:...." attributes to
# point to the filenames in partfiles. It also has to do a safety-sanitize
# of the html. It could be written using html.parser.
f.write(magic_html_parser(body.get_content(), partfiles))
webbrowser.open(f.name)
os.remove(f.name)
for fn in partfiles.values():
os.remove(fn)
# Of course, there are lots of email messages that could break this simple
# minded program, but it will handle the most common ones.
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
My first warp!
Using scikit-image piecewise affine transformation,
based on manual node assignment with stable corners.
A midpoint morph (halfway between the key frames) is generated.
http://scikit-image.org/docs/dev/auto_examples/plot_piecewise_affine.html
"""
############
# Settings #
############
home = r'/Users/jasper/Documents/PythonSpul/muddymorph/testcases'
key_a = home + r'/ball1.jpg'
key_b = home + r'/ball2.jpg'
nodefile = home + r'/ball_nodeclick.csv'
################
# Dependencies #
################
# Open source
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
# Home grown
import muddymorph_algo as algo
########
# Warp #
########
# Make an announcement
print("")
print("MuddyMorph Warp Proto 1")
print("=======================")
print("")
# Load data
print("Loading images and coordinates ... ", end="")
Ka = algo.load_rgba(key_a)
Kb = algo.load_rgba(key_b)
h, w = Ka.shape[:2]
nodes = np.loadtxt(nodefile, delimiter=',').astype(int)
print("done")
# Add edges to node paths
for x in [0, w - 1]:
for y in [0, h - 1]:
nodes = np.row_stack((nodes, [x, y, x, y]))
# Source and destination coordinates
print("Warping like crazy ... ", end="")
pa = nodes[:, 0:2]
pb = nodes[:, 2:4]
pi = pa + 0.5 * (pb - pa)
# Transform A
Ta = PiecewiseAffineTransform()
Ta.estimate(pi, pa)
Wa = warp(Ka, Ta)
# Transform B
dst_b = pb + 0.5 * (pa - pb)
Tb = PiecewiseAffineTransform()
Tb.estimate(pi, pb)
Wb = warp(Kb, Tb)
print("done")
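# Note: both keys are warped toward the shared midpoint coordinates pi, so a
# 50/50 blend of Wa and Wb (plotted below) is the halfway morph between the
# two key frames.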
##########
# Review #
##########
# Show plain images
print("Plotting input ... ", end="")
plt.close('all')
fig = algo.big_figure('MuddyMorph Proto - Warp 1', w * 3, h * 2)
plt.subplot(2, 3, 1)
plt.imshow(Ka)
plt.axis('image')
plt.plot(nodes[:, 0], nodes[:, 1], 'r+')
plt.title('A plain', fontweight='bold')
plt.subplot(2, 3, 2)
plt.imshow(Kb)
plt.axis('image')
plt.plot(nodes[:, 2], nodes[:, 3], 'r+')
plt.title('B plain', fontweight='bold')
plt.subplot(2, 3, 3)
plt.imshow(0.5 * Ka + 0.5 * Kb)
plt.axis('image')
plt.title('A&B plain', fontweight='bold')
print("done")
# Show warped images
print("Plotting result ... ", end="")
plt.subplot(2, 3, 4)
plt.imshow(Wa)
plt.axis('image')
plt.plot(pi[:, 0], pi[:, 1], 'r+')
plt.title('A warp', fontweight='bold')
plt.subplot(2, 3, 5)
plt.imshow(Wb)
plt.axis('image')
plt.plot(pi[:, 0], pi[:, 1], 'r+')
plt.title('B warp', fontweight='bold')
plt.subplot(2, 3, 6)
plt.imshow(0.5 * Wa + 0.5 * Wb)
plt.axis('image')
plt.title('A&B warp', fontweight='bold')
print("done")
|
python
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import os
import sys
import re
import traceback
# This is the global namespace.
# 2011.04.19: g.log is the only member of the global namespace. I think there
# were bigger plans for g.py, but it's only ever housed the logger.
# See: conf.py, which sets g.log = logging.
# 2011.08.19: Import pyserver_glue so we get os.environ['PYSERVER_HOME'].
# 2013.04.20: pyserver_glue no longer sets os.environ['PYSERVER_HOME'], but
# we no longer use it. Also, it should still be the first element
# of sys.path[].
import pyserver_glue
# 2011.01.23: Adding g.assurt so we can show a proper stack trace
ignore_stack_re = re.compile(r'^\s*raise Ccp_Assert\(message\)$')
class Ccp_Assert(AssertionError):
def __init__(self, message):
if not message:
# NO! prints to stderr or something: message = traceback.print_stack()
#message = traceback.format_exc()
strip_stack = False
stack_lines_ = traceback.format_stack()
stack_lines = []
for lines in stack_lines_:
for line in lines.split('\n'):
if line:
#log.debug('Ccp_Assert: line: %s' % (line,))
if ignore_stack_re.match(line) is not None:
#import pdb; pdb.set_trace()
# "raise Ccp_Assert(message)" is actually secondtolast ln
# The line before is, e.g.,
# File "/ccp/dev/cp_1051/pyserver/g.py", ln 36, in assurt
try:
stack_lines.pop()
except IndexError:
log.error('Ccp_Assert: empty list?')
strip_stack = True
break
stack_lines.append(line)
if strip_stack:
break
message = '\n'.join(stack_lines)
#Exception.__init__(self, message)
AssertionError.__init__(self, message)
#log.error('Ccp_Assert: %s' % (message,))
#traceback.print_exception(*sys.exc_info())
debug_me = False
#debug_me = True
# FIXME: Should we check either of these, i.e., for cron jobs?
#Apr-20 20:40:20 DEBG schema-up # os.getenv("LOGNAME"): landonb
#Apr-20 20:40:20 DEBG schema-up # os.environ.get("TERM"): xterm
# From pyserver, Fedora:
# os.getenv('LOGNAME') is None
# os.getenv('TERM') is 'xterm'
# {'LANG': 'C',
# 'TERM': 'xterm',
# 'SHLVL': '2',
# 'INSTANCE': 'minnesota',
# 'PWD': '/',
# 'PYSERVER_HOME': '/ccp/dev/cp_nnnn/pyserver',
# 'PATH': '/sbin:/usr/sbin:/bin:/usr/bin',
# '_': '/usr/sbin/httpd'}
iamwhoiam = True
# NOTE: os.getenv same as os.environ.get. Type os.environ to see all.
if ((os.getenv('APACHE_RUN_USER') == 'www-data') # Ubuntu apache service
or (os.getenv('_') == '/usr/sbin/httpd') # Fedora apache service
or (os.getenv('LOGNAME') == 'www-data') # Ubuntu routed/mr_do service
or (os.getenv('LOGNAME') == 'apache')): # Fedora routed/mr_do service
# FIXME: What are the mr_do/routed services under Ubuntu?
iamwhoiam = False
# NOTE: If starting as a service, cannot import rpdb2 here.
# 'cause the cwd is '.'. After pyserver_glue runs, it'll
# be corrected, so the import is in the assurt fcn.
# The 'assert' keyword is reserved, so we call it, uh, 'syrt!
def assurt(condit, message=None, soft=False):
if not bool(condit):
# FIXME: This doesn't work if being run as a service. Can you figure out
# if we're a daemon and throw a normal assert instead?
if debug_me:
log.warning('DEBUGGING!!')
print 'DEBUGGING!!'
if iamwhoiam:
import pdb; pdb.set_trace()
else:
log.warning('Waiting for remote debug client...')
print 'Waiting for remote debug client...'
import rpdb2
rpdb2.start_embedded_debugger('password', fAllowRemote=True)
assrt = Ccp_Assert(message)
if not soft:
raise assrt
else:
log.error('Soft Assert: %s' % (str(assrt),))
#
def assurt_soft(condit, message=None):
assurt(condit, message, soft=True)
#
# Some debugging hints:
#
# Start the remote debugger
# -------------------------
#
# In one terminal window,
#
# $ cd /ccp/dev/cp/pyserver/bin/winpdb ; py rpdb2.py
# > password password
#
# In your code, start a debug session where you want to break,
#
# import rpdb2
# rpdb2.start_embedded_debugger('password', fAllowRemote=True)
#
# And then back in your terminal window, find the list of
# waiting sessions,
#
# > attach
# Connecting to 'localhost'...
# Scripts to debug on 'localhost':
#
# pid name
# --------------------------
# 28969 /ccp/dev/cp/pyserver/g.py
#
# > attach 28969
# ...
#
# Start a local debugger
# ----------------------
#
# If you're just running a script (and not pyserver via apache),
# insert a simple pdb break into your code,
#
# import pdb;pdb.set_trace()
#
# You can also use a safer, user-specific mechanism, e.g.,
#
# conf.break_here('ccpv3')
#
# ***
class Ccp_Shutdown(Exception):
'''An error telling the code to cleanup as quickly as possible.'''
def __init__(self, message=''):
Exception.__init__(self, message)
#
def check_keep_running(keep_running):
if (keep_running is not None) and (not keep_running.isSet()):
raise Ccp_Shutdown()
# ***
if (__name__ == '__main__'):
pass
|
python
|
# -*- coding: utf-8 -*-
""" contest forms: HTTP form processing for contest pages
:copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved.
:license: Apache, see LICENSE for more details.
"""
import decimal
import re
import sys
import flask
import flask_mail
import flask_wtf
import paypalrestsdk
import paypalrestsdk.exceptions
import wtforms
import wtforms.validators as wtfv
from . import model as pcm
from .. import common
from .. import controller as ppc
from ..auth import model as pam
class Contestant(flask_wtf.Form):
"""Project submission form.
Fields:
display_name: project name
contestant_desc: project summary
youtube_url: full YouTube video url
slideshow_url: full SlideShare url
founder_desc: current user's founder info for this project
website: project website (optional)
"""
display_name = wtforms.StringField(
'Legal Name of Business', validators=[
wtfv.DataRequired(), wtfv.Length(max=100)])
contestant_desc = wtforms.TextAreaField(
'Summary of Business, Product and/or Service',
validators=[wtfv.DataRequired(), wtfv.Length(max=10000)])
youtube_url = wtforms.StringField(
'YouTube Video URL', validators=[
wtfv.DataRequired(), wtfv.Length(max=500)])
slideshow_url = wtforms.StringField(
'SlideShare Pitch Deck URL', validators=[
wtfv.DataRequired(), wtfv.Length(max=500)])
founder_desc = wtforms.TextAreaField(
'Your Bio', validators=[wtfv.DataRequired(), wtfv.Length(max=10000)])
website = wtforms.StringField(
'Business Website', validators=[wtfv.Length(max=100)])
tax_id = wtforms.StringField(
'Business US Tax Id', validators=[
wtfv.DataRequired(), wtfv.Length(max=30)])
business_phone = wtforms.StringField(
'Business Phone', validators=[
wtfv.DataRequired(), wtfv.Length(max=100)])
business_address = wtforms.TextAreaField(
'Business Mailing Address', validators=[
wtfv.DataRequired(), wtfv.Length(max=500)])
agree_to_terms = wtforms.BooleanField(
'Agree to Terms of Service', validators=[wtfv.DataRequired()])
founder2_name = wtforms.StringField(
'Other Founder Name', validators=[wtfv.Length(max=100)])
founder2_desc = wtforms.TextAreaField(
'Other Founder Bio', validators=[wtfv.Length(max=10000)])
founder3_name = wtforms.StringField(
'Other Founder Name', validators=[wtfv.Length(max=100)])
founder3_desc = wtforms.TextAreaField(
'Other Founder Bio', validators=[wtfv.Length(max=10000)])
def execute(self, contest):
"""Validates and creates the contestant model"""
if self.is_submitted() and self.validate():
contestant = self._update_models(contest)
if contestant:
self._send_mail_to_support(contestant)
flask.flash(
'Thank you for submitting your entry. You will be '
'contacted by email when your entry has been reviewed.')
return flask.redirect(contest.format_uri('contestants'))
return contest.task_class.get_template().render_template(
contest,
'submit',
form=self,
selected_menu_action='submit-contestant'
)
def validate(self):
"""Performs superclass wtforms validation followed by url
field validation"""
super(Contestant, self).validate()
self._validate_youtube()
self._validate_slideshare()
self._validate_website()
common.log_form_errors(self)
return not self.errors
def _add_founder(self, contestant, founder):
"""Creates the founder and links it to the contestant."""
ppc.db.session.add(founder)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contestant.biv_id,
target_biv_id=founder.biv_id
)
)
def _add_founders(self, contestant):
"""Add the current user as a founder and any optional founders."""
founder = pcm.Founder()
self.populate_obj(founder)
founder.display_name = flask.session['user.display_name']
self._add_founder(contestant, founder)
ppc.db.session.add(
pam.BivAccess(
source_biv_id=flask.session['user.biv_id'],
target_biv_id=founder.biv_id
)
)
if self.founder2_name.data:
self._add_founder(contestant, pcm.Founder(
display_name=str(self.founder2_name.data),
founder_desc=str(self.founder2_desc.data),
))
if self.founder3_name.data:
self._add_founder(contestant, pcm.Founder(
display_name=str(self.founder3_name.data),
founder_desc=str(self.founder3_desc.data),
))
def _send_mail_to_support(self, contestant):
"""Send a notification to support for a new entry"""
ppc.mail().send(flask_mail.Message(
'New Entry Submitted: {}'.format(contestant.biv_id),
recipients=[ppc.app().config['PUBLICPRIZE']['SUPPORT_EMAIL']],
# TODO(pjm): requires new Flask-Mail for unicode on python 3
# body='Submitted by: {} {}\nTitle: {}\nReview URL: {}'.format(
# flask.session['user.display_name'],
# pam.User.query.filter_by(
# biv_id=flask.session['user.biv_id']
# ).one().user_email,
# contestant.display_name,
# contestant.format_absolute_uri()
# )
body='Submitted by: {}\nReview URL: {}'.format(
pam.User.query.filter_by(
biv_id=flask.session['user.biv_id']
).one().user_email,
contestant.format_absolute_uri()
)
))
def _slideshare_code(self):
"""Download slideshare url and extract embed code.
The original url may not have the code.
ex. www.slideshare.net/Micahseff/game-xplain-pitch-deck-81610
Adds field errors if the code can not be determined.
"""
html = common.get_url_content(self.slideshow_url.data)
if not html:
self.slideshow_url.errors = [
'SlideShare URL invalid or unavailable.']
return None
match = re.search(r'slideshow/embed_code/(\d+)', html)
if match:
return match.group(1)
self.slideshow_url.errors = [
'Embed code not found on SlideShare page.']
return None
def _update_models(self, contest):
"""Creates the Contestant and Founder models
and adds BivAccess models to join the contest and Founder models"""
contestant = pcm.Contestant()
self.populate_obj(contestant)
contestant.youtube_code = self._youtube_code()
contestant.slideshow_code = self._slideshare_code()
contestant.is_public = \
ppc.app().config['PUBLICPRIZE']['ALL_PUBLIC_CONTESTANTS']
contestant.is_under_review = False
ppc.db.session.add(contestant)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contest.biv_id,
target_biv_id=contestant.biv_id
)
)
self._add_founders(contestant)
return contestant
def _youtube_code(self):
"""Ensure the youtube url contains a VIDEO_ID"""
value = self.youtube_url.data
# http://youtu.be/a1Y73sPHKxw
# or https://www.youtube.com/watch?v=a1Y73sPHKxw
if re.search(r'\?', value) and re.search(r'v\=', value):
match = re.search(r'(?:\?|\&)v\=(.*?)(&|$)', value)
if match:
return match.group(1)
else:
match = re.search(r'\/([^\&\?\/]+)$', value)
if match:
return match.group(1)
return None
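    # For the two URL forms noted in _youtube_code above, both
    # 'https://www.youtube.com/watch?v=a1Y73sPHKxw' and
    # 'http://youtu.be/a1Y73sPHKxw' yield the code 'a1Y73sPHKxw'.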
def _validate_slideshare(self):
"""Ensures the SlideShare slide deck exists"""
if self.slideshow_url.errors:
return
code = self._slideshare_code()
if code:
if not common.get_url_content(
'http://www.slideshare.net/slideshow/embed_code/' + code):
self.slideshow_url.errors = [
'Unknown SlideShare ID: ' + code + '.']
def _validate_website(self):
"""Ensures the website exists"""
if self.website.errors:
return
if self.website.data:
if not common.get_url_content(self.website.data):
self.website.errors = ['Website invalid or unavailable.']
def _validate_youtube(self):
"""Ensures the YouTube video exists"""
if self.youtube_url.errors:
return
code = self._youtube_code()
if code:
html = common.get_url_content('http://youtu.be/' + code)
# TODO(pjm): need better detection for not-found page
if not html or re.search(r'<title>YouTube</title>', html):
self.youtube_url.errors = [
'Unknown YouTube VIDEO_ID: ' + code + '.']
else:
self.youtube_url.errors = ['Invalid YouTube URL.']
class Donate(flask_wtf.Form):
"""Donation form.
Fields:
amount: donation amount
"""
# TODO(pjm): DecimalField doesn't accept '' value...
amount = wtforms.StringField('Contribution Amount')
donate5 = wtforms.SubmitField('$5')
donate25 = wtforms.SubmitField('$25')
donate100 = wtforms.SubmitField('$100')
other_amount = wtforms.SubmitField('Other Amount')
def execute(self, contestant):
"""Validates and redirects to PayPal
For test credit card payments, use card number: 4736656842918643
"""
if self.is_submitted() and self.validate():
url = self._paypal_payment(contestant)
if url:
return flask.redirect(url)
contest = contestant.get_contest()
return contest.task_class.get_template().render_template(
contest,
'detail',
contestant=contestant,
contestant_url=contestant.format_absolute_uri(),
contestant_tweet="Help us win! " + contestant.display_name,
form=self,
)
def execute_payment(self, contestant):
"""Handles return task from paypal. Calls paypal with payment and
payer IDs to complete the transaction."""
donor = pcm.Donor.unsafe_load_from_session()
if not donor:
ppc.app().logger.warn('missing session donor')
flask.flash('The referenced contribution was already processed.')
return flask.redirect(contestant.format_uri())
self._save_payment_info_to_donor(donor)
payment = paypalrestsdk.Payment({
'id': donor.paypal_payment_id
})
donor.remove_from_session()
try:
if payment.execute({'payer_id': donor.paypal_payer_id}):
donor.donor_state = 'executed'
ppc.db.session.add(donor)
return flask.redirect(contestant.format_uri('thank-you'))
else:
ppc.app().logger.warn('payment execute failed')
except paypalrestsdk.exceptions.ClientError as err:
ppc.app().logger.warn(err)
except:
ppc.app().logger.warn(sys.exc_info()[0])
return flask.redirect(contestant.format_uri())
def validate(self):
"""Ensure the amount is present and at least $10"""
super(Donate, self).validate()
amount = None
if self.donate5.data:
amount = 5
elif self.donate25.data:
amount = 25
elif self.donate100.data:
amount = 100
elif self.amount.data:
try:
if float(self.amount.data) < 10:
self.amount.errors = ['Amount must be at least $10.']
elif float(self.amount.data) > 1000000:
self.amount.errors = ['Amount too large.']
except ValueError:
self.amount.errors = ['Please enter an amount.']
else:
self.amount.errors = ['Please enter an amount.']
self.amount.raw_data = None
if amount:
self.amount.data = decimal.Decimal(amount)
common.log_form_errors(self)
return not self.errors
def _create_donor(self, contestant):
"""Create a new donor model and link to the parent contestant."""
donor = pcm.Donor()
self.populate_obj(donor)
donor.donor_state = 'submitted'
ppc.db.session.add(donor)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contestant.biv_id,
target_biv_id=donor.biv_id
)
)
return donor
def _link_donor_to_user(self, donor):
"""Link the donor model to a user model. Match the donor email with
the user. If no match, use the current user, if present."""
if pam.BivAccess.query.select_from(pam.User).filter(
pam.BivAccess.source_biv_id == pam.User.biv_id,
pam.BivAccess.target_biv_id == donor.biv_id
).count() > 0:
return
user = pam.User.query.filter_by(user_email=donor.donor_email).first()
if not user and flask.session.get('user.is_logged_in'):
user = pam.User.query.filter_by(
biv_id=flask.session['user.biv_id']
).one()
if not user:
return
ppc.db.session.add(
pam.BivAccess(
source_biv_id=user.biv_id,
target_biv_id=donor.biv_id
)
)
def _paypal_payment(self, contestant):
"""Call paypal server to create payment record.
Returns a redirect link to paypal site or None on error."""
donor = self._create_donor(contestant)
amount = '%.2f' % float(self.amount.data)
payment = paypalrestsdk.Payment({
'intent': 'sale',
'payer': {
'payment_method': 'paypal'
},
'redirect_urls': {
'return_url': contestant.format_absolute_uri('donate-done'),
'cancel_url': contestant.format_absolute_uri('donate-cancel'),
},
'transactions': [
{
'amount': {
'total': amount,
'currency': 'USD',
},
'item_list': {
'items': [
{
'quantity': 1,
'price': amount,
'currency': 'USD',
'name': '{} contribution, {}'.format(
contestant.display_name,
contestant.get_contest().display_name),
'tax': 0
}
]
}
}
]
})
try:
if payment.create():
ppc.app().logger.info(payment)
donor.paypal_payment_id = str(payment.id)
donor.add_to_session()
for link in payment.links:
if link.method == 'REDIRECT':
return str(link.href)
else:
ppc.app().logger.warn(payment.error)
except paypalrestsdk.exceptions.ClientError as err:
ppc.app().logger.warn(err)
except:
ppc.app().logger.warn(sys.exc_info()[0])
self.amount.errors = [
'There was an error processing your contribution.']
return None
def _save_payment_info_to_donor(self, donor):
"""Get payer info from paypal server, save info to Donor model."""
try:
payment = paypalrestsdk.Payment.find(donor.paypal_payment_id)
info = payment.payer.payer_info
donor.donor_email = info.email
donor.display_name = info.first_name + ' ' + info.last_name
except paypalrestsdk.exceptions.ConnectionError as err:
ppc.app().logger.warn(err)
donor.paypal_payer_id = flask.request.args['PayerID']
donor.donor_state = 'pending_confirmation'
ppc.db.session.add(donor)
self._link_donor_to_user(donor)
class Judgement(flask_wtf.Form):
"""Judgement form.
Fields:
question(1 .. 6): question score
question(1 ..6)_comment: comments for survey question
general_comment: End of survey comments
"""
def _comment_field(label='Comments'):
return wtforms.TextAreaField(
label, validators=[wtfv.Length(max=10000)])
def _question_field(number):
return wtforms.RadioField(
'Question {}'.format(number),
choices=[
('1', 'Unsatisfactory'),
('2', 'Improvement Needed'),
('3', 'Meets Expectations'),
('4', 'Exceeds Expectations')
]
)
question1 = _question_field('1')
question1_comment = _comment_field()
question2 = _question_field('2')
question2_comment = _comment_field()
question3 = _question_field('3')
question3_comment = _comment_field()
question4 = _question_field('4')
question4_comment = _comment_field()
question5 = _question_field('5')
question5_comment = _comment_field()
question6 = _question_field('6')
question6_comment = _comment_field()
general_comment = _comment_field('General Comments')
def execute(self, contestant):
"""Saves scores for questions."""
contest = contestant.get_contest()
if self.is_submitted():
if self.validate():
self._save_scores(contestant)
flask.flash('Thank you for scoring contestant {}.'.format(
contestant.display_name))
return flask.redirect(
contest.format_uri('judging'))
else:
self._load_scores(contestant)
return contest.task_class.get_template().render_template(
contest,
'judge-contestant',
sub_base_template=contest.task_class.get_template().base_template('detail'),
contestant=contestant,
form=self
)
@classmethod
def get_points_for_question(cls, number):
return pcm.JudgeScore.get_points_for_question(number)
@classmethod
def get_text_for_question(cls, number):
return pcm.JudgeScore.get_text_for_question(number)
def validate(self):
"""Clear any errors for unselected radio choices."""
super(Judgement, self).validate()
for num in range(1, 7):
self['question{}'.format(num)].errors = None
common.log_form_errors(self)
return not self.errors
def _load_scores(self, contestant):
"""Load scores from database."""
for num in range(1, 7):
score = self._unsafe_get_score(contestant, num)
if not score:
continue
self['question{}'.format(num)].data = str(score.judge_score)
self['question{}_comment'.format(num)].data = score.judge_comment
question0 = self._unsafe_get_score(contestant, 0)
        if question0:
            self.general_comment.data = question0.judge_comment
def _save_score(self, contestant, num, val, comment):
"""Save a question score to database."""
score = self._unsafe_get_score(contestant, num)
if not score:
score = pcm.JudgeScore()
score.judge_biv_id = flask.session['user.biv_id']
score.contestant_biv_id = contestant.biv_id
score.question_number = int(num)
score.judge_score = int(val)
score.judge_comment = comment
ppc.db.session.add(score)
def _save_scores(self, contestant):
"""Saves scores to database."""
for num in range(1, 7):
val = self['question{}'.format(num)].data
# TODO(pjm): hack - val may have been coerced to string "None"
if val is None or val == 'None':
val = 0
self._save_score(contestant, num, val,
str(self['question{}_comment'.format(num)].data))
self._save_score(contestant, 0, 0, str(self.general_comment.data))
def _unsafe_get_score(self, contestant, num):
"""Loads a question score from database."""
return pcm.JudgeScore.query.filter_by(
judge_biv_id=flask.session['user.biv_id'],
contestant_biv_id=contestant.biv_id,
question_number=int(num)
).first()
|
python
|
import sqlite3
import os
import urllib.request
from urllib.error import *
DATABASE_PATH = 'database/card_image_database.db'
def create_card_image_database(print_function):
print_function('Creating Database.')
if os.path.exists(DATABASE_PATH):
try:
os.remove(DATABASE_PATH)
except OSError as e:
print('Error while deleting file', DATABASE_PATH)
print(e)
conn = sqlite3.connect(DATABASE_PATH)
c = conn.cursor()
print_function('Creating Table.')
c.execute("""CREATE TABLE cards (
card_id integer,
card_image blob
)""")
conn.commit()
print_function('Retrieving Card IDs.')
conn_card = sqlite3.connect('database/card_database.db')
conn_card.row_factory = lambda cursor, row: row[0]
c_card = conn_card.cursor()
card_ids = c_card.execute('SELECT card_id FROM cards').fetchall()
num_of_cards = len(card_ids)
print_function('Inserting Card Images.')
for index, card_id in enumerate(card_ids):
try:
card_image_url = "https://shadowverse-portal.com/image/card/phase2/common/L/L_" + str(card_id) + ".jpg"
with urllib.request.urlopen(card_image_url) as response:
data = response.read()
insert_card_image(card_id, data, conn, c)
except TimeoutError:
print_function('Downloading image failed. Update database to try again.')
return
except URLError:
print_function('Unable to reach website. Please check internet connection.')
return
print_function('Inserted ' + str(index + 1) + ' out of ' + str(num_of_cards) + ' images.')
print_function('Completed')
conn.close()
def update_card_image_database(print_function):
if not os.path.exists(DATABASE_PATH):
create_card_image_database(print_function)
return
print_function('Accessing Database.')
conn = sqlite3.connect(DATABASE_PATH)
conn.row_factory = lambda cursor, row: row[0]
c = conn.cursor()
image_card_ids = c.execute('SELECT card_id FROM cards').fetchall()
print_function('Retrieving Card IDs.')
conn_card = sqlite3.connect('database/card_database.db')
conn_card.row_factory = lambda cursor, row: row[0]
c_card = conn_card.cursor()
card_card_ids = c_card.execute('SELECT card_id FROM cards').fetchall()
num_of_cards = len(card_card_ids)
print_function('Inserting Card Images.')
for index, card_id in enumerate(card_card_ids):
if card_id not in image_card_ids:
try:
card_image_url = "https://shadowverse-portal.com/image/card/phase2/common/L/L_" + str(card_id) + ".jpg"
with urllib.request.urlopen(card_image_url) as response:
data = response.read()
insert_card_image(card_id, data, conn, c)
except TimeoutError:
print_function('Downloading image failed. Update database to try again.')
return
except URLError:
print_function('Unable to reach website. Please check internet connection.')
return
print_function('Inserted ' + str(index + 1) + ' out of ' + str(num_of_cards) + ' images.')
print_function('Completed')
conn.close()
def insert_card_image(card_id_number, card_image, conn, cursor):
with conn:
cursor.execute("""INSERT INTO cards VALUES (
:card_id,
:card_image
)""",
{
'card_id': card_id_number,
'card_image': card_image
})
|
python
|
/home/runner/.cache/pip/pool/37/a3/2b/4c0a8aea5f52564ead5b0791d74f0f33c3a5eea3657f257e9c770b86c6
|
python
|
'''
Largest Palindrome of two N-digit numbers given
N = 1, 2, 3, 4
'''
def _largest_palindrome(n_digits):
    """Return the pair of n-digit factors whose product is the largest palindrome."""
    hi = 10 ** n_digits - 1
    lo = 10 ** (n_digits - 1)
    best_product = 0
    best_pair = None
    for i in range(hi, lo - 1, -1):
        for j in range(i, lo - 1, -1):
            k = i * j
            if k <= best_product:
                break  # products only shrink as j decreases
            if str(k) == str(k)[::-1]:
                best_product = k
                best_pair = (i, j)
    return best_pair
def largePali4digit():
    return _largest_palindrome(4)
def largePali3digit():
    return _largest_palindrome(3)
def largePali2digit():
    return _largest_palindrome(2)
def largePali1digit():
    return _largest_palindrome(1)
print(largePali3digit())
print(largePali2digit())
print(largePali1digit())
print(largePali4digit())
|
python
|
import os
import tempfile
from os import makedirs
from os.path import join, exists
from sys import platform
def get_home_folder():
from pathlib import Path
home_folder = f"{Path.home()}"
return home_folder
def get_temp_folder():
temp_folder = None
if platform == "linux" or platform == "linux2":
temp_folder = tempfile.gettempdir()
elif platform == "darwin":
temp_folder = tempfile.gettempdir()
elif platform == "win32":
temp_folder = tempfile.gettempdir()
try:
makedirs(temp_folder)
except Exception:
pass
if exists(temp_folder):
return temp_folder
else:
return None
def get_cache_folder():
cache_folder = None
if platform == "linux" or platform == "linux2":
cache_folder = join(get_home_folder(), '.cache')
elif platform == "darwin":
        cache_folder = join(get_home_folder(), 'Library', 'Caches')  # no leading '/', else join() discards the home folder
elif platform == "win32":
cache_folder = join(get_home_folder(), os.getenv('LOCALAPPDATA'))
try:
makedirs(cache_folder)
except Exception:
pass
if exists(cache_folder):
return cache_folder
else:
return None
|
python
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
def addTwoNumbers(list1, list2, node, digit):
n = digit
if (list1 != None):
n += list1.val
if (list2 != None):
n += list2.val
q = n // 10
r = n % 10
node.val = r
node.next = None
if (list1 == None):
next_list1 = None
else:
next_list1 = list1.next
if (list2 == None):
next_list2 = None
else:
next_list2 = list2.next
if (next_list1 == None and next_list2 == None and q == 0):
return
node.next = ListNode(0)
addTwoNumbers(next_list1, next_list2, node.next, q)
startNode = ListNode(0)
addTwoNumbers(l1, l2, startNode, 0)
return startNode
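# Example (standard problem case): l1 = 2 -> 4 -> 3 (i.e. 342) and
# l2 = 5 -> 6 -> 4 (i.e. 465) produce 7 -> 0 -> 8 (i.e. 807); digits are
# stored least-significant first and the carry is passed along in `digit`.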
|
python
|