max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
corehq/motech/repeaters/exceptions.py | akashkj/commcare-hq | 471 | 81207 |
<reponame>akashkj/commcare-hq
class RequestConnectionError(Exception):
pass
class ReferralError(Exception):
pass
class DataRegistryCaseUpdateError(Exception):
pass
|
Validation/EcalRecHits/test/EcalFullValid_cfg.py | ckamtsikis/cmssw | 852 | 81215 |
<filename>Validation/EcalRecHits/test/EcalFullValid_cfg.py
# The following comments couldn't be translated into the new config version:
# services
import FWCore.ParameterSet.Config as cms
process = cms.Process("EcalFullValid")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# initialize magnetic field
process.load("Configuration.StandardSequences.MagneticField_cff")
# geometry (Only Ecal)
process.load("Geometry.EcalCommonData.EcalOnly_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("Geometry.EcalMapping.EcalMapping_cfi")
process.load("Geometry.EcalMapping.EcalMappingRecord_cfi")
# DQM services
process.load("DQMServices.Core.DQM_cfg")
# ECAL hits validation sequence
process.load("Validation.EcalHits.ecalSimHitsValidationSequence_cff")
# ECAL digis validation sequence
process.load("Validation.EcalDigis.ecalDigisValidationSequence_cff")
# ECAL rechits validation sequence
process.load("Validation.EcalRecHits.ecalRecHitsValidationSequence_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(200)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:hits.root')
)
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.simhits = cms.Sequence(process.ecalSimHitsValidationSequence)
process.digis = cms.Sequence(process.ecalDigisValidationSequence)
process.rechits = cms.Sequence(process.ecalRecHitsValidationSequence)
process.p1 = cms.Path(process.simhits*process.digis*process.rechits)
process.DQM.collectorHost = ''
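# Usage note (an assumption, not part of the original config): configurations
# like this are typically executed inside a CMSSW environment with
#   cmsRun EcalFullValid_cfg.py
# after the input file hits.root has been produced by a prior simulation step.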
|
src/babi-lstm.py | Asteur/qa | 261 | 81217 |
<reponame>Asteur/qa
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from keras.layers import Dense, Merge, Dropout, RepeatVector
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import os
import babi
BABI_DIR = "../data/babi_data/tasks_1-20_v1-2/en"
TASK_NBR = 1
EMBED_HIDDEN_SIZE = 50
BATCH_SIZE = 32
NBR_EPOCHS = 40
train_file, test_file = babi.get_files_for_task(TASK_NBR, BABI_DIR)
data_train = babi.get_stories(os.path.join(BABI_DIR, train_file))
data_test = babi.get_stories(os.path.join(BABI_DIR, test_file))
word2idx = babi.build_vocab([data_train, data_test])
vocab_size = len(word2idx) + 1
print("vocab_size=", vocab_size)
story_maxlen, question_maxlen = babi.get_maxlens([data_train, data_test])
print("story_maxlen=", story_maxlen)
print("question_maxlen=", question_maxlen)
Xs_train, Xq_train, Y_train = babi.vectorize(data_train, word2idx,
story_maxlen, question_maxlen)
Xs_test, Xq_test, Y_test = babi.vectorize(data_test, word2idx,
story_maxlen, question_maxlen)
print(Xs_train.shape, Xq_train.shape, Y_train.shape)
print(Xs_test.shape, Xq_test.shape, Y_test.shape)
# define model
# generate embeddings for stories
story_rnn = Sequential()
story_rnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE,
input_length=story_maxlen))
story_rnn.add(Dropout(0.3))
# generate embeddings for question and make adaptable to story
question_rnn = Sequential()
question_rnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE,
input_length=question_maxlen))
question_rnn.add(Dropout(0.3))
question_rnn.add(LSTM(EMBED_HIDDEN_SIZE, return_sequences=False))
question_rnn.add(RepeatVector(story_maxlen))
# merge the two
model = Sequential()
model.add(Merge([story_rnn, question_rnn], mode="sum"))
model.add(LSTM(EMBED_HIDDEN_SIZE, return_sequences=False))
model.add(Dropout(0.3))
model.add(Dense(vocab_size, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy",
metrics=["accuracy"])
print("Training...")
model.fit([Xs_train, Xq_train], Y_train,
batch_size=BATCH_SIZE, nb_epoch=NBR_EPOCHS, validation_split=0.05)
loss, acc = model.evaluate([Xs_test, Xq_test], Y_test, batch_size=BATCH_SIZE)
print()
print("Test loss/accuracy = {:.4f}, {:.4f}".format(loss, acc))
|
codegen_sources/model/src/model/__init__.py | AlexShypula/CodeGen | 241 | 81224 |
<gh_stars>100-1000
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import os
import sys
from logging import getLogger
import torch
from .pretrain import load_embeddings
# , TRANSFORMER_LAYER_PARAMS
from .transformer import DECODER_ONLY_PARAMS, TransformerModel, Classifier
from ..data.dictionary import UNK_WORD
logger = getLogger()
def check_model_params(params):
"""
    Check model parameters.
"""
# masked language modeling task parameters
assert params.bptt >= 1
assert 0 <= params.word_pred < 1
assert 0 <= params.sample_alpha < 1
s = params.word_mask_keep_rand.split(",")
assert len(s) == 3
s = [float(x) for x in s]
assert all([0 <= x <= 1 for x in s]) and sum(s) == 1
params.word_mask = s[0]
params.word_keep = s[1]
params.word_rand = s[2]
if params.mask_length == "":
params.mask_length = None
params.mask_length_dist = None
elif params.mask_length == "poisson":
assert (
params.poisson_lambda is not None
), "poisson_lambda is None, it should be set when using poisson mask_length"
_lambda = params.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
params.mask_length_dist_probas = ps
params.mask_length_dist = torch.distributions.Categorical(ps)
else:
params.mask_length = int(params.mask_length)
ps = torch.FloatTensor(params.mask_length + 1).fill_(0.0)
ps[params.mask_length] = 1
params.mask_length_dist = torch.distributions.Categorical(ps)
# input sentence noise for DAE
if len(params.ae_steps) == 0:
assert params.word_shuffle == 0
assert params.word_dropout == 0
assert params.word_blank == 0
else:
assert params.word_shuffle == 0 or params.word_shuffle > 1
assert 0 <= params.word_dropout < 1
assert 0 <= params.word_blank < 1
# model dimensions
if params.emb_dim_encoder == 0 and params.emb_dim_decoder == 0:
assert params.emb_dim > 0
params.emb_dim_encoder = params.emb_dim
params.emb_dim_decoder = params.emb_dim
else:
assert params.emb_dim == 0
assert params.emb_dim_encoder > 0 and params.emb_dim_decoder > 0
if params.emb_dim_encoder == params.emb_dim_decoder:
params.emb_dim = params.emb_dim_decoder
else:
assert params.reload_emb == "", (
"Pre-trained embeddings are not supported when the embedding size of the "
"encoder and the decoder do not match "
)
assert params.emb_dim_encoder % params.n_heads == 0
assert params.emb_dim_decoder % params.n_heads == 0
if params.n_layers_encoder == 0 and params.n_layers_decoder == 0:
assert params.n_layers > 0
params.n_layers_encoder = params.n_layers
params.n_layers_decoder = params.n_layers
else:
assert params.n_layers == 0
assert params.n_layers_encoder > 0 and params.n_layers_decoder > 0
# reload pretrained word embeddings
if params.reload_emb != "":
assert os.path.isfile(params.reload_emb)
# reload a pretrained model
if params.reload_model != "":
if params.encoder_only:
assert os.path.isfile(params.reload_model)
else:
s = params.reload_model.split(",")
assert len(s) == 2
assert all([x == "" or os.path.isfile(x) for x in s]), [
x for x in s if not os.path.isfile(x)
]
if params.use_classifier and params.reload_classifier == "":
params.reload_classifier = params.reload_model
assert not (
params.beam_size > 1 and params.number_samples > 1
), "Cannot sample when already doing beam search"
assert (params.eval_temperature is None) == (
params.number_samples <= 1
), "Eval temperature should be set if and only if taking several samples at eval time"
def set_pretrain_emb(model, dico, word2id, embeddings, gpu):
"""
Pretrain word embeddings.
"""
n_found = 0
with torch.no_grad():
for i in range(len(dico)):
idx = word2id.get(dico[i], None)
if idx is None:
continue
n_found += 1
model.embeddings.weight[i] = (
embeddings[idx].cuda() if gpu else embeddings[idx]
)
model.pred_layer.proj.weight[i] = (
embeddings[idx].cuda() if gpu else embeddings[idx]
)
logger.info(
"Pretrained %i/%i words (%.3f%%)."
% (n_found, len(dico), 100.0 * n_found / len(dico))
)
@torch.no_grad()
def build_model(params, dico, gpu=True):
"""
Build model.
"""
if params.encoder_only:
# build
model = TransformerModel(params, dico, is_encoder=True, with_output=True)
# reload pretrained word embeddings
if params.reload_emb != "":
word2id, embeddings = load_embeddings(params.reload_emb, params)
set_pretrain_emb(model, dico, word2id, embeddings, gpu)
# reload a pretrained model
if params.reload_model != "":
logger.info("============ Model Reloading")
logger.info("Reloading model from %s ..." % params.reload_model)
reload_transformer(params, params.reload_model, dico, model, "model", gpu)
logger.info("Model: {}".format(model))
logger.info(
"Number of parameters (model): %i"
% sum([p.numel() for p in model.parameters() if p.requires_grad])
)
logger.info("")
return [model.cuda() if gpu else model]
else:
# build
# TODO: only output when necessary - len(params.clm_steps + params.mlm_steps) > 0
encoder = TransformerModel(params, dico, is_encoder=True, with_output=True)
if params.separate_decoders:
decoders = [
TransformerModel(params, dico, is_encoder=False, with_output=True)
for _ in params.lang2id.values()
]
else:
decoders = [
TransformerModel(params, dico, is_encoder=False, with_output=True)
]
for layer in range(params.n_layers_decoder):
if layer <= params.n_share_dec - 1:
assert params.amp == -1, "sharing layers is not supported with AMP"
logger.info("Sharing decoder attention parameters for layer %i" % layer)
for i in range(1, len(decoders)):
decoders[i].attentions[layer] = decoders[0].attentions[layer]
# reload pretrained word embeddings
if params.reload_emb != "":
word2id, embeddings = load_embeddings(params.reload_emb, params)
set_pretrain_emb(encoder, dico, word2id, embeddings, gpu)
for decoder in decoders:
set_pretrain_emb(decoder, dico, word2id, embeddings, gpu)
# reload a pretrained model
if params.reload_model != "":
logger.info("============ Model Reloading")
enc_path, dec_path = params.reload_model.split(",")
assert not (enc_path == "" and dec_path == "")
# reload encoder
if enc_path != "":
logger.info("Reloading encoder from %s ..." % enc_path)
reload_transformer(params, enc_path, dico, encoder, "encoder", gpu)
# reload decoders
if dec_path != "":
for dec in decoders:
logger.info("Reloading decoders from %s ..." % dec_path)
if params.reload_encoder_for_decoder:
reload_transformer(params, dec_path, dico, dec, "encoder", gpu)
else:
reload_transformer(params, dec_path, dico, dec, "decoder", gpu)
logger.debug("Encoder: {}".format(encoder))
logger.debug("Decoder: {}".format(decoders))
logger.info(
"Number of parameters (encoder): %i"
% sum([p.numel() for p in encoder.parameters() if p.requires_grad])
)
logger.info(
"Number of parameters (decoders): %i"
% sum([p.numel() for p in decoders[0].parameters() if p.requires_grad])
)
logger.info(f"Number of decoders: {len(decoders)}")
logger.info("")
return (
[encoder.cuda() if gpu else encoder],
[dec.cuda() if gpu else dec for dec in decoders],
)
@torch.no_grad()
def build_classifier(params):
"""
Build classifier.
"""
# build
classifier = Classifier(params)
# reload a pretrained model
if params.reload_classifier != "":
logger.info("Reloading classifier from %s ..." % params.reload_classifier)
reloaded = torch.load(
params.reload_classifier,
map_location=lambda storage, loc: storage.cuda(params.local_rank),
)
if "classifier" not in reloaded:
logger.warning(
f"There is no classifier in {params.reload_classifier}. The classifier weights will be initialized randomly"
)
else:
reloaded = reloaded["classifier"]
if all([k.startswith("module.") for k in reloaded.keys()]):
reloaded = {k[len("module.") :]: v for k, v in reloaded.items()}
classifier.load_state_dict(reloaded)
logger.info("Classifier: {}".format(classifier))
return [classifier.cuda()]
def reload_transformer(params, path, dico, model, model_type, gpu=True):
"""
Reload a transformer state dict to current model:
clean 'module.' from state dict,
match the word embeddings comparing dicos,
match lang embedding with params lang mapping,
    extend or truncate position embeddings when sizes don't match,
load state dict.
"""
reloaded = torch.load(
path,
map_location=lambda storage, loc: storage.cuda(params.local_rank)
if gpu
else storage.cpu(),
)
clean_model_state_dict(reloaded, model_type)
reload_word_embeddings(reloaded, dico, model_type)
reload_lang_embeddings(reloaded, params, model_type)
reload_position_embeddings(reloaded, model, model_type)
# if the model is a decoder
if hasattr(model, "encoder_attn"):
for i in range(params.n_layers_decoder):
for name in DECODER_ONLY_PARAMS:
weight_name = name % i
if weight_name not in reloaded[model_type]:
logger.warning("Parameter %s not found." % (weight_name))
encoder_attn_name = weight_name.replace(
"encoder_attn", "attentions"
)
if (
getattr(params, "reload_encoder_attn_on_decoder", False)
and "encoder_attn" in weight_name
and encoder_attn_name in reloaded[model_type]
):
logger.warning(f"Reloading {encoder_attn_name} instead")
reloaded[model_type][weight_name] = (
reloaded[model_type][encoder_attn_name].clone().detach()
)
else:
reloaded[model_type][weight_name] = model.state_dict()[
weight_name
]
model.load_state_dict(reloaded[model_type], strict=not params.spans_emb_encoder)
def clean_model_state_dict(reloaded, model_type):
"""
    Remove the 'module.' prefix from the keys of the model state dict.
"""
model_reloaded = reloaded[model_type if model_type in reloaded else "model"]
if all([k.startswith("module.") for k in model_reloaded.keys()]):
model_reloaded = {k[len("module.") :]: v for k, v in model_reloaded.items()}
reloaded[model_type] = model_reloaded
def reload_word_embeddings(reloaded, dico, model_type):
"""
    Check, when reloading a model, that the dictionaries are the same. If not, remap the word embeddings where possible.
"""
reloaded_word2id = reloaded["dico_word2id"]
reloaded_id2word = reloaded["dico_id2word"]
assert len(reloaded_word2id) == len(reloaded_id2word)
assert all(reloaded_id2word[v] == k for k, v in reloaded_word2id.items())
matching_indices = []
word_not_found = []
for idx, word in dico.id2word.items():
if word not in reloaded_word2id:
word_not_found += [word]
matching_indices += [reloaded_word2id[UNK_WORD]]
else:
matching_indices += [reloaded_word2id[word]]
assert len(matching_indices) == len(dico)
if len(word_not_found) > 0:
logger.warning(
f"When reloading word embeddings, could not find embeddings for {len(word_not_found)} words: {word_not_found[0:5] + ['...'] + word_not_found[-5:]}... Initializing them to < unk >."
)
reloaded[model_type]["embeddings.weight"] = torch.cat(
[
reloaded[model_type]["embeddings.weight"][index : index + 1]
for index in matching_indices
],
dim=0,
)
if "pred_layer.proj.weight" in reloaded[model_type]:
first_line = reloaded[model_type]["pred_layer.proj.weight"][0:1]
embedding_size = reloaded[model_type]["pred_layer.proj.weight"].shape[1]
reloaded[model_type]["pred_layer.proj.weight"] = torch.cat(
[
reloaded[model_type]["pred_layer.proj.weight"][index : index + 1]
if index is not None
                else torch.normal(
                    torch.zeros_like(first_line),
                    # std scaled by embedding_size ** -0.5; the original code
                    # multiplied inside ones_like, which had no effect
                    torch.ones_like(first_line) * (embedding_size ** -0.5),
                )
for index in matching_indices
],
dim=0,
)
reloaded[model_type]["pred_layer.proj.bias"] = torch.cat(
[
reloaded[model_type]["pred_layer.proj.bias"][index].view(1)
if index is not None
else torch.rand_like(
reloaded[model_type]["pred_layer.proj.bias"][0].view(1)
)
for index in matching_indices
]
)
def reload_lang_embeddings(reloaded, params, model_type):
"""
    When the pretrained model has not been trained with the same languages,
    remap the lang-embedding state dict.
    Otherwise, keep it as is.
"""
model_reloaded = reloaded[model_type]
reloaded_params = reloaded["params"]
if params.lgs_mapping == "":
lang_mapping = {}
else:
lang_mapping = {
mapping.split(":")[0]: mapping.split(":")[1]
for mapping in params.lgs_mapping.split(",")
}
langs_reloaded = reloaded_params["lang2id"]
langs_reloaded_id2lang = reloaded_params["id2lang"]
indices = []
for lang in [l for i, l in sorted(params.id2lang.items())]:
if lang in lang_mapping:
lang_ = lang_mapping[lang]
else:
lang_ = lang
index = [id for l, id in langs_reloaded.items() if l == lang_]
if len(index) == 0:
logger.warning(
f"No match found for lang {lang} {lang_} in {langs_reloaded.keys()}. Initializing randomly."
)
indices.append(None)
continue
else:
assert (
len(index) == 1
), f"matching lang found: {index} in reloaded model for lang {lang} in {langs_reloaded.keys()}"
logger.warning(
f"Lang {lang} matched to pretrained {langs_reloaded_id2lang[index[0]]} lang embedding."
)
indices.append(index[0])
first_line = model_reloaded["lang_embeddings.weight"][0:1]
embedding_size = model_reloaded["lang_embeddings.weight"].shape[1]
model_reloaded["lang_embeddings.weight"] = torch.cat(
[
model_reloaded["lang_embeddings.weight"][index : index + 1]
if index is not None
            else torch.normal(
                torch.zeros_like(first_line),
                # std scaled by embedding_size ** -0.5; the original code
                # multiplied inside ones_like, which had no effect
                torch.ones_like(first_line) * (embedding_size ** -0.5),
            )
for index in indices
],
dim=0,
)
reloaded[model_type] = model_reloaded
def reload_position_embeddings(reloaded, encoder, model_type):
"""
    When the pretrained model has not been trained with the same position-embedding size,
    remove the unused positions or add extra ones.
"""
model_reloaded = reloaded[model_type]
current_size = encoder.position_embeddings.weight.size()[0]
reloaded_size = model_reloaded["position_embeddings.weight"].size()[0]
if current_size == reloaded_size:
return model_reloaded
elif current_size < reloaded_size:
logger.warning(
f"The size of position embeddings in current model is {current_size}, the size of reloaded is {reloaded_size}. need to truncate the reloaded position embeddings."
)
model_reloaded["position_embeddings.weight"] = model_reloaded[
"position_embeddings.weight"
][:current_size, :]
else:
logger.warning(
f"The size of position embeddings in current model is {current_size}, the size of reloaded is {reloaded_size}. need to repeat last positions {current_size - reloaded_size} times."
)
model_reloaded["position_embeddings.weight"] = torch.cat(
[
model_reloaded["position_embeddings.weight"],
model_reloaded["position_embeddings.weight"][-1, :].repeat(
current_size - reloaded_size, 1
),
],
dim=0,
)
reloaded[model_type] = model_reloaded
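# Illustration (not part of the original module): how the resize in
# reload_position_embeddings behaves. Given a reloaded table of shape (4, d)
# and a current model expecting 6 positions, the last row is repeated twice:
#   torch.cat([pe, pe[-1, :].repeat(2, 1)], dim=0)  # -> shape (6, d)
# Truncation is the symmetric case: pe[:current_size, :].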
|
server_python/reptile/classify.py | dkvirus/py-novel | 145 | 81238 |
<filename>server_python/reptile/classify.py
import requests
from lxml import etree
from db import Db
class Classify(object):
    '''
    Scrape the homepage category data.
    '''
def reptileIndexClassify(self):
        print('Scraping homepage category data: start (classify/reptileIndexClassify)...')
target_url = 'https://www.biquge5200.com/modules/article/search.php'
try:
# r = requests.get(target_url)
# root = etree.HTML(r.text)
# classifies = root.xpath('//div[@class="nav"]//li[position()>2]')
# arr1 = []
# for classify in classifies:
# path = classify.xpath('a/@href')[0].split('/')[-2]
# desc = classify.xpath('a/text()')[0]
# arr1.append(( path, desc ))
db = Db()
# db.insertMany('insert ignore into gysw_classify (`path`, `desc`) values (%s, %s)', tuple(arr1))
db.insertOne('insert ignore into gysw_classify(`path`, `desc`) values ("xxx2", "yyy2")')
db.close()
            print('Scraping homepage category data: success (classify/reptileIndexClassify)...')
except Exception as e:
            print('Scraping homepage category data: failed (classify/reptileIndexClassify)...')
print(e)
# def classify():
# target_url = 'https://www.biquge5200.com/modules/article/search.php'
# try:
# r = requests.get(target_url)
# root = etree.HTML(r.text)
# classifies = root.xpath('//div[@class="nav"]//li[position()>2]')
# arr1 = []
# for classify in classifies:
# path = classify.xpath('a/@href')[0].split('/')[-2]
# desc = classify.xpath('a/text()')[0]
# arr1.append({ 'path': path, 'desc': desc })
    # # save to the DB
# db = Db()
# arr2 = db.selectAll('select `path`, `desc` from gysw_classify')
    # # take the difference against existing rows
# arr3 = [i for i in arr1 if i not in arr2]
# arr4 = []
# for item in arr3:
# arr4.append(tuple(item.values()))
# db.insertMany('insert into gysw_classify (`path`, `desc`) values (%s, %s)', tuple(arr4))
# db.close()
    # print('Operation succeeded')
# except Exception as e:
# print(e)
    # print('Operation failed')
if __name__ == '__main__':
classify = Classify()
classify.reptileIndexClassify()
|
sightpy/materials/emissive.py | ulises1229/Python-Raytracer | 326 | 81243 |
from ..utils.constants import *
from ..utils.vector3 import vec3, rgb, extract
from functools import reduce as reduce
from ..ray import Ray, get_raycolor
from .. import lights
import numpy as np
from . import Material
from ..textures import *
class Emissive(Material):
def __init__(self, color, **kwargs):
if isinstance(color, vec3):
self.texture_color = solid_color(color)
elif isinstance(color, texture):
self.texture_color = color
super().__init__(**kwargs)
def get_color(self, scene, ray, hit):
diff_color = self.texture_color.get_color(hit)
return diff_color
|
nlpaug/util/text/part_of_speech.py | techthiyanes/nlpaug | 3,121 | 81245 |
<gh_stars>1000+
class PartOfSpeech:
NOUN = 'noun'
VERB = 'verb'
ADJECTIVE = 'adjective'
ADVERB = 'adverb'
pos2con = {
'n': [
'NN', 'NNS', 'NNP', 'NNPS', # from WordNet
'NP' # from PPDB
],
'v': [
'VB', 'VBD', 'VBG', 'VBN', 'VBZ', # from WordNet
'VBP' # from PPDB
],
'a': ['JJ', 'JJR', 'JJS', 'IN'],
's': ['JJ', 'JJR', 'JJS', 'IN'], # Adjective Satellite
'r': ['RB', 'RBR', 'RBS'], # Adverb
}
con2pos = {}
poses = []
for key, values in pos2con.items():
poses.extend(values)
for value in values:
if value not in con2pos:
con2pos[value] = []
con2pos[value].append(key)
@staticmethod
def pos2constituent(pos):
if pos in PartOfSpeech.pos2con:
return PartOfSpeech.pos2con[pos]
return []
@staticmethod
def constituent2pos(con):
if con in PartOfSpeech.con2pos:
return PartOfSpeech.con2pos[con]
return []
@staticmethod
def get_pos():
return PartOfSpeech.poses
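# Usage sketch (not in the original file): the two static methods map between
# WordNet-style POS keys and Penn Treebank constituent tags.
# >>> PartOfSpeech.pos2constituent('v')
# ['VB', 'VBD', 'VBG', 'VBN', 'VBZ', 'VBP']
# >>> PartOfSpeech.constituent2pos('JJ')
# ['a', 's']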
|
tests/pybaseball/test_team_batting.py | reddigari/pybaseball | 650 | 81252 |
<filename>tests/pybaseball/test_team_batting.py
from typing import Callable
import pandas as pd
import pytest
import requests
from pybaseball.team_batting import team_batting
@pytest.fixture(name="sample_html")
def _sample_html(get_data_file_contents: Callable) -> str:
return get_data_file_contents('team_batting.html')
@pytest.fixture(name="sample_processed_result")
def _sample_processed_result(get_data_file_dataframe: Callable) -> pd.DataFrame:
return get_data_file_dataframe('team_batting.csv')
def test_team_batting(response_get_monkeypatch: Callable, sample_html: str, sample_processed_result: pd.DataFrame):
season = 2019
response_get_monkeypatch(sample_html)
team_batting_result = team_batting(season).reset_index(drop=True)
pd.testing.assert_frame_equal(team_batting_result, sample_processed_result, check_dtype=False)
|
recipes/Python/218485_super_tuples/recipe-218485.py | tdiprima/code | 2,023 | 81271 |
<filename>recipes/Python/218485_super_tuples/recipe-218485.py
def superTuple(name, attributes):
"""Creates a Super Tuple class."""
dct = {}
#Create __new__.
nargs = len(attributes)
def _new_(cls, *args):
if len(args) != nargs:
raise TypeError("%s takes %d arguments (%d given)." % (cls.__name__,
nargs,
len(args)))
return tuple.__new__(cls, args)
dct["__new__"] = staticmethod(_new_)
#Create __repr__.
def _repr_(self):
contents = [repr(elem) for elem in self]
return "%s<%s>" % (self.__class__.__name__,
", ".join(contents))
dct["__repr__"] = _repr_
#Create attribute properties.
def getter(i):
return lambda self: self.__getitem__(i)
for index, attribute in enumerate(attributes):
dct[attribute] = property(getter(index))
#Set slots.
dct["__slots__"] = []
#Return class.
return type(name, (tuple,), dct)
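# Example usage (not in the original recipe): superTuple builds a lightweight,
# immutable record type, much like collections.namedtuple.
if __name__ == '__main__':
    Point = superTuple('Point', ['x', 'y'])
    p = Point(3, 4)
    print(p.x, p.y)              # 3 4
    print(p)                     # Point<3, 4>
    print(isinstance(p, tuple))  # True; __slots__ keeps instances small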
|
music21/figuredBass/examples.py | cuthbertLab/music21 | 1,449 | 81284 |
<filename>music21/figuredBass/examples.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: examples.py
# Purpose: music21 class which allows running of test cases
# Authors: <NAME>
#
# Copyright: Copyright © 2010-2011 <NAME> and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
Each of the example methods in this module provides a figured bass line as a
:class:`~music21.figuredBass.realizer.FiguredBassLine` instance.
These can be realized by calling
:meth:`~music21.figuredBass.realizer.FiguredBassLine.realize`, which takes in an
optional :class:`~music21.figuredBass.rules.Rules` object.
The result is a :class:`~music21.figuredBass.realizer.Realization`
object which can generate realizations as instances of
:class:`~music21.stream.Score`. These realizations can then be displayed
in external software such as MuseScore or Finale by
calling :meth:`~music21.base.Music21Object.show`.
'''
import copy
import unittest
from music21.figuredBass import realizer
from music21.figuredBass import rules
# ------------------------------------------------------------------------------
def exampleA():
'''
This example was a homework assignment for 21M.302: Harmony & Counterpoint II
at MIT in the fall of 2010, taught by <NAME> of the MIT Music Program.
>>> from music21.figuredBass import examples
>>> fbLine = examples.exampleA()
>>> #_DOCS_SHOW fbLine.generateBassLine().show()
.. image:: images/figuredBass/fbExamples_bassLineA.*
:width: 700
The following is a realization of fbLine in four parts using the default rules set.
The soprano part is limited to stepwise motion, and the alto and tenor parts are
limited to motions within a perfect octave.
>>> from music21.figuredBass import rules
>>> fbRules = rules.Rules()
>>> fbRules.partMovementLimits = [(1, 2), (2, 12), (3, 12)]
>>> fbRealization1 = fbLine.realize(fbRules)
>>> fbRealization1.getNumSolutions()
360
>>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol1A.*
:width: 700
Now, the restriction on upper parts being within a perfect octave of each other is
removed, and fbLine is realized again.
>>> fbRules.upperPartsMaxSemitoneSeparation = None
>>> fbRealization2 = fbLine.realize(fbRules)
>>> fbRealization2.keyboardStyleOutput = False
>>> fbRealization2.getNumSolutions()
3713168
>>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol2A.*
:width: 700
'''
from music21 import converter
s = converter.parse("tinynotation: 3/2 C2 D2_6 E2_6 F2_6 C#2_b7,5,3 D2 "
"BB2_#6,5,3 C2_6 AA#2_7,5,#3 BB1_6,4 BB2_7,#5,#3 E1.",
makeNotation=False)
return realizer.figuredBassFromStream(s)
def exampleD():
'''
This example was a homework assignment for 21M.302: Harmony & Counterpoint II
at MIT in the fall of 2010, taught by <NAME> of the MIT Music Program.
>>> from music21.figuredBass import examples
>>> fbLine = examples.exampleD()
>>> #_DOCS_SHOW fbLine.generateBassLine().show()
.. image:: images/figuredBass/fbExamples_bassLineD.*
:width: 700
The following is a realization of fbLine in four parts using the default rules set.
The soprano part is limited to stepwise motion, and the alto and tenor parts are
limited to motions within a perfect octave.
>>> from music21.figuredBass import rules
>>> fbRules = rules.Rules()
>>> fbRules.partMovementLimits = [(1, 2), (2, 12), (3, 12)]
>>> fbRealization1 = fbLine.realize(fbRules)
>>> fbRealization1.getNumSolutions()
1560
>>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol1D.*
:width: 700
Now, the restriction on voice overlap is lifted, which is common in keyboard-style
figured bass, and fbLine is realized again. Voice overlap can be seen in the fourth
measure.
>>> fbRules.forbidVoiceOverlap = False
>>> fbRealization2 = fbLine.realize(fbRules)
>>> fbRealization2.getNumSolutions()
109006
>>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol2D.*
:width: 700
Now, the restriction on voice overlap is reset, but the restriction on the upper parts
being within a perfect octave of each other is removed. fbLine is realized again.
>>> fbRules.forbidVoiceOverlap = True
>>> fbRules.upperPartsMaxSemitoneSeparation = None
>>> fbRealization3 = fbLine.realize(fbRules)
>>> fbRealization3.getNumSolutions()
29629539
>>> fbRealization3.keyboardStyleOutput = False
>>> #_DOCS_SHOW fbRealization3.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol3D.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse("tinynotation: 3/4 BB4 C#4_#6 D4_6 E2 E#4_7,5,#3 F#2_6,4 "
"F#4_5,#3 G2 E4_6 F#2_6,4 E4_#4,2 D2_6 EE4_7,5,#3 AA2.",
makeNotation=False)
s.insert(0, key.Key('b'))
return realizer.figuredBassFromStream(s)
def exampleB():
'''
This example was retrieved from page 114 of *The Music Theory Handbook* by <NAME>.
>>> from music21.figuredBass import examples
>>> fbLine = examples.exampleB()
>>> #_DOCS_SHOW fbLine.generateBassLine().show()
.. image:: images/figuredBass/fbExamples_bassLineB.*
:width: 700
First, fbLine is realized with the default rules set.
>>> fbRealization1 = fbLine.realize()
>>> fbRealization1.getNumSolutions()
422
>>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol1B.*
:width: 700
Now, a Rules object is created, and the restriction that the chords
need to be complete is lifted. fbLine is realized once again.
>>> from music21.figuredBass import rules
>>> fbRules = rules.Rules()
>>> fbRules.forbidIncompletePossibilities = False
>>> fbRealization2 = fbLine.realize(fbRules)
>>> fbRealization2.getNumSolutions()
188974
>>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol2B.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse("tinynotation: 4/4 D4 A4_7,5,#3 B-4 F4_6 G4_6 AA4_7,5,#3 D2",
makeNotation=False)
s.insert(0, key.Key('d'))
return realizer.figuredBassFromStream(s)
def exampleC():
'''
This example was retrieved from page 114 of *The Music Theory Handbook* by <NAME>.
>>> from music21.figuredBass import examples
>>> fbLine = examples.exampleC()
>>> #_DOCS_SHOW fbLine.generateBassLine().show()
.. image:: images/figuredBass/fbExamples_bassLineC.*
:width: 700
First, fbLine is realized with the default rules set.
>>> fbRealization1 = fbLine.realize()
>>> fbRealization1.getNumSolutions()
833
>>> #_DOCS_SHOW fbRealization1.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol1C.*
:width: 700
Now, parallel fifths are allowed in realizations. The image below
shows one of them. There is a parallel fifth between the bass and
alto parts going from the half-diminished 6,5 (B,F#) to the dominant
seventh (C#,G#) in the second measure.
>>> from music21.figuredBass import rules
>>> fbRules = rules.Rules()
>>> fbRules.forbidParallelFifths = False
>>> fbRealization2 = fbLine.realize(fbRules)
>>> fbRealization2.getNumSolutions()
2427
>>> #_DOCS_SHOW fbRealization2.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_sol2C.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse("tinynotation: 4/4 FF#4 GG#4_#6 AA4_6 FF#4 BB4_6,5 C#4_7,5,#3 F#2",
makeNotation=False)
s.insert(0, key.Key('f#'))
return realizer.figuredBassFromStream(s)
def V43ResolutionExample():
'''
The dominant 4,3 can resolve to either the tonic 5,3 or tonic 6,3. The proper resolution
is dependent on the bass note of the tonic, and is determined in context, as shown in the
following figured bass realization.
>>> from music21.figuredBass import examples
>>> fbLine = examples.V43ResolutionExample()
>>> fbRealization = fbLine.realize()
>>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_V43.*
:width: 350
'''
from music21 import converter
from music21 import key
s = converter.parse("tinynotation: 4/4 D2 E2_4,3 D2_5,3 E2_4,3 F#1_6,3", makeNotation=False)
s.insert(0, key.Key('D'))
return realizer.figuredBassFromStream(s)
def viio65ResolutionExample():
'''
For a fully diminished seventh chord resolving to the tonic, the resolution chord
can contain either a doubled third (standard resolution) or a doubled tonic (alternate
resolution), depending on whether the third of the diminished chord rises or falls.
The user can control this in a Rules object by modifying
:attr:`~music21.figuredBass.rules.Rules.doubledRootInDim7`.
However, when resolving a diminished 6,5, the third is found in the bass and the
proper resolution is determined in context, regardless of user preference.
The following shows both cases involving a diminished 6,5. The resolution of the
first diminished chord has a doubled D, while that of the second has a doubled F#.
Notice that the resolution of the first involves a diminished fifth (E, Bb) going
to a perfect fifth (D, A).
>>> from music21.figuredBass import examples
>>> fbLine = examples.viio65ResolutionExample()
>>> fbRealization = fbLine.realize()
>>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_vii65.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse("tinyNotation: 4/4 D2 E2_6,b5 D2 E2_6,b5 F#1_6", makeNotation=False)
s.insert(0, key.Key('D'))
return realizer.figuredBassFromStream(s)
def augmentedSixthResolutionExample():
'''
This example was retrieved from page 61 of *The Music Theory Handbook* by <NAME>.
Italian (8,#6,3), French (#6,4,3), German (#6,5,3), and Swiss (#6,#4,3)
augmented sixth resolutions to
either the major dominant or the major/minor tonic 6,4 are supported.
The first four bars show the
resolutions to the dominant in the order above, while the last bar
shows the German augmented sixth
resolving to the tonic.
>>> from music21.figuredBass import examples
>>> fbLine = examples.augmentedSixthResolutionExample()
>>> fbRealization = fbLine.realize()
>>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_a6.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse("tinynotation: 4/4 D4 BB-4_8,#6,3 AA2_# D4 BB-4_#6,4,3 "
"AA2_# D4 BB-4_#6,5,3 AA2_# D4 BB-4_#6,#4,3 AA2_# D4 "
"BB-4_#6,5,3 AA2_6,4",
makeNotation=False)
s.insert(0, key.Key('d'))
return realizer.figuredBassFromStream(s)
def italianA6ResolutionExample():
'''
The Italian augmented sixth chord (It+6) is the only
augmented sixth chord to consist of only three
pitch names, and when represented in four parts, the
tonic is doubled. The tonic can resolve up, down or
stay the same, and in four parts, the two tonics always
resolve differently, resulting in two equally
acceptable resolutions. An alternate approach to resolving
the It+6 chord was taken, such that an It+6
chord could map internally to two different resolutions.
Every other special resolution in fbRealizer
consists of a 1:1 mapping of special chords to resolutions.
Here, the It+6 chord is resolving to the dominant, minor tonic,
and major tonic, respectively. In the
dominant resolution shown, the tonics (D) are resolving inward,
but they can resolve outward as well. In
the minor tonic resolution, the higher tonic is resolving up to F,
and the lower tonic remains the same.
In the major tonic resolution, the higher tonic remains the same,
while the lower tonic resolves up to the F#.
>>> from music21.figuredBass import examples
>>> from music21.figuredBass import rules
>>> fbLine = examples.italianA6ResolutionExample()
>>> fbRules = rules.Rules()
>>> fbRules.upperPartsMaxSemitoneSeparation = None
>>> fbRules.partMovementLimits.append([1, 4])
>>> fbRealization = fbLine.realize(fbRules)
>>> fbRealization.keyboardStyleOutput = False
>>> #_DOCS_SHOW fbRealization.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_it+6.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse(
"tinynotation: D4 BB-4_#6,3 AA2_# D4 BB-4_#6,3 AA2_6,4 D4 BB-4_#6,3 AA2_#6,4",
makeNotation=False)
s.insert(0, key.Key('d'))
return realizer.figuredBassFromStream(s)
def twelveBarBlues():
'''
    This is a progression in Bb major based on the twelve bar blues. The progression used is:
I | IV | I | I7
IV | IV | I | I7
V7 | IV6 | I | I
>>> from music21.figuredBass import examples
>>> from music21.figuredBass import rules
>>> bluesLine = examples.twelveBarBlues()
>>> #_DOCS_SHOW bluesLine.generateBassLine().show()
.. image:: images/figuredBass/fbExamples_bluesBassLine.*
:width: 700
>>> fbRules = rules.Rules()
>>> fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
>>> fbRules.forbidVoiceOverlap = False
>>> blRealization = bluesLine.realize(fbRules)
>>> blRealization.getNumSolutions()
2224978
>>> #_DOCS_SHOW blRealization.generateRandomRealization().show()
.. image:: images/figuredBass/fbExamples_twelveBarBlues.*
:width: 700
'''
from music21 import converter
from music21 import key
s = converter.parse(
"tinynotation: BB-1 E-1 BB-1 BB-1_7 E-1 E-1 BB-1 BB-1_7 F1_7 G1_6 BB-1 BB-1",
makeNotation=False)
s.insert(0, key.Key('B-'))
return realizer.figuredBassFromStream(s)
# -----------------------------------------------------------------
# METHODS FOR GENERATION OF BLUES VAMPS
def generateBoogieVamp(blRealization=None, numRepeats=5):
'''
Turns whole notes in twelve bar blues bass line to blues boogie woogie bass line. Takes
in numRepeats, which is the number of times to repeat the bass line. Also, takes in a
realization of :meth:`~music21.figuredBass.examples.twelveBarBlues`. If none is provided,
a default realization with :attr:`~music21.figuredBass.rules.Rules.forbidVoiceOverlap`
set to False and :attr:`~music21.figuredBass.rules.Rules.partMovementLimits` set to
[(1, 4), (2, 12), (3, 12)] is used.
>>> from music21.figuredBass import examples
>>> #_DOCS_SHOW examples.generateBoogieVamp(numRepeats=1).show()
.. image:: images/figuredBass/fbExamples_boogieVamp.*
:width: 700
'''
from music21 import converter
from music21 import stream
from music21 import interval
if blRealization is None:
bluesLine = twelveBarBlues()
fbRules = rules.Rules()
fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
fbRules.forbidVoiceOverlap = False
blRealization = bluesLine.realize(fbRules)
sampleScore = blRealization.generateRandomRealizations(numRepeats)
boogieBassLine = converter.parse("tinynotation: BB-8. D16 F8. G16 A-8. G16 F8. D16",
makeNotation=False)
newBassLine = stream.Part()
newBassLine.append(sampleScore[1][0]) # Time signature
newBassLine.append(sampleScore[1][1]) # Key signature
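    # For each whole note of the realized bass line, transpose the one-bar
    # boogie pattern by the interval from the pattern's first note to that
    # bass note, copy over any lyrics, and append the transposed notes.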
for n in sampleScore[1].notes:
i = interval.notesToInterval(boogieBassLine[0], n)
tp = boogieBassLine.transpose(i)
for lyr in n.lyrics:
tp.notes.first().addLyric(lyr.text)
for m in tp.notes:
newBassLine.append(m)
newScore = stream.Score()
newScore.insert(0, sampleScore[0])
newScore.insert(newBassLine)
return newScore
def generateTripletBlues(blRealization=None, numRepeats=5): # 12/8
'''
Turns whole notes in twelve bar blues bass line to triplet blues bass line. Takes
in numRepeats, which is the number of times to repeat the bass line. Also, takes in a
realization of :meth:`~music21.figuredBass.examples.twelveBarBlues`. If none is provided,
a default realization with :attr:`~music21.figuredBass.rules.Rules.forbidVoiceOverlap`
set to False and :attr:`~music21.figuredBass.rules.Rules.partMovementLimits` set to
[(1, 4), (2, 12), (3, 12)] is used.
>>> from music21.figuredBass import examples
>>> #_DOCS_SHOW examples.generateTripletBlues(numRepeats=1).show()
.. image:: images/figuredBass/fbExamples_tripletBlues.*
:width: 700
'''
from music21 import converter
from music21 import stream
from music21 import interval
from music21 import meter
if blRealization is None:
bluesLine = twelveBarBlues()
fbRules = rules.Rules()
fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)]
fbRules.forbidVoiceOverlap = False
blRealization = bluesLine.realize(fbRules)
sampleScore = blRealization.generateRandomRealizations(numRepeats)
tripletBassLine = converter.parse("tinynotation: BB-4 BB-8 D4 D8 F4 F8 A-8 G8 F8",
makeNotation=False)
newBassLine = stream.Part()
for n in sampleScore[1].notes:
i = interval.notesToInterval(tripletBassLine[0], n)
tp = tripletBassLine.transpose(i)
for lyr in n.lyrics:
tp.notes.first().addLyric(lyr.text)
for m in tp.notes:
newBassLine.append(m)
newTopLine = stream.Part()
for sampleChord in sampleScore[0].notes:
sampleChordCopy = copy.deepcopy(sampleChord)
sampleChordCopy.quarterLength = 6.0
newTopLine.append(sampleChordCopy)
newScore = stream.Score()
newScore.append(meter.TimeSignature("12/8")) # Time signature
newScore.append(sampleScore[1][1]) # Key signature
newScore.insert(0, newTopLine)
newScore.insert(0, newBassLine)
return newScore
_DOC_ORDER = [exampleA, exampleB, exampleC, exampleD, V43ResolutionExample,
viio65ResolutionExample,
augmentedSixthResolutionExample, italianA6ResolutionExample, twelveBarBlues,
generateBoogieVamp, generateTripletBlues]
# ------------------------------------------------------------------------------
class Test(unittest.TestCase):
pass
if __name__ == '__main__':
import music21
music21.mainTest(Test)
|
kuwala/pipelines/google-poi/src/pipeline/SearchScraper.py | bmahmoudyan/kuwala | 381 | 81301 |
import moment
import os
import pandas
import pyarrow as pa
import pyarrow.parquet as pq
import requests
from func_timeout import func_set_timeout, FunctionTimedOut
from pandas import DataFrame
from pathlib import Path
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
from pyspark.sql.types import StringType
from python_utils.src.spark_udfs import get_confidence_based_h3_and_name_distance, get_h3_distance, get_string_distance
from time import sleep
MAX_H3_DISTANCE = 500
class SearchScraper:
"""Get result for search strings"""
@staticmethod
@func_set_timeout(180)
def send_query(batch, query_type):
# noinspection PyBroadException
try:
host = os.getenv('GOOGLE_POI_API_HOST') or '127.0.0.1'
# noinspection HttpUrlsUsage
result = requests.request(
method='get',
url=f'http://{host}:3003/{"search" if query_type == "search" else "poi-information"}',
json=batch)
return result.json() if result else None
except Exception as e:
print(f'[{moment.now().format("YYYY-MM-DDTHH-mm-ss")}]: Search query failed: ', e)
print(f'[{moment.now().format("YYYY-MM-DDTHH-mm-ss")}]: Continuing without batch.')
return None
"""Match the queries that have been sent to the received results"""
@staticmethod
def match_search_results(directory: str, file_name: str):
memory = os.getenv('SPARK_MEMORY') or '16g'
spark = SparkSession.builder.appName('google-poi').config('spark.driver.memory', memory).getOrCreate()
df_str = spark.read.parquet(directory + file_name)
path_results = directory.replace('Strings', 'Results') + file_name.replace('strings', 'results')
df_res = spark.read.parquet(path_results)
# noinspection PyTypeChecker
df_res = df_str \
.alias('df_str') \
.join(df_res, df_str.query == df_res.query, 'inner') \
.filter(col('data.h3Index').isNotNull()) \
.withColumn('osmName', col('df_str.name')) \
.withColumn('googleName', col('data.name')) \
.withColumn(
'nameDistance',
get_string_distance(col('googleName'), col('osmName'), col('df_str.query'))
) \
.withColumn(
'h3Distance',
get_h3_distance(col('h3Index').cast(StringType()), col('data.h3Index').cast(StringType()),
lit(MAX_H3_DISTANCE))
) \
.withColumn(
'confidence',
get_confidence_based_h3_and_name_distance(col('h3Distance'), col('nameDistance'), lit(MAX_H3_DISTANCE))
) \
.select('osmId', 'type', 'confidence', 'data.id')
df_res.write.parquet(path_results.replace('results', 'results_matched'))
"""Match the POI ids that have been sent to the received results"""
@staticmethod
def match_poi_results(directory: str, file_name: str):
memory = os.getenv('SPARK_MEMORY') or '16g'
spark = SparkSession.builder.appName('google-poi').config('spark.driver.memory', memory).getOrCreate()
df_res = spark.read.parquet(
directory.replace('Strings', 'Results') + file_name.replace('strings', 'results_matched')
)
path_poi_data = directory.replace('searchStrings', 'poiData') + file_name.replace('search_strings', 'poi_data')
df_pd = spark.read.parquet(path_poi_data)
# noinspection PyTypeChecker
df_pd = df_res \
.alias('df_res') \
.join(df_pd, df_res.id == df_pd.id, 'inner') \
.filter(col('data.h3Index').isNotNull()) \
.select('osmId', 'type', 'confidence', col('df_res.id').alias('id'), 'data.*')
df_pd.write.parquet(path_poi_data.replace('poi_data', 'poi_data_matched'))
"""Send queries in batches for each partition of a dataframe"""
@staticmethod
def batch_queries(
df: DataFrame,
output_dir: str,
file_name: str,
query_property: str,
query_type: str,
schema=None
):
batch = list()
batch_size = 100
max_sleep_time = 120
writer = None
for index, row in df.iterrows():
batch.append(row[query_property])
# noinspection PyTypeChecker
if (len(batch) == batch_size) or ((index + 1) == len(df.index)):
successful = False
sleep_time = 1
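                # Retry with exponential backoff: the sleep time doubles after
                # each failed or empty response, and the batch is skipped once
                # it reaches max_sleep_time (120s).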
while not successful and (sleep_time < max_sleep_time):
try:
result = SearchScraper.send_query(batch, query_type)
if result and ('data' in result):
data = pandas.DataFrame(result['data'])
# noinspection PyArgumentList
table = pa.Table.from_pandas(df=data, schema=schema)
if not writer:
script_dir = os.path.dirname(__file__)
output_dir = os.path.join(script_dir, output_dir)
output_file = os.path.join(output_dir, file_name)
Path(output_dir).mkdir(parents=True, exist_ok=True)
writer = pq.ParquetWriter(
output_file,
schema=schema if schema else table.schema,
flavor='spark'
)
writer.write_table(table)
successful = True
else:
sleep(sleep_time)
sleep_time *= 2
except FunctionTimedOut:
sleep(sleep_time)
sleep_time *= 2
if sleep_time >= max_sleep_time:
print(f'[{moment.now().format("YYYY-MM-DDTHH-mm-ss")}]: Request timed out too many times. '
f'Skipping batch')
batch = list()
if writer:
writer.close()
"""Send Google POI ids to retrieve all POI information"""
@staticmethod
def send_poi_queries(directory: str, file_name: str):
pois = pq \
.read_table(directory.replace('Strings', 'Results') + file_name.replace('strings', 'results_matched')) \
.to_pandas()
pois = pois[['id']].drop_duplicates()
schema = pa.schema([
pa.field('id', pa.string()),
pa.field('data', pa.struct([
pa.field('name', pa.string()),
pa.field('placeID', pa.string()),
pa.field('location', pa.struct([
pa.field('lat', pa.float64()),
pa.field('lng', pa.float64())
])),
pa.field('h3Index', pa.string()),
pa.field('address', pa.list_(pa.string())),
pa.field('timezone', pa.string()),
pa.field('categories', pa.struct([
pa.field('google', pa.list_(pa.string())),
pa.field('kuwala', pa.list_(pa.string()))
])),
pa.field('temporarilyClosed', pa.bool_()),
pa.field('permanentlyClosed', pa.bool_()),
pa.field('insideOf', pa.string()),
pa.field('contact', pa.struct([
pa.field('phone', pa.string()),
pa.field('website', pa.string())
])),
pa.field('openingHours', pa.list_(pa.struct([
pa.field('closingTime', pa.string()),
pa.field('openingTime', pa.string()),
pa.field('date', pa.string())
]))),
pa.field('rating', pa.struct([
pa.field('numberOfReviews', pa.int64()),
pa.field('stars', pa.float64())
])),
pa.field('priceLevel', pa.int64()),
pa.field('popularity', pa.list_(pa.struct([
pa.field('popularity', pa.int64()),
pa.field('timestamp', pa.string())
]))),
pa.field('waitingTime', pa.list_(pa.struct([
pa.field('waitingTime', pa.int64()),
pa.field('timestamp', pa.string())
]))),
pa.field('spendingTime', pa.list_(pa.int64()))
]))
])
SearchScraper.batch_queries(
df=pois,
            output_dir='../../tmp/googleFiles/poiData/',
file_name=file_name.replace('search_strings', 'poi_data'),
query_property='id',
query_type='poi',
schema=schema
)
"""Send search strings to get Google POI ids"""
@staticmethod
def send_search_queries(directory: str, file_name: str):
search_strings = pq.read_table(directory + file_name).to_pandas()
schema = pa.schema([
pa.field('query', pa.string()),
pa.field('data', pa.struct([
pa.field('h3Index', pa.string()),
pa.field('id', pa.string()),
pa.field('location', pa.struct([
pa.field('lat', pa.float64()),
pa.field('lng', pa.float64())
])),
pa.field('name', pa.string())
]))
])
SearchScraper.batch_queries(
df=search_strings,
            output_dir='../../tmp/googleFiles/searchResults/',
file_name=file_name.replace('strings', 'results'),
query_property='query',
query_type='search',
schema=schema
)
"""Write scraped POI information to a Parquet file"""
@staticmethod
def scrape_with_search_string():
script_dir = os.path.dirname(__file__)
parquet_files = os.path.join(script_dir, '../../tmp/googleFiles/searchStrings/')
file_name = sorted(os.listdir(parquet_files), reverse=True)[0]
SearchScraper.send_search_queries(parquet_files, file_name)
SearchScraper.match_search_results(parquet_files, file_name)
SearchScraper.send_poi_queries(parquet_files, file_name)
SearchScraper.match_poi_results(parquet_files, file_name)
|
examples/pytorch/AxHyperOptimizationPTL/ax_hpo_mnist.py | PeterSulcs/mlflow | 10,351 | 81368 |
<filename>examples/pytorch/AxHyperOptimizationPTL/ax_hpo_mnist.py
import argparse
import mlflow
from ax.service.ax_client import AxClient
from iris import IrisClassification
from iris_data_module import IrisDataModule
import pytorch_lightning as pl
def train_evaluate(params, max_epochs=100):
model = IrisClassification(**params)
dm = IrisDataModule()
dm.setup(stage="fit")
trainer = pl.Trainer(max_epochs=max_epochs)
mlflow.pytorch.autolog()
trainer.fit(model, dm)
trainer.test(datamodule=dm)
test_accuracy = trainer.callback_metrics.get("test_acc")
return test_accuracy
def model_training_hyperparameter_tuning(max_epochs, total_trials, params):
"""
    This function takes max_epochs, total_trials, and params as input
    and creates a nested run in MLflow. The parameters, metrics, model, and summary are logged under their
    respective MLflow run ids. The best parameters are logged along with the baseline model.
:param max_epochs: Max epochs used for training the model. Type:int
:param total_trials: Number of ax-client experimental trials. Type:int
:param params: Model parameters. Type:dict
"""
with mlflow.start_run(run_name="Parent Run"):
train_evaluate(params=params, max_epochs=max_epochs)
ax_client = AxClient()
ax_client.create_experiment(
parameters=[
{"name": "lr", "type": "range", "bounds": [1e-3, 0.15], "log_scale": True},
{"name": "weight_decay", "type": "range", "bounds": [1e-4, 1e-3]},
{"name": "momentum", "type": "range", "bounds": [0.7, 1.0]},
],
objective_name="test_accuracy",
)
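        # Ax ask/tell loop: get_next_trial() proposes a candidate
        # (lr, weight_decay, momentum) and complete_trial() reports the
        # observed test accuracy back to the optimizer.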
for i in range(total_trials):
with mlflow.start_run(nested=True, run_name="Trial " + str(i)) as child_run:
parameters, trial_index = ax_client.get_next_trial()
test_accuracy = train_evaluate(params=parameters, max_epochs=max_epochs)
# completion of trial
ax_client.complete_trial(trial_index=trial_index, raw_data=test_accuracy.item())
best_parameters, metrics = ax_client.get_best_parameters()
for param_name, value in best_parameters.items():
mlflow.log_param("optimum_" + param_name, value)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parent_parser=parser)
parser.add_argument(
"--total_trials",
default=3,
help="umber of trials to be run for the optimization experiment",
)
args = parser.parse_args()
if "max_epochs" in args:
max_epochs = args.max_epochs
else:
max_epochs = 100
params = {"lr": 0.1, "momentum": 0.9, "weight_decay": 0}
model_training_hyperparameter_tuning(
max_epochs=int(max_epochs), total_trials=int(args.total_trials), params=params
)
|
riko/modules/fetchtext.py | nerevu/riko | 1,716 | 81378 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.fetchtext
~~~~~~~~~~~~~~~~~~~~~~
Provides functions for fetching text data sources.
Accesses and extracts data from text sources on the web. This data can then be
merged with other data in your Pipe.
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.modules.fetchtext import pipe
>>>
>>> conf = {'url': get_path('lorem.txt')}
>>> next(pipe(conf=conf))['content'] == 'What is Lorem Ipsum?'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from . import processor
from riko import ENCODING
from riko.utils import fetch, auto_close, get_abspath
from riko.bado import coroutine, return_value, io
OPTS = {"ftype": "none", "assign": "content"}
DEFAULTS = {"encoding": ENCODING}
logger = gogo.Gogo(__name__, monolog=True).logger
@coroutine
def async_parser(_, objconf, skip=False, **kwargs):
"""Asynchronously parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from meza.fntools import Objectify
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['content'])
... url = get_path('lorem.txt')
... objconf = Objectify({'url': url, 'encoding': ENCODING})
... d = async_parser(None, objconf, assign='content')
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
What is Lorem Ipsum?
"""
if skip:
stream = kwargs["stream"]
else:
url = get_abspath(objconf.url)
f = yield io.async_url_open(url)
assign = kwargs["assign"]
encoding = objconf.encoding
_stream = ({assign: line.strip().decode(encoding)} for line in f)
stream = auto_close(_stream, f)
return_value(stream)
def parser(_, objconf, skip=False, **kwargs):
"""Parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from meza.fntools import Objectify
>>>
>>> url = get_path('lorem.txt')
>>> objconf = Objectify({'url': url, 'encoding': ENCODING})
>>> result = parser(None, objconf, assign='content')
>>> next(result)['content'] == 'What is Lorem Ipsum?'
True
"""
if skip:
stream = kwargs["stream"]
else:
f = fetch(decode=True, **objconf)
_stream = ({kwargs["assign"]: line.strip()} for line in f)
stream = auto_close(_stream, f)
return stream
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A source that asynchronously fetches and parses an XML or JSON file to
return the entries.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'url'. May
contain the key 'encoding'.
url (str): The web site to fetch.
encoding (str): The file encoding (default: utf-8).
assign (str): Attribute to assign parsed content (default: content)
Returns:
Deferred: twisted.internet.defer.Deferred stream of items
Examples:
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['content'])
... conf = {'url': get_path('lorem.txt')}
... d = async_pipe(conf=conf)
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
What is Lorem Ipsum?
"""
return async_parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
"""A source that fetches and parses an XML or JSON file to
return the entries.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'url'. May
contain the key 'encoding'.
url (str): The web site to fetch
encoding (str): The file encoding (default: utf-8).
assign (str): Attribute to assign parsed content (default: content)
Returns:
dict: an iterator of items
Examples:
>>> from riko import get_path
>>>
>>> conf = {'url': get_path('lorem.txt')}
>>> next(pipe(conf=conf))['content'] == 'What is Lorem Ipsum?'
True
"""
return parser(*args, **kwargs)
|
pyvex/lifting/util/vex_helper.py | osogi/pyvex | 261 | 81382 |
<gh_stars>100-1000
import re
import copy
from ...const import ty_to_const_class, vex_int_class, get_type_size
from ...expr import Const, RdTmp, Unop, Binop, Load, CCall, Get, ITE
from ...stmt import WrTmp, Put, IMark, Store, NoOp, Exit
from ...enums import IRCallee
from future.utils import with_metaclass
class JumpKind(object):
Boring = 'Ijk_Boring'
Call = 'Ijk_Call'
Ret = 'Ijk_Ret'
Segfault = 'Ijk_SigSEGV'
Exit = 'Ijk_Exit'
Syscall = 'Ijk_Sys_syscall'
Sysenter = 'Ijk_Sys_sysenter'
Invalid = 'Ijk_INVALID'
NoDecode = 'Ijk_NoDecode'
typemeta_re = re.compile(r'int_(?P<size>\d+)$')
class TypeMeta(type):
def __getattr__(self, name):
match = typemeta_re.match(name)
if match:
width = int(match.group('size'))
return vex_int_class(width).type
else:
return type.__getattr__(name)
class Type(with_metaclass(TypeMeta, object)):
__metaclass__ = TypeMeta
ieee_float_16 = 'Ity_F16'
ieee_float_32 = 'Ity_F32'
ieee_float_64 = 'Ity_F64'
ieee_float_128 = 'Ity_F128'
decimal_float_32 = 'Ity_D32'
decimal_float_64 = 'Ity_D64'
decimal_float_128 = 'Ity_D128'
simd_vector_128 = 'Ity_V128'
simd_vector_256 = 'Ity_V256'
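# Integer types are resolved dynamically by TypeMeta: e.g. Type.int_32 maps to
# vex_int_class(32).type (presumably 'Ity_I32').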
def get_op_format_from_const_ty(ty):
return ty_to_const_class(ty).op_format
def make_format_op_generator(fmt_string):
"""
Return a function which generates an op format (just a string of the vex instruction)
Functions by formatting the fmt_string with the types of the arguments
"""
def gen(arg_types):
converted_arg_types = list(map(get_op_format_from_const_ty, arg_types))
op = fmt_string.format(arg_t=converted_arg_types)
return op
return gen
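# A hypothetical sketch of how the generator is used (assumes the op_format of
# Ity_I32's const class is '32'):
#   gen = make_format_op_generator('Iop_Add{arg_t[0]}')
#   gen(['Ity_I32', 'Ity_I32'])  # -> 'Iop_Add32'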
def mkbinop(fstring):
return lambda self, expr_a, expr_b: self.op_binary(make_format_op_generator(fstring))(expr_a, expr_b)
def mkunop(fstring):
return lambda self, expr_a: self.op_unary(make_format_op_generator(fstring))(expr_a)
def mkcmpop(fstring_fragment, signedness=''):
def cmpop(self, expr_a, expr_b):
ty = self.get_type(expr_a)
fstring = 'Iop_Cmp%s{arg_t[0]}%s' % (fstring_fragment, signedness)
retval = mkbinop(fstring)(self, expr_a, expr_b)
return self.cast_to(retval, ty)
return cmpop
class IRSBCustomizer(object):
op_add = mkbinop('Iop_Add{arg_t[0]}')
op_sub = mkbinop('Iop_Sub{arg_t[0]}')
op_umul = mkbinop('Iop_Mul{arg_t[0]}')
op_smul = mkbinop('Iop_MullS{arg_t[0]}')
op_sdiv = mkbinop('Iop_DivS{arg_t[0]}')
op_udiv = mkbinop('Iop_DivU{arg_t[0]}')
# Custom operation that does not exist in libVEX
op_mod = mkbinop('Iop_Mod{arg_t[0]}')
op_or = mkbinop('Iop_Or{arg_t[0]}')
op_and = mkbinop('Iop_And{arg_t[0]}')
op_xor = mkbinop('Iop_Xor{arg_t[0]}')
op_shr = mkbinop('Iop_Shr{arg_t[0]}')
op_shl = mkbinop('Iop_Shl{arg_t[0]}')
op_not = mkunop('Iop_Not{arg_t[0]}')
op_cmp_eq = mkcmpop('EQ')
op_cmp_ne = mkcmpop('NE')
op_cmp_slt = mkcmpop('LT', 'S')
op_cmp_sle = mkcmpop('LE', 'S')
op_cmp_ult = mkcmpop('LT', 'U')
op_cmp_ule = mkcmpop('LE', 'U')
op_cmp_sge = mkcmpop('GE', 'S')
op_cmp_uge = mkcmpop('GE', 'U')
op_cmp_sgt = mkcmpop('GT', 'S')
op_cmp_ugt = mkcmpop('GT', 'U')
def __init__(self, irsb):
self.arch = irsb.arch
self.irsb = irsb
def get_type(self, rdt):
return rdt.result_type(self.irsb.tyenv)
# Statements (no return value)
def _append_stmt(self, stmt):
self.irsb.statements += [stmt]
def imark(self, int_addr, int_length, int_delta=0):
self._append_stmt(IMark(int_addr, int_length, int_delta))
def get_reg(self, regname): # TODO move this into the lifter
return self.arch.registers[regname][0]
def put(self, expr_val, tuple_reg):
self._append_stmt(Put(copy.copy(expr_val), tuple_reg))
def store(self, addr, expr):
self._append_stmt(Store(copy.copy(addr), copy.copy(expr), self.arch.memory_endness))
def noop(self):
self._append_stmt(NoOp())
def add_exit(self, guard, dst, jk, ip):
"""
Add an exit out of the middle of an IRSB.
(e.g., a conditional jump)
:param guard: An expression, the exit is taken if true
:param dst: the destination of the exit (a Const)
:param jk: the JumpKind of this exit (probably Ijk_Boring)
:param ip: The address of this exit's source
"""
self.irsb.statements.append(Exit(guard, dst.con, jk, ip))
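    # A hypothetical usage sketch (assumes `c` is an IRSBCustomizer, `guard` is a
    # one-bit expression, and the Const width matches the guest word size):
    #   c.add_exit(guard, c.mkconst(0x4004, Type.int_32), JumpKind.Boring, ip)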
# end statements
def goto(self, addr):
self.irsb.next = addr
self.irsb.jumpkind = JumpKind.Boring
def ret(self, addr):
self.irsb.next = addr
self.irsb.jumpkind = JumpKind.Ret
def call(self, addr):
self.irsb.next = addr
self.irsb.jumpkind = JumpKind.Call
def _add_tmp(self, t):
return self.irsb.tyenv.add(t)
def _rdtmp(self, tmp):
return RdTmp.get_instance(tmp)
def _settmp(self, expr):
ty = self.get_type(expr)
tmp = self._add_tmp(ty)
self._append_stmt(WrTmp(tmp, expr))
return self._rdtmp(tmp)
def rdreg(self, reg, ty):
return self._settmp(Get(reg, ty))
def load(self, addr, ty):
return self._settmp(Load(self.arch.memory_endness, ty, copy.copy(addr)))
def op_ccall(self, retty, funcstr, args):
return self._settmp(CCall(retty, IRCallee(len(args), funcstr, 0xffff), args))
def ite(self, condrdt, iftruerdt, iffalserdt):
return self._settmp(ITE(copy.copy(condrdt), copy.copy(iffalserdt), copy.copy(iftruerdt)))
def mkconst(self, val, ty):
cls = ty_to_const_class(ty)
return Const(cls(val))
# Operations
def op_generic(self, Operation, op_generator):
def instance(*args): # Note: The args here are all RdTmps
for arg in args: assert isinstance(arg, (RdTmp, Const))
arg_types = [self.get_type(arg) for arg in args]
# two operations should never share the same argument instances, copy them here to ensure that
args = [copy.copy(a) for a in args]
op = Operation(op_generator(arg_types), args)
msg = "operation needs to be well typed: " + str(op)
assert op.typecheck(self.irsb.tyenv), msg + "\ntypes: " + str(self.irsb.tyenv)
return self._settmp(op)
return instance
def op_binary(self, op_format_str):
return self.op_generic(Binop, op_format_str)
def op_unary(self, op_format_str):
return self.op_generic(Unop, op_format_str)
def cast_to(self, rdt, tydest, signed=False, high=False):
goalwidth = get_type_size(tydest)
rdtwidth = self.get_rdt_width(rdt)
if rdtwidth > goalwidth:
return self.op_narrow_int(rdt, tydest, high_half=high)
elif rdtwidth < goalwidth:
return self.op_widen_int(rdt, tydest, signed=signed)
else:
return rdt
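# Cast sketch (op names assume standard libVEX naming conventions):
#   cast_to(rdt32, Type.int_64, signed=True)   # sign-extends via 'Iop_32Sto64'
#   cast_to(rdt64, Type.int_32)                # truncates via 'Iop_64to32'
#   cast_to(rdt64, Type.int_32, high=True)     # upper half via 'Iop_64HIto32'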
def op_to_one_bit(self, rdt):
rdtty = self.get_type(rdt)
if rdtty not in [Type.int_64, Type.int_32]:
rdt = self.op_widen_int_unsigned(rdt, Type.int_32)
onebit = self.op_narrow_int(rdt, Type.int_1)
return onebit
def op_narrow_int(self, rdt, tydest, high_half=False):
op_name = '{op}{high}to{dest}'.format(op='Iop_{arg_t[0]}',
high='HI' if high_half else '',
dest=get_op_format_from_const_ty(tydest))
return self.op_unary(make_format_op_generator(op_name))(rdt)
def op_widen_int(self, rdt, tydest, signed=False):
op_name = '{op}{sign}to{dest}'.format(op='Iop_{arg_t[0]}',
sign='S' if signed else 'U',
dest=get_op_format_from_const_ty(tydest))
return self.op_unary(make_format_op_generator(op_name))(rdt)
def op_widen_int_signed(self, rdt, tydest):
return self.op_widen_int(rdt, tydest, signed=True)
def op_widen_int_unsigned(self, rdt, tydest):
return self.op_widen_int(rdt, tydest, signed=False)
def get_msb(self, tmp, ty):
width = get_type_size(ty)
return self.get_bit(tmp, width-1)
def get_bit(self, rdt, idx):
shifted = self.op_shr(rdt, idx)
bit = self.op_extract_lsb(shifted)
return bit
def op_extract_lsb(self, rdt):
bitmask = self.mkconst(1, self.get_type(rdt))
return self.op_and(bitmask, rdt)
def set_bit(self, rdt, idx, bval):
currbit = self.get_bit(rdt, idx)
bvalbit = self.op_extract_lsb(bval)
areequalextrabits = self.op_xor(bval, currbit)
one = self.mkconst(1, self.get_type(areequalextrabits))
areequal = self.op_and(areequalextrabits, one)
shifted = self.op_shl(areequal, idx)
return self.op_xor(rdt, shifted)
def set_bits(self, rdt, idxsandvals):
ty = self.get_type(rdt)
if all(isinstance(idx, Const) for idx, _ in idxsandvals):
relevantbits = self.mkconst(sum([1 << idx.con.value for idx, _ in idxsandvals]), ty)
else:
relevantbits = self.mkconst(0, ty)
for idx, _ in idxsandvals:
shifted = self.op_shl(self.mkconst(1, ty), idx)
relevantbits = self.op_or(relevantbits, shifted)
setto = self.mkconst(0, ty)
for idx, bval in idxsandvals:
bvalbit = self.op_extract_lsb(bval)
shifted = self.op_shl(bvalbit, idx)
setto = self.op_or(setto, shifted)
shouldflip = self.op_and(self.op_xor(setto, rdt), relevantbits)
return self.op_xor(rdt, shouldflip)
def get_rdt_width(self, rdt):
return rdt.result_size(self.irsb.tyenv)
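# A minimal usage sketch (hypothetical; assumes `irsb` came from a lifter and
# that 'r0' exists in the target architecture's register file):
# c = IRSBCustomizer(irsb)
# c.imark(0x1000, 4)
# val = c.op_add(c.rdreg(c.get_reg('r0'), Type.int_32), c.mkconst(1, Type.int_32))
# c.put(val, c.get_reg('r0'))
# c.goto(c.mkconst(0x1004, Type.int_32))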
|
db/migrations/migration7.py
|
oktoshi/OpenBazaar-Server
| 723 |
81390
|
<reponame>oktoshi/OpenBazaar-Server
import sqlite3
def migrate(database_path):
print "migrating to db version 7"
conn = sqlite3.connect(database_path)
conn.text_factory = str
cursor = conn.cursor()
# create new table
cursor.execute('''CREATE TABLE IF NOT EXISTS audit_shopping (
audit_shopping_id integer PRIMARY KEY NOT NULL,
shopper_guid text NOT NULL,
contract_hash text,
"timestamp" integer NOT NULL,
action_id integer NOT NULL
);''')
cursor.execute('''CREATE INDEX IF NOT EXISTS shopper_guid_index ON audit_shopping (shopper_guid ASC);''')
cursor.execute('''CREATE INDEX IF NOT EXISTS action_id_index ON audit_shopping (action_id ASC);''')
# update version
cursor.execute('''PRAGMA user_version = 7''')
conn.commit()
conn.close()
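# Usage sketch (hypothetical database path):
# migrate('/path/to/openbazaar.db')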
|
src/structlog/_utils.py
|
bbeattie-phxlabs/structlog
| 1,751 |
81416
|
<reponame>bbeattie-phxlabs/structlog<filename>src/structlog/_utils.py
# SPDX-License-Identifier: MIT OR Apache-2.0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
"""
Generic utilities.
"""
import errno
import sys
from typing import Any, Callable
def until_not_interrupted(f: Callable[..., Any], *args: Any, **kw: Any) -> Any:
"""
Retry until *f* succeeds or an exception that isn't caused by EINTR occurs.
:param f: A callable like a function.
:param *args: Positional arguments for *f*.
:param **kw: Keyword arguments for *f*.
"""
while True:
try:
return f(*args, **kw)
except OSError as e:
if e.args[0] == errno.EINTR:
continue
raise
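# A hypothetical usage sketch: retry a syscall wrapper that may be interrupted
# by signals (mostly relevant on interpreters without PEP 475 semantics):
#   until_not_interrupted(os.write, fd, b"message")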
def get_processname() -> str:
# based on code from
# https://github.com/python/cpython/blob/313f92a57bc3887026ec16adb536bb2b7580ce47/Lib/logging/__init__.py#L342-L352
processname = "n/a"
mp: Any = sys.modules.get("multiprocessing")
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import.
try:
processname = mp.current_process().name
except Exception:
pass
return processname
|
learn2learn/gym/envs/particles/particles_2d.py
|
Brikwerk/learn2learn
| 1,774 |
81418
|
#!/usr/bin/env python3
import numpy as np
from gym import spaces
from gym.utils import seeding
from learn2learn.gym.envs.meta_env import MetaEnv
class Particles2DEnv(MetaEnv):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/gym/envs/particles/particles_2d.py)
**Description**
Each task is defined by the location of the goal. A point mass
receives a directional force and moves accordingly
(clipped in [-0.1,0.1]). The reward is equal to the negative
distance from the goal.
**Credit**
    Adapted from Jonas Rothfuss' implementation.
"""
def __init__(self, task=None):
self.seed()
super(Particles2DEnv, self).__init__(task)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(2,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self.reset()
# -------- MetaEnv Methods --------
def sample_tasks(self, num_tasks):
"""
Tasks correspond to a goal point chosen uniformly at random.
"""
goals = self.np_random.uniform(-0.5, 0.5, size=(num_tasks, 2))
tasks = [{'goal': goal} for goal in goals]
return tasks
def set_task(self, task):
self._task = task
self._goal = task['goal']
# -------- Gym Methods --------
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self, env=True):
"""
Sets point mass position back to (0,0)
"""
self._state = np.zeros(2, dtype=np.float32)
return self._state
def step(self, action):
"""
**Description**
Given an action, clips the action to be in the
appropriate range and moves the point mass position
according to the action.
**Arguments**
action (2-element array) - Array specifying the magnitude
and direction of the forces to be applied in the x and y
planes.
**Returns**
*state, reward, done, task*
* state (arr) - is a 2-element array encoding the x,y position of
the point mass
        * reward (float) - signal equal to the negative Euclidean distance
        from the goal
* done (bool) - boolean indicating whether or not the point mass
is epsilon or less distance from the goal
* task (dict) - dictionary of task specific parameters and their current
values
"""
action = np.clip(action, -0.1, 0.1)
assert self.action_space.contains(action)
self._state = self._state + action
x = self._state[0] - self._goal[0]
y = self._state[1] - self._goal[1]
reward = -np.sqrt(x ** 2 + y ** 2)
done = ((np.abs(x) < 0.01) and (np.abs(y) < 0.01))
return self._state, reward, done, self._task
def render(self, mode=None):
raise NotImplementedError
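# A minimal rollout sketch (hypothetical):
# env = Particles2DEnv()
# env.set_task(env.sample_tasks(1)[0])
# state = env.reset()
# state, reward, done, task = env.step(env.action_space.sample())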
|
tests/dispatcher/popart/convnet_test.py
|
gglin001/poptorch
| 128 |
81425
|
<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import torch
import torch.nn as nn
import poptorch
import helpers
def test_mnist():
# A helper block to build convolution-pool-relu blocks.
class Block(nn.Module):
def __init__(self, in_channels, num_filters, kernel_size, pool_size):
super(Block, self).__init__()
self.conv = nn.Conv2d(in_channels,
num_filters,
kernel_size=kernel_size)
self.pool = nn.MaxPool2d(kernel_size=pool_size)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
x = self.relu(x)
return x
class Network(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = Block(1, 10, 5, 2)
self.layer2 = Block(10, 20, 5, 2)
self.layer3 = nn.Linear(320, 256, bias=False)
self.layer3_act = nn.ReLU()
self.layer4 = nn.Linear(256, 10)
self.softmax = nn.LogSoftmax(1)
self.loss = nn.NLLLoss(reduction="mean")
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = x.view(-1, 320)
x = self.layer3_act(self.layer3(x))
x = self.layer4(x)
x = self.softmax(x)
return x
model = Network()
input = torch.ones([1, 1, 28, 28])
# Gather up all the buffers and parameters.
def all_data(model):
yield from model.named_parameters()
yield from model.named_buffers()
with poptorch.IPUScope([input], all_data(model)) as ipu:
out = model(input)
ipu.outputs([out])
# pylint: disable=no-member
helpers.assert_allclose(expected=model(input),
actual=ipu(input),
atol=1e-05,
rtol=1e-05,
equal_nan=True)
|
changes/models/artifact.py
|
vault-the/changes
| 443 |
81456
|
<reponame>vault-the/changes
from __future__ import absolute_import
import uuid
from datetime import datetime
from sqlalchemy import Column, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import UniqueConstraint, Index
from changes.config import db
from changes.db.types.filestorage import FileData, FileStorage
from changes.db.types.guid import GUID
from changes.db.types.json import JSONEncodedDict
ARTIFACT_STORAGE_OPTIONS = {
'path': 'artifacts',
}
class Artifact(db.Model):
"""
The artifact produced by one job/step, produced on a single machine.
Sometimes this is a JSON dict referencing a file in S3, sometimes
it is Null, sometimes it is an empty dict. It is basically any file
    left behind after a run for Changes to pick up.
"""
id = Column(GUID, primary_key=True, default=uuid.uuid4)
job_id = Column(GUID, ForeignKey('job.id', ondelete="CASCADE"), nullable=False)
step_id = Column(GUID, ForeignKey('jobstep.id', ondelete="CASCADE"), nullable=False)
project_id = Column(GUID, ForeignKey('project.id', ondelete="CASCADE"), nullable=False)
name = Column(String(1024), nullable=False)
date_created = Column(DateTime, nullable=False, default=datetime.utcnow)
data = Column(JSONEncodedDict)
file = Column(FileStorage(**ARTIFACT_STORAGE_OPTIONS))
job = relationship('Job', backref=backref('artifacts'))
project = relationship('Project')
step = relationship('JobStep', backref=backref('artifacts'))
__tablename__ = 'artifact'
__table_args__ = (
UniqueConstraint('step_id', 'name', name='unq_artifact_name'),
Index('idx_artifact_job_id', 'job_id'),
Index('idx_artifact_project_id', 'project_id'),
)
def __init__(self, **kwargs):
super(Artifact, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
if self.date_created is None:
self.date_created = datetime.utcnow()
if self.data is None:
self.data = {}
if self.file is None:
# TODO(dcramer): this is super hacky but not sure a better way to
# do it with SQLAlchemy
self.file = FileData({}, ARTIFACT_STORAGE_OPTIONS)
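# A hypothetical construction sketch (IDs would come from existing Job/JobStep/
# Project rows; session handling follows the usual SQLAlchemy pattern):
# artifact = Artifact(job_id=job.id, step_id=jobstep.id,
#                     project_id=project.id, name='junit.xml')
# db.session.add(artifact)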
|
enaml/qt/docking/dock_resources.py
|
xtuzy/enaml
| 1,080 |
81476
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Tue Jul 2 13:23:21 2013
# by: The Resource Compiler for PyQt (Qt v4.8.3)
#
# WARNING! All changes made in this file will be lost!
# this line manually edited
from enaml.qt import QtCore
qt_resource_data = b"\
\x00\x00\x02\x61\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x6f\x00\x00\x00\x6f\x08\x06\x00\x00\x00\xe2\xc5\x9e\x60\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x02\x03\x49\x44\x41\x54\x78\x9c\xed\
\xdc\xc1\x49\xc5\x40\x14\x46\xe1\xff\xe9\x2b\x2d\xb5\xd8\x42\xb2\
\x75\xf7\xd2\x82\x6d\x58\x43\x96\xf6\x93\x9d\x2e\x64\xe0\x21\x88\
\x20\x38\x77\xce\x78\x4e\x03\x73\xc9\x47\x36\x21\x73\x2f\x19\xa8\
\x75\x5d\xdf\xab\x67\xf8\xa9\x7d\xdf\x2f\xd5\x33\xb4\xae\xd5\x03\
\x7c\xed\x76\xbb\xbd\x54\xcf\xf0\x5d\xdb\xb6\x3d\x55\xcf\x70\xdf\
\x43\xf5\x00\xf6\xfb\xc4\x03\x27\x1e\x38\xf1\xc0\x89\x07\x4e\x3c\
\x70\xe2\x81\x13\x0f\x9c\x78\xe0\xc4\x03\x27\x1e\x38\xf1\xc0\x89\
\x07\x4e\x3c\x70\xe2\x81\x13\x0f\x9c\x78\xe0\xc4\x03\x27\x1e\x38\
\xf1\xc0\x89\x07\x4e\x3c\x70\xe2\x81\x13\x0f\xdc\x63\xf5\x00\xad\
\x75\x5d\xdf\x47\xfe\xe1\x36\x49\x96\x65\x79\x3b\xcf\xf3\xf5\x38\
\x8e\xe7\xea\x59\x92\x41\xf0\x08\x70\xad\x91\x00\xcb\xf1\x48\x70\
\xad\x51\x00\x4b\xf1\x88\x70\xad\x11\x00\xcb\xf0\xc8\x70\xad\x6a\
\xc0\x12\xbc\x19\xe0\x5a\x95\x80\xdd\xf1\x66\x82\x6b\x55\x01\x76\
\xc5\x9b\x11\xae\x55\x01\xd8\x0d\x6f\x66\xb8\x56\x6f\xc0\x4b\xaf\
\xab\xc4\xb3\xc3\xdd\xd7\xeb\x06\xed\xe5\x3f\xbc\x11\x33\xb6\x6d\
\xdb\x93\xdf\x36\xc1\x89\x07\x4e\x3c\x70\xe2\x81\x13\x0f\x9c\x78\
\xe0\xc4\x03\x27\x1e\x38\xf1\xc0\x89\x07\x4e\x3c\x70\xe2\x81\x13\
\x0f\x9c\x78\xe0\xc4\x03\x27\x1e\x38\xf1\xc0\x89\x07\x4e\x3c\x70\
\xe2\x81\x13\x0f\x9c\x78\xe0\xc4\x03\x27\x1e\x38\xf1\xc0\x89\x07\
\x4e\x3c\x70\xe2\x81\x13\x0f\x9c\x78\xe0\xc4\x03\x27\x1e\x38\xf1\
\xc0\x89\x07\x4e\x3c\x70\xe2\x81\x13\x0f\x9c\x78\xe0\xdc\xc3\xf2\
\x07\x75\xdb\xc3\xd2\xe3\x90\xe4\x7f\x6c\x40\x4a\x3e\xe1\xf6\x7d\
\xef\xf2\x5c\xbb\xad\xaf\x3a\x8e\xe3\xf9\x3c\xcf\xd7\x65\x59\xde\
\x7a\x9d\xd9\xbb\x9e\x70\x49\xe7\xc5\x71\x33\x03\xf6\x86\x4b\x0a\
\x56\x36\xce\x08\x58\x01\x97\x14\x2d\x4b\x9d\x09\xb0\x0a\x2e\x29\
\x5c\x53\x3c\x03\x60\x25\x5c\x52\xbc\x20\x9c\x0c\x58\x0d\x97\x0c\
\xb0\x9a\x9f\x08\x38\x02\x5c\x32\x00\x5e\xc2\x02\x1c\x05\x2e\x19\
\x04\x2f\x61\x00\x8e\x04\x97\xf8\x6d\x13\x9d\x78\xe0\xc4\x03\x27\
\x1e\x38\xf1\xc0\x89\x07\x4e\x3c\x70\xe2\x81\x13\x0f\x9c\x78\xe0\
\xc4\x03\x27\x1e\x38\xf1\xc0\x89\x07\x4e\x3c\x70\xe2\x81\x13\x0f\
\x9c\x78\xe0\xc4\x03\x27\x1e\x38\xf1\xc0\x89\x07\x4e\x3c\x70\xe2\
\x81\xbb\x56\x0f\xf0\xb5\x5e\x57\x82\x67\xe8\x03\xdb\xf1\xfe\x32\
\xdf\x7a\xb4\x66\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x01\xda\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x49\x49\x44\x41\x54\
\x48\x4b\xbd\xcc\xbd\x4a\x03\x51\x10\x86\x61\x2f\xce\x8b\xf0\x46\
\x2c\x2c\x2c\x2c\x2c\x14\x2c\x62\x61\x21\xc1\xc6\xc2\xc2\xc2\xc2\
\xc2\xc2\x1b\x10\x11\x11\x91\x20\x22\x31\x6e\xf6\xf7\xec\x6e\xf6\
\x37\xd9\x24\x36\xce\x30\x73\xa6\x90\x54\x0e\x67\xe1\xed\xbe\x8f\
\x67\xeb\x78\x30\x74\x17\xea\x6d\xb7\x76\x11\xeb\xcd\x62\xe5\xa2\
\x5e\xf4\x7a\xbe\xa4\xb6\x77\xf6\xf4\x89\xd6\x8b\x5e\xb5\x1d\xf5\
\xe7\xf7\xbf\x44\xeb\x45\x2f\x9b\x05\x05\xdb\x8f\xb9\xd7\x04\x82\
\x68\x9b\xf4\xf8\x4e\xd3\x06\xbd\xa8\xe7\x14\xea\xe1\xad\x26\x10\
\x44\x63\x7d\x56\xb5\x14\xea\xfe\x8d\x26\x10\x44\x63\x3d\x2f\x1b\
\x0a\xb6\xb5\x77\xad\x09\x04\xd1\x58\xcf\x8a\x86\x42\x7d\x72\xa5\
\x09\x04\xd1\x58\x37\x79\x4d\xa1\x3e\xbe\xd4\x04\x82\x68\xac\x27\
\x59\x45\xc1\xb6\xfa\xbc\xd0\x04\x82\x68\xac\xc7\x69\x49\xa1\xfe\
\x71\xae\x09\x04\xd1\x58\x8f\x4c\x41\xa1\xfe\x7e\xa6\x09\x04\xd1\
\x58\x0f\x93\x82\x42\x7d\x74\xaa\x09\x04\xd1\x58\x0f\xe2\x19\x05\
\xdb\xf2\xed\x44\x13\x08\xa2\xb1\xee\x47\x39\x85\xfa\xeb\x91\x26\
\x10\x44\x63\x7d\x1a\xe5\x14\xea\x2f\x87\x9a\x40\x10\xcd\xea\x61\
\x46\xc1\xd6\x3d\x1f\x68\x42\xdd\x6a\xac\x7b\x41\x46\xa1\xfe\xb4\
\xaf\x09\x04\xd1\x58\xff\x0e\x52\x0a\xf5\x47\x55\x20\x88\x66\x75\
\x3f\xa5\x50\x7f\xd8\xd5\x84\xba\xd5\x58\x9f\xf8\x86\x82\x4d\x9f\
\x68\xac\x7f\x4d\x8d\x8b\xac\xee\x25\x2e\x62\x7d\xec\x25\x2e\x62\
\xdd\x55\x83\xe1\x2f\x82\x32\x64\x70\x80\xdc\x0e\xed\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xcb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x8b\x00\x00\x00\x8b\x08\x06\x00\x00\x00\x51\x19\x6a\xff\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x02\x6d\x49\x44\x41\x54\x78\x9c\xed\
\xdd\xb1\x6d\xdc\x50\x10\x45\xd1\x59\x5b\xa5\xb1\x16\xb5\x40\xa6\
\xca\x96\x2d\xb8\x0d\xd5\xc0\x50\xfd\x30\x93\x23\x02\x4e\x0c\x3d\
\x63\xd7\x20\xe7\xeb\x9c\x0a\x1e\xb0\x77\x23\x02\xf3\x6f\x35\x80\
\x79\x9e\x3f\xcf\xde\xf0\x95\x75\x5d\x6f\x67\x6f\x78\xd4\xcb\xd9\
\x03\x9e\xe5\x7e\xbf\xff\x3a\x7b\xc3\xdf\x2c\xcb\xf2\x7a\xf6\x86\
\x67\xf8\x71\xf6\x00\xfa\x10\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\
\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\
\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\
\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\
\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\
\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\
\x13\x0b\xb1\x9f\x67\x0f\x78\xd4\x3c\xcf\x9f\x57\x3e\x6b\x5a\x55\
\x35\x4d\xd3\xc7\xbe\xef\xef\xdb\xb6\xbd\x9d\xbd\xe5\x11\xad\x63\
\xe9\x10\xca\x61\x84\x60\xda\xc6\xd2\x29\x94\x43\xf7\x60\x5a\xc6\
\xd2\x31\x94\x43\xe7\x60\xda\xc5\xd2\x39\x94\x43\xd7\x60\x5a\xc5\
\x32\x42\x28\x87\x8e\xc1\xb4\x89\x65\xa4\x50\x0e\xdd\x82\x69\x11\
\xcb\x88\xa1\x1c\x3a\x05\x73\xf9\x58\x46\x0e\xe5\xd0\x25\x98\xdb\
\xd5\x1f\x76\x1a\x3d\x94\x3f\x5d\xfd\x5d\xa2\xdb\x77\xf8\xe7\xf2\
\xb8\x65\x59\x5e\x7d\x1b\x22\x26\x16\x62\x62\x21\x26\x16\x62\x62\
\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\
\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\
\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\
\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\
\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\x21\x26\x16\x62\x62\
\x21\x26\x16\x62\xee\xe0\x5e\xc8\xe5\xef\xe0\x9e\x3d\xe0\x2b\xdf\
\xe5\x4e\xef\xb2\x2c\xaf\xeb\xba\x5e\xfa\xf7\xb8\xfc\x39\xf6\x6d\
\xdb\xde\xf6\x7d\x7f\x9f\xa6\xe9\xe3\xec\x2d\xff\x4b\x87\x50\xaa\
\x1a\xc4\x52\x35\x76\x30\x5d\x42\xa9\x6a\x12\x4b\xd5\x98\xc1\x74\
\x0a\xa5\xaa\x51\x2c\x55\x63\x05\xd3\x2d\x94\xaa\x66\xb1\x54\x8d\
\x11\x4c\xc7\x50\xaa\x1a\xc6\x52\xd5\x3b\x98\xae\xa1\x54\x35\x8d\
\xa5\xaa\x67\x30\x9d\x43\xa9\x6a\x1c\x4b\x55\xaf\x60\xba\x87\x52\
\xd5\x3c\x96\xaa\x1e\xc1\x8c\x10\x4a\x95\x6f\x43\xfc\x03\xb1\x10\
\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\
\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\
\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\
\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\
\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\
\x13\x0b\x31\xb1\x10\x13\x0b\x31\xb1\x10\x7b\x39\x7b\xc0\xb3\x5c\
\xfd\x61\xa7\x11\xfc\x06\x85\xf5\xfe\x6a\xa4\x26\xa3\xb0\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xe1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x13\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x8a\xf0\x61\xe0\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x50\x49\x44\x41\x54\
\x48\x4b\xa5\x95\x39\x4a\x04\x61\x10\x46\xe7\x70\x1e\xc2\x8b\x18\
\x18\x18\x18\x18\x28\x18\x68\x60\x20\x62\x62\x60\x60\x60\x60\x60\
\xe0\x05\x44\x44\x44\x64\x10\x91\x59\x7a\x5f\xa7\xd7\x99\x9e\x25\
\xb1\x6a\xaa\xfe\x02\xc1\x1a\xe9\x16\x5e\xf6\xbe\x47\x85\xd5\x3b\
\x3a\xb9\xe8\x46\x0f\xb2\xba\x59\xb6\x05\x2a\x2c\xab\xd9\xa2\x2d\
\xff\x2e\xcb\xe9\x9c\xd8\xda\xde\xdd\x8c\x2c\xf9\x66\xf7\xb2\xa8\
\x1b\xe2\xcf\x9b\xb2\xe4\x9b\xdd\xcb\xbc\x9a\x11\x70\x73\x15\x3d\
\x68\x80\x95\x25\xdf\xfc\x51\x06\xf7\x2b\x85\x5f\xca\xac\x9c\x12\
\x78\xd3\xbb\xd3\x00\x2b\x4b\xbe\x39\x29\x6a\x02\x4b\xe7\x56\x03\
\xac\x2c\xb9\x4c\xf3\x8a\x00\xb7\xb4\x6e\x34\xc0\xca\x92\xcb\x24\
\xab\x08\x2c\x47\xd7\x1a\x60\x65\xc9\x65\x94\x96\x04\x96\x83\x2b\
\x0d\xb0\xb2\xe4\x32\x4c\x0a\x02\xdc\xe2\xeb\x52\x03\xac\x2c\xb9\
\x0c\xe2\x9c\xc0\xf2\xf3\x5c\x03\xac\x2c\xb9\xf4\xa3\x8c\xc0\xf2\
\xe3\x4c\x03\xac\x2c\xb9\xf4\xc2\x8c\xc0\xb2\x7f\xaa\x01\x56\x96\
\x5c\xba\xc1\x84\x00\x37\x7f\x3f\xd6\x00\x2b\x4b\x2e\x1d\x3f\x25\
\xb0\x7c\x3b\xd4\x00\x2b\x4b\x2e\x6d\x3f\x25\xb0\x7c\x3d\xd0\x00\
\x2b\x4b\x53\x7a\x89\xbd\x06\x5c\xf3\xb2\xaf\x81\xa5\x59\x72\x69\
\xb9\x09\x81\xe5\xf3\x9e\x06\x58\x59\x72\x39\x76\x63\x02\xcb\x27\
\x15\xb0\xb2\x34\xa5\x13\x8f\xd7\x60\xf9\xb8\xa3\x81\xa5\x59\x72\
\x39\x72\x22\x02\xdc\x66\x64\xc9\xe5\xd0\x8e\xda\x62\x4a\x2b\x1c\
\xb6\x84\xcb\x81\x15\xb6\x85\xcb\x6e\xbf\xec\x1b\xdd\xce\x28\xdf\
\xf5\x17\x62\x31\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x01\xb8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x27\x49\x44\x41\x54\
\x48\x4b\xbd\xd2\x3d\x4e\xc3\x40\x10\x86\x61\x0e\xc7\x21\x38\x0a\
\xd4\x74\x48\x29\x42\x1a\x8a\xc8\x14\xd4\x34\x49\xc3\x15\x68\xa8\
\x68\x68\x02\x38\x76\x9c\xf5\x4f\xfc\x9b\x38\x09\x0d\x63\xcd\xe7\
\x45\x5a\x27\x01\xb1\x1a\x4b\xaf\xb6\xd8\x19\x3d\xdb\xec\xd9\xcd\
\x70\x2c\x57\xa3\x57\xf5\x5e\x22\xe8\xe5\x66\x27\x51\x2f\x7a\xb1\
\xde\x72\xe7\x17\x57\xf6\x69\xad\x17\x3d\xaf\x6a\xce\xd8\xfb\x5f\
\x5a\xeb\x45\xcf\xca\x0d\x47\xb3\x2f\xf5\x64\x13\x09\x5a\x3b\xa0\
\xef\xbd\x47\x9b\x0e\xe8\x69\xb1\xe6\x1a\x7d\xf6\x60\x13\x09\x5a\
\x83\xbe\xca\x2b\x8e\x66\xbb\xb7\x3b\x9b\x48\xd0\x1a\xf4\x24\x2b\
\x39\x9a\x6d\x5f\x07\x36\x91\xa0\x35\xe8\x71\x5a\x72\x8d\xfe\x72\
\x6d\x13\x09\x5a\x83\x1e\x26\x05\x47\xb3\xfa\xf9\xd2\x26\x12\xb4\
\x06\x5d\xc5\x39\x47\x33\xfb\xb4\x06\x7d\x19\x65\x12\x41\x0f\xc2\
\x54\x22\xe8\x0b\x95\x4a\x04\xdd\x5f\xae\x24\x82\xee\x05\x89\x44\
\xd0\xe7\x41\xf2\x97\x46\xf7\x53\xfa\x12\x74\x1a\xf7\xc7\x6a\xf5\
\x45\xfc\x6b\x4c\x73\xcd\x03\x9d\x85\x6e\xd0\x5d\x3f\x3e\xdd\xc8\
\xf9\xa1\xf1\x80\x33\x35\x76\xba\x41\xff\xf4\xa3\x13\xdd\x3a\x13\
\x83\xe6\xe8\xde\xd8\x34\x6a\x75\x2f\x92\x08\xfa\x87\x17\x4a\x04\
\xfd\x7d\x1e\x4a\xd4\xea\xae\x92\x08\xfa\xcc\x55\x12\x41\x97\x6a\
\x38\xfe\x06\xe0\x80\xad\xee\xa3\x69\x89\x6f\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xc2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x29\x00\x00\x00\x29\x08\x06\x00\x00\x00\xa8\x60\x00\xf6\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x64\x49\x44\x41\x54\x58\x85\xed\
\xd9\xc1\x0d\x80\x30\x0c\xc0\xc0\x14\x31\x2b\x2b\x24\x23\x64\x06\
\x96\x85\x09\x90\x78\x58\x6a\x2b\xd9\x13\xdc\xdf\x23\x33\x9f\x58\
\xbc\x33\x22\xa2\xbb\xef\xd9\x90\xaf\xaa\xea\x3a\x66\x23\xfe\x24\
\x92\x4a\x24\x95\x48\x2a\x91\x54\x22\xa9\x44\x52\x89\xa4\x12\x49\
\x25\x92\x4a\x24\x95\x48\x2a\x91\x54\x22\xa9\x44\x52\x89\xa4\x12\
\x49\x25\x92\x4a\x24\xd5\x16\xc8\xb1\xc3\xc7\x79\x01\x28\xc6\x09\
\x1b\x33\x94\xbf\xef\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x01\xc2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x31\x49\x44\x41\x54\
\x48\x4b\xbd\xcd\xbb\x4e\xc3\x40\x10\x85\x61\x1e\x8e\x87\xe0\x4d\
\x10\x25\x6d\xba\x88\x36\xb4\xb4\xd4\x3c\x02\x05\x0d\x0d\x05\x12\
\x04\xc7\x8e\xb3\xbe\xc4\xd7\xc4\x49\x68\x38\xab\x99\x1d\xad\x28\
\x67\xb5\x96\x7e\x59\x23\x9d\xd5\xe7\xab\xc5\x72\x15\x2f\xab\x8f\
\xd3\x25\x46\xac\x0f\xc7\x73\x8c\x66\xd1\xfb\xc3\x89\xba\xbe\xb9\
\x0b\x4f\xb4\x59\xf4\x6e\x9c\xa8\x7f\xef\x74\x89\x36\x8b\xde\x0e\
\x47\x0a\x9b\xdc\xba\x7c\x61\x16\xbd\xe9\x0f\x14\x36\xb9\x75\xf9\
\x02\xeb\xfb\x6e\xa4\xb0\xc9\xad\xcb\x17\x58\xaf\xdb\x81\xc2\x26\
\xb7\x2e\x5f\x60\xbd\x6a\x06\x0a\x1b\xfa\x35\x2f\xea\x2f\x12\x8d\
\xf5\xa2\xee\x29\xfb\x28\x7b\x0e\x09\x82\x68\xac\x9b\xaa\xa3\xb0\
\x5d\xd6\x4f\x21\x41\x10\x8d\xf5\x5d\xd9\x52\xd8\xce\x9f\x8f\x21\
\x41\x10\x8d\xf5\xbc\x68\x28\xab\x7f\x3c\x84\x04\x41\x34\xd6\xb7\
\xa6\xa1\xb0\x9d\xde\x17\x21\x41\x10\x8d\xf5\x6c\xb7\xa7\xb0\x4d\
\x6f\xf7\x21\x41\x10\x8d\xf5\x34\xaf\x29\x6c\xf6\x07\xaf\xb7\xea\
\x2f\x12\x8d\xf5\x4d\x5e\x53\xd8\xe4\xd6\xe5\x0b\x4e\xdf\x56\x94\
\xdd\xdc\xad\xcb\x17\x58\x4f\xb2\x8a\xc2\x26\xb7\x2e\x5f\x60\xfd\
\x27\x2b\x29\x6c\x72\xeb\xf2\x05\xa7\xa7\x25\x65\x37\x77\xeb\xf2\
\x05\xd6\xd7\x69\x41\x61\x0b\x4f\x34\xd6\xbf\x37\x45\x8c\x9c\x9e\
\x98\x18\xb1\xfe\x95\x98\x18\xb1\x1e\xab\xe5\xea\x0f\x0e\x98\x91\
\x35\xc6\xa1\x36\xaa\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x01\x4e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x0a\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x51\x4b\xcb\xc2\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0b\x12\x00\x00\x0b\x12\x01\xd2\xdd\
\x7e\xfc\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x00\xbd\x49\x44\x41\x54\
\x38\x4f\x8d\xc9\xc9\x8d\xc2\x50\x00\x04\x51\x82\x23\x08\x52\x21\
\x05\x6e\xdc\x89\x6d\x00\x63\x63\xbc\xe1\x95\x65\xe6\x34\xa0\xea\
\xdf\xa7\x0f\xb2\x54\x97\xd2\x5b\x6c\xb6\xbb\x2f\xbd\x79\x7a\xfc\
\x45\x13\x8f\xf7\xdf\x68\xf3\x78\xb8\x3d\x69\xb9\x5a\xbf\xf2\xce\
\xe3\x7e\x7a\x10\xec\x9d\xc7\xdd\x78\x27\xd8\x3b\x8f\xdb\xe1\x46\
\xb0\x57\x7c\xed\x27\x82\xbd\xe2\xa6\x1b\x09\xf6\x8a\xeb\x76\x24\
\xd8\x2b\x2e\x9b\x81\x60\xaf\xb8\xa8\x7b\x82\xbd\xe2\x4b\xd5\x11\
\xec\x15\xe7\x65\x4b\xb0\x57\x7c\x2e\x5a\x82\xbd\xe2\xec\x72\x25\
\xd8\x2b\x4e\xf3\x86\x60\xaf\xf8\x94\x37\x04\x7b\x03\x9f\x6b\x12\
\x87\x15\x27\x59\x4d\xb0\x57\x7c\xcc\x2a\x82\xbd\x81\xd3\x8a\xc4\
\x61\xc5\x87\xb4\x24\xd8\x2b\xde\x9f\xca\x68\x81\x93\x22\x9a\xf8\
\x27\x29\xa2\x89\x3f\xb6\xdd\xfd\x03\xaf\x34\xbc\x27\xb0\x9e\x89\
\xd7\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x22\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x91\x49\x44\x41\x54\
\x48\x4b\xbd\xcd\xcb\x4a\xc3\x50\x10\xc6\x71\x1f\xce\x87\xf0\x45\
\x5c\xb8\x70\xe1\xc2\x85\x82\x8b\x8a\xd6\x0b\x45\x5a\x5b\x70\xe1\
\xc2\x85\xb5\x5d\xf8\x02\x22\x22\x22\x22\x22\x52\x6b\x9a\x5e\x92\
\x26\x4d\xdb\xf4\xee\xc6\x19\x32\x19\x0e\xa7\x65\x7a\x10\x52\xf8\
\xad\x26\xdf\xf9\x67\x6d\x3f\x95\x49\x0e\xd6\x07\xe3\x59\x12\xa8\
\x1e\x8e\xa6\x49\x58\x49\xbd\x3f\x9c\x18\x5a\xdf\xd8\x5a\x8a\xc7\
\x2b\xa9\xf7\x06\x63\x43\x5a\x68\x21\x1e\xaf\xa4\xde\x0d\x47\x86\
\xe0\xf1\xaf\x7b\x2f\x80\x01\x8f\xff\x55\x6f\x95\x05\x0b\xea\x41\
\x7f\x68\x08\xeb\x8d\x5b\x01\x0c\x78\x4c\xf5\x4e\x6f\xa0\x3a\x2b\
\x94\xb5\x0b\xc3\xba\x7d\x23\x80\x01\x8f\xa9\xee\x77\x43\x15\x2c\
\x4e\x0b\x25\xed\x18\x81\x4f\x33\xeb\x5a\x00\x03\x1e\x53\xdd\x0b\
\x42\x15\x2c\xf0\x07\xf9\x92\x76\x07\x58\xaf\x5e\x09\x60\xc0\x63\
\xaa\xbb\x7e\x5f\x15\xd5\xc1\xc9\x65\x69\xfe\xd3\xac\x92\x17\xc0\
\x80\xc7\x54\x77\xbc\x9e\x8a\xeb\x60\xfe\xd3\xf4\xeb\x42\xa0\x3e\
\xa1\x7a\xab\xdd\x55\x71\x3a\x9d\xbb\x9b\xff\x34\xfd\x3c\x17\xc0\
\x80\xc7\x54\x6f\xba\x81\x2a\x4a\x1f\xe7\x8a\xda\x1d\x60\xfd\x23\
\x2d\x80\x01\x8f\xa9\xde\x70\x02\x15\xa6\xb3\x45\xed\x18\xc1\xfa\
\xfb\xa1\x00\x06\x3c\xa6\x7a\xbd\xd5\x51\x1d\x65\x8b\xda\x85\xc1\
\xe3\xc9\xdb\x81\x00\x06\x3c\xa6\xba\xdd\xf4\x0d\x61\xfd\x75\x4f\
\x00\x03\x1e\x53\xbd\xd6\xf4\x0d\x61\xfd\x65\x57\x00\x03\x1e\xc7\
\xf5\x86\x67\x08\x1e\x8f\x9f\x77\x04\x58\x8f\xc7\x54\xb7\xea\x9e\
\x21\xac\x3f\x6d\x0b\x60\xc0\x63\xaa\xff\xd4\xdb\x86\xb0\xfe\x28\
\x81\x01\x8f\xe3\xba\xdd\x36\x84\xf5\x87\x4d\x01\xd6\xe3\x31\xd5\
\xab\xb6\x6b\x08\x1e\x2f\xc5\x63\xaa\x7f\xd7\xdc\x24\xc4\x75\xcb\
\x49\x02\xd5\x2b\x96\x93\x04\xaa\x27\x25\x95\xf9\x03\x6c\x41\xe7\
\xb2\x07\xe6\xaf\xd1\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x02\x24\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x93\x49\x44\x41\x54\
\x48\x4b\xbd\xcc\xbb\x4a\x03\x61\x10\x05\x60\x1f\xce\x87\xf0\x45\
\x2c\x52\x58\x58\x58\x28\x58\xc4\xc2\x42\x82\x8d\x85\x85\x85\x85\
\x85\x85\x2f\x20\x22\x22\x22\x22\x22\x31\xe6\x9e\xcd\xfd\xe6\x66\
\xb3\x8d\x27\xcc\xec\xe1\xe7\xff\x19\x10\xc3\x06\xbe\xe2\x30\x73\
\x38\x5b\x47\xc5\x52\x7e\x56\xeb\xb3\x78\x99\x07\x5d\x9f\xfe\x24\
\x79\xd8\xc8\xfa\x64\xbe\x10\xdb\x3b\x85\xf5\x71\x6d\x23\xeb\xe3\
\x59\x2c\xbc\xde\xff\x70\x6d\x23\xeb\xa3\xe9\x8f\xc0\x8f\x99\xd2\
\xe8\xce\xe2\x35\xc1\x5d\xf8\xd3\x3a\x8e\x69\xfb\x36\x64\x95\x99\
\x75\x7d\x38\x99\x0b\xfc\x98\x29\x6d\xde\x58\xbc\x26\xb8\x0b\xba\
\x3e\x18\xcf\x04\x7e\xcc\x84\x63\x5a\xbf\x0e\x59\x65\x66\x5d\xef\
\x8f\xa6\x02\x3f\x66\x5a\x56\xaf\x2c\x5e\x13\xdc\x05\x5d\xef\x0d\
\xa7\x02\x3f\x66\xc2\x71\x59\xb9\x0c\x59\x65\x66\x5d\x8f\xfa\x13\
\x81\x1f\x33\x2d\xcb\x17\x16\xaf\x09\xee\x82\xae\x77\x7a\x63\x81\
\x1f\x33\xe1\x98\x7c\x9e\x87\xac\x32\xb3\xae\xb7\xbb\x23\x81\x1f\
\x33\x25\x1f\x67\x16\xaf\x09\xee\x82\xae\xb7\xa2\xa1\xc0\x8f\x99\
\x70\x4c\xde\x4f\x43\x56\x99\x59\xd7\x9b\x9d\xa1\xc0\x8f\x99\x92\
\xb7\x13\x8b\xd7\x04\x77\x41\xd7\x1b\xed\x81\xc0\x8f\x99\x70\x5c\
\xbc\x1e\x87\xac\x32\xb3\xae\xd7\x5b\x7d\x81\x1f\x33\x2d\x5e\x0e\
\x2d\x5e\x13\xdc\x05\x5d\xaf\xb5\xfa\x02\x3f\x66\xc2\x71\xf1\x7c\
\x10\xb2\xca\xcc\xd9\x7a\xb3\x27\x56\xbf\x2c\x53\xfc\xb4\x6f\xf1\
\x9a\xe0\x2e\xe8\x7a\xb5\xd1\x13\xf8\x31\x13\x8e\xf1\xe3\x5e\xc8\
\x2a\x33\xeb\xfa\x77\xa3\x2b\xf0\x63\xa6\xf8\xa1\x60\xf1\x9a\xe0\
\x2e\x64\xeb\xf5\xae\x58\xfd\xb2\x4c\x38\xc6\xf7\xbb\x21\xab\xcc\
\xac\xeb\x95\x7a\x24\xf0\x5b\x1f\xd7\x74\xfd\xab\x16\xe5\x21\x5b\
\xaf\x76\xf2\xa0\xeb\xe5\x6a\x27\x0f\xba\x9e\x97\x62\xe9\x17\xda\
\xb5\x98\x10\x31\x42\x5d\xab\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x00\xed\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x0a\x08\x02\x00\x00\x00\xc3\xd7\x12\x46\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0b\x12\x00\x00\x0b\x12\x01\xd2\xdd\
\x7e\xfc\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x00\x5c\x49\x44\x41\x54\
\x38\x4f\x63\xe8\x9a\x38\x87\x76\x08\x64\xfa\xa7\xaf\x3f\x68\x81\
\xa0\xa6\x7f\xf8\xfc\x8d\x16\x08\x6a\xfa\xbb\x8f\x5f\x68\x81\xa0\
\xa6\xbf\x7a\xfb\x11\x82\x5c\x22\x2a\x29\x47\x70\xd3\xa0\xa6\x3f\
\x7b\xf9\x1e\x82\xd0\xd4\x91\x87\xe0\xa6\x41\x4d\x7f\xfc\xec\x2d\
\x2d\x10\xd4\xf4\x87\x4f\x5e\xd3\x02\x41\x4d\xbf\xff\xe8\x25\x2d\
\x10\xd4\x74\x5a\xa1\x89\x73\x00\xf8\x06\xba\x5a\xe8\x93\x6f\x68\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x3f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x13\x08\x02\x00\x00\x00\xe7\x0e\x41\x15\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc2\x00\x00\x0e\xc2\
\x01\x15\x28\x4a\x80\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\
\x76\x33\x2e\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x00\xbb\x49\
\x44\x41\x54\x38\x4f\xbd\xd5\x2b\x0e\xc2\x50\x10\x85\xe1\x2e\x8e\
\x45\xb0\x15\x34\x8e\x04\x81\x43\xe0\xd0\x78\x56\x81\xc2\xd4\xa0\
\x48\xa0\xa5\xa5\xbd\x8f\x3e\x31\x9c\x66\x26\x13\x04\x6e\x18\x92\
\xdf\x4d\xf2\xdd\x23\x9a\x34\x59\x6d\x76\x76\x25\xa0\x63\x37\x5a\
\x04\x79\xd2\x43\x3b\x58\xf4\x17\xdd\x37\x3d\x35\x9b\x2f\xf4\x89\
\xc6\xdb\x6d\x75\x17\x3b\x4a\x3f\x1c\x82\x68\xbc\xdd\x56\xaf\x43\
\x4b\xe1\xe5\x57\x76\xd4\x04\x41\x34\xde\xfe\xa9\x8f\xd7\x83\xa6\
\x2f\x7a\xe5\x1b\x0a\xb7\xf1\xb2\xd7\x04\x41\x34\xde\xfe\x74\x91\
\xc2\x6d\x48\xb7\x9a\x20\x88\xc6\x7a\x59\x07\x0a\xb7\xfe\xbc\xd6\
\x04\x41\x34\xd6\x8b\x2a\x50\x93\x7e\x5a\x6a\x82\x20\x1a\xeb\x79\
\xe9\xa9\x9f\x7c\xef\xa2\xb1\x9e\x15\xce\x22\xd6\xef\x8f\xda\x22\
\xd6\x6f\x79\x65\x11\xeb\x76\xff\xa6\x37\x06\x80\x09\x57\x1d\xbe\
\x2e\x15\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x26\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x95\x49\x44\x41\x54\
\x48\x4b\xbd\xcd\xbb\x4a\x03\x51\x10\xc6\x71\x1f\xce\x87\xf0\x45\
\x2c\x2c\x2c\x2c\x2c\x14\x2c\x22\x1a\xa3\x04\x49\x4c\xc0\xc2\xc2\
\xc2\x68\x0a\x5f\x40\x44\x44\x44\x44\x44\x62\xdc\x5c\x77\xb3\x9b\
\xcd\xfd\x66\xe3\x0c\x33\x3b\x84\x93\x30\x07\x84\x0d\xfc\xaa\x73\
\xbe\xf3\x3f\x6b\xfb\x89\x74\x7c\xb0\x3e\x18\xcf\xe2\xc0\xf5\xfe\
\x68\x1a\x87\x95\xd4\x7b\xc3\x09\x59\xdf\xd8\xb2\x92\xb1\xd5\x4a\
\xea\xdd\xc1\x98\x18\xa1\xa5\x64\x6c\xb5\x92\x7a\xa7\x3f\x22\xf0\
\xf8\xd7\xbb\x57\xc0\x40\xc6\x56\xcb\xea\xcd\xa2\xe2\x3f\xf5\xb0\
\x37\x24\x58\xaf\xdf\x28\x60\x20\x63\x2b\xae\xb7\xbb\x03\x82\xf5\
\xea\xb5\x02\x06\x32\x36\x9c\xe6\x8b\xc6\x09\xd7\x83\x4e\x9f\xc0\
\xe3\x99\x73\xa5\x80\x81\x8c\xe7\xa5\xf2\x77\x8b\x57\x5c\xf7\xc3\
\x3e\xc1\x7a\xf9\x52\x01\x03\x19\x8b\x54\x0e\xd3\x8b\x57\x5c\xf7\
\x82\x1e\xc1\x7a\x29\xa7\x80\x81\x8c\xc9\xc9\x05\xa7\x17\xaf\xb8\
\xee\xfa\x5d\x02\x8b\xe9\xd7\xb9\x02\x06\x32\x96\x27\xc2\xb8\xe2\
\x7a\xb3\xd5\x21\xb0\x98\x7e\x9e\x29\x60\x20\x63\x92\xcc\xde\x4a\
\xdd\xb8\xe2\x7a\xc3\x0b\x09\xd6\x3f\x92\x0a\x18\xc8\x58\x1c\x67\
\x0b\x54\x37\xce\xb9\x5e\x77\x43\x82\xf5\xf7\x43\x05\x0c\x64\x3c\
\xef\x38\x83\x1f\x18\x87\x5c\xaf\x35\xdb\x04\x16\x93\xb7\x03\x05\
\x0c\x64\x6c\x38\xca\x14\x8c\x13\xae\x57\x1b\x01\xc1\xfa\xeb\x9e\
\x02\x06\x32\xb6\xe2\x7a\xa5\x11\x10\xac\xbf\xec\x2a\x60\x20\x63\
\xab\xa8\x5e\xf7\x09\x3c\x1e\x3f\xef\x28\xb0\x1e\x8d\xad\xb8\xee\
\xd4\x7c\x82\xf5\xa7\x6d\x05\x0c\x64\x6c\xc5\xf5\x9f\x5a\x8b\x60\
\xfd\x51\x03\x03\x19\x5b\x45\xf5\x6a\x8b\x60\xfd\x61\x53\x81\xf5\
\x68\x6c\xc5\xf5\x72\xd5\x23\xf0\xd8\x4a\xc6\x56\x5c\xff\xae\x78\
\x71\x88\xea\x8e\x1b\x07\xae\x97\x1c\x37\x0e\x5c\x8f\x4b\x22\xfd\
\x07\x5d\xb2\xe7\xb2\x6f\xdb\xf3\x18\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x01\xbb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x1f\x00\x00\x00\x1f\x08\x02\x00\x00\x00\x90\xcc\x81\x6e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7\x6f\
\xa8\x64\x00\x00\x00\x1a\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\
\x72\x65\x00\x50\x61\x69\x6e\x74\x2e\x4e\x45\x54\x20\x76\x33\x2e\
\x35\x2e\x31\x30\x30\xf4\x72\xa1\x00\x00\x01\x2a\x49\x44\x41\x54\
\x48\x4b\xbd\xd2\xbd\x4e\x02\x51\x10\x86\x61\x2f\xce\x8b\xf0\x52\
\xb0\xb6\x33\xb1\xa0\x33\x84\xce\x68\x67\xcf\x2d\xd8\x50\xd9\xd8\
\xa0\x2e\xbb\x2c\xfb\xc7\xfe\xc2\x02\x36\xce\xc9\x7c\x3b\x26\x07\
\x23\xb8\x27\x43\xf2\x16\x84\x6f\xf2\x9c\x66\x2f\x6e\x87\x63\xbd\
\x8c\xde\xb4\x7b\x8d\xa0\xd7\x9b\x9d\x46\x67\xd1\xab\xf5\x56\xa3\
\xb3\xe8\x65\xd3\xfe\xd1\xe8\x69\x72\x79\x75\x7d\x18\xfd\x6f\x5d\
\x5a\x9d\xa4\x53\x87\x0f\x1c\xa5\x29\xe8\x45\xbd\x39\xda\xe8\xf1\
\xe7\x01\xfa\x6d\xad\xbf\xf6\x0f\x9d\xe2\x07\x4e\xa4\x29\xe8\x79\
\xb5\xd6\x08\xfa\xaa\x6c\x34\x82\x9e\x15\xb5\x46\xd0\xd3\xbc\xd6\
\x08\x7a\x9c\x55\x1a\x41\x8f\xd2\x92\x93\x6f\xce\x25\xd1\xa0\x2f\
\x93\x82\xb3\xee\xfa\x25\x1a\xf4\x30\xce\x39\xeb\xae\x5f\xa2\x41\
\x5f\x44\x39\x67\xdd\xf5\x4b\x34\xe8\xc1\x72\xc5\xd1\xf6\x15\x4d\
\x5c\x22\x41\x34\xe8\x7e\x98\x71\xb4\xed\xfd\x67\x97\x48\x10\x0d\
\xfa\x3c\xcc\x38\xa3\xcf\x1e\x5c\x22\x41\xb4\x4e\x5f\xa4\x1c\x6d\
\xbb\xb7\x7b\x97\x8c\xde\x69\xd0\xbd\x20\xe5\x68\xdb\xbe\xde\xb9\
\x44\x82\x68\xd0\x3f\x83\x84\x33\xfa\xf4\xc6\x25\x12\x44\xeb\x74\
\x3f\xe1\x68\x6b\x5f\x06\x2e\x19\xbd\xd3\xa0\x7f\xf8\x31\x47\x9b\
\x7b\xa2\x41\x7f\x9f\xc7\x1a\x75\xba\x17\x69\x04\x7d\xe6\x45\x1a\
\x41\xd7\x6a\x38\xfe\x06\x3c\xec\xc9\x88\xb5\xd8\x55\x59\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x62\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x02\x04\x49\x44\x41\x54\x78\x9c\xed\
\xd6\xb1\x0d\x03\x01\x0c\x03\x31\x21\x13\x64\xff\x65\x3f\x6d\xbc\
\x80\x2d\xe0\xa9\x8e\xfd\x15\x4a\xe6\xbe\xfc\x2e\x7f\xfe\xf5\x3c\
\x4f\xf8\x9d\x3e\x2f\x91\x6f\xfc\x49\xba\x4a\xe4\x7d\xff\xaf\xa6\
\x4c\xde\xb1\x0f\xc0\x49\x0a\x4a\xe4\x1b\xfb\x00\x3c\x56\x53\x26\
\xef\xd8\x07\xe0\x24\x05\x25\xf2\x8d\x7d\x00\x1e\xab\x29\x93\x77\
\xec\x03\x70\x92\x82\x12\xf9\xc6\x3e\x00\x8f\xd5\x94\xc9\x3b\xf6\
\x01\x38\x49\x41\x89\x7c\x63\x1f\x80\xc7\x6a\xca\xe4\x1d\xfb\x00\
\x9c\xa4\xa0\x44\xbe\xb1\x0f\xc0\x63\x35\x65\xf2\x8e\x7d\x00\x4e\
\x52\x50\x22\xdf\xd8\x07\xe0\xb1\x9a\x32\x79\xc7\x3e\x00\x27\x29\
\x28\x91\x6f\xec\x03\xf0\x58\x4d\x99\xbc\x63\x1f\x80\x93\x14\x94\
\xc8\x37\xf6\x01\x78\xac\xa6\x4c\xde\xb1\x0f\xc0\x49\x0a\x4a\xe4\
\x1b\xfb\x00\x3c\x56\x53\x26\xef\xd8\x07\xe0\x24\x05\x25\xf2\x8d\
\x7d\x00\x1e\xab\x29\x93\x77\xec\x03\x70\x92\x82\x12\xf9\xc6\x3e\
\x00\x8f\xd5\x94\xc9\x3b\xf6\x01\x38\x49\x41\x89\x7c\x63\x1f\x80\
\xc7\x6a\xca\xe4\x1d\xfb\x00\x9c\xa4\xa0\x44\xbe\xb1\x0f\xc0\x63\
\x35\x65\xf2\x8e\x7d\x00\x4e\x52\x50\x22\xdf\xd8\x07\xe0\xb1\x9a\
\x32\x79\xc7\x3e\x00\x27\x29\x28\x91\x6f\xec\x03\xf0\x58\x4d\x99\
\xbc\x63\x1f\x80\x93\x14\x94\xc8\x37\xf6\x01\x78\xac\xa6\x4c\xde\
\xb1\x0f\xc0\x49\x0a\x4a\xe4\x1b\xfb\x00\x3c\x56\x53\x26\xef\xd8\
\x07\xe0\x24\x05\x25\xf2\x8d\x7d\x00\x1e\xab\x29\x93\x77\xec\x03\
\x70\x92\x82\x12\xf9\xc6\x3e\x00\x8f\xd5\x94\xc9\x3b\xf6\x01\x38\
\x49\x41\x89\x7c\x63\x1f\x80\xc7\x6a\xca\xe4\x1d\xfb\x00\x9c\xa4\
\xa0\x44\xbe\xb1\x0f\xc0\x63\x35\x65\xf2\x8e\x7d\x00\x4e\x52\x50\
\x22\xdf\xd8\x07\xe0\xb1\x9a\x32\x79\xc7\x3e\x00\x27\x29\x28\x91\
\x6f\xec\x03\xf0\x58\x4d\x99\xbc\x63\x1f\x80\x93\x14\x94\xc8\x37\
\xf6\x01\x78\xac\xa6\x4c\xde\xb1\x0f\xc0\x49\x0a\x4a\xe4\x1b\xfb\
\x00\x3c\x56\x53\x26\xef\xd8\x07\xe0\x24\x05\x25\xf2\x8d\x7d\x00\
\x1e\xab\x29\x93\x77\xec\x03\x70\x92\x82\x12\xf9\xc6\x3e\x00\x8f\
\xd5\x94\xc9\x3b\xf6\x01\x38\x49\x41\x89\x7c\x63\x1f\x80\xc7\x6a\
\xca\xe4\x1d\xfb\x00\x9c\xa4\xa0\x44\xbe\xb1\x0f\xc0\x63\x35\x65\
\xf2\x8e\x7d\x00\x4e\x52\x50\x22\xdf\xd8\x07\xe0\xb1\x9a\x32\x79\
\xc7\x3e\x00\x27\x29\x28\x91\x6f\xec\x03\xbc\xdc\x3f\xe4\x79\x69\
\xe9\x67\xab\xcf\x62\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = b"\
\x00\x0b\
\x05\x55\xc9\xe3\
\x00\x64\
\x00\x6f\x00\x63\x00\x6b\x00\x5f\x00\x69\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0d\
\x0c\x46\x04\x47\
\x00\x63\
\x00\x72\x00\x6f\x00\x73\x00\x73\x00\x5f\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0a\xc8\x6f\xe7\
\x00\x63\
\x00\x65\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0c\x5a\x16\x47\
\x00\x63\
\x00\x72\x00\x6f\x00\x73\x00\x73\x00\x5f\x00\x65\x00\x78\x00\x5f\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x05\x0d\xa3\xa7\
\x00\x74\
\x00\x68\x00\x69\x00\x6e\x00\x5f\x00\x76\x00\x65\x00\x72\x00\x74\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0f\
\x0b\x70\x3f\xe7\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x6e\x00\x6f\x00\x72\x00\x74\x00\x68\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x04\x14\x00\x47\
\x00\x67\
\x00\x75\x00\x69\x00\x64\x00\x65\x00\x5f\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x0a\x7a\xa0\x07\
\x00\x73\
\x00\x70\x00\x6c\x00\x69\x00\x74\x00\x5f\x00\x76\x00\x65\x00\x72\x00\x74\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x10\
\x04\xfc\x40\xa7\
\x00\x62\
\x00\x61\x00\x72\x00\x5f\x00\x76\x00\x65\x00\x72\x00\x74\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x0b\x8a\xe6\x07\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x65\x00\x61\x00\x73\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x0b\x9f\xd1\x07\
\x00\x73\
\x00\x70\x00\x6c\x00\x69\x00\x74\x00\x5f\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x0d\x7f\x14\x07\
\x00\x62\
\x00\x61\x00\x72\x00\x5f\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x13\
\x0c\x9c\x17\xe7\
\x00\x74\
\x00\x68\x00\x69\x00\x6e\x00\x5f\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\x00\x70\
\x00\x6e\x00\x67\
\x00\x0e\
\x0f\x8a\xe0\xc7\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x77\x00\x65\x00\x73\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x0e\x70\x21\xe7\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x73\x00\x6f\x00\x75\x00\x74\x00\x68\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x07\x04\x9f\x87\
\x00\x62\
\x00\x61\x00\x63\x00\x6b\x00\x67\x00\x72\x00\x6f\x00\x75\x00\x6e\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0f\x00\x00\x00\x02\
\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x01\x00\x00\x0a\xb3\
\x00\x00\x01\x12\x00\x00\x00\x00\x00\x01\x00\x00\x0d\x3f\
\x00\x00\x00\x7c\x00\x00\x00\x00\x00\x01\x00\x00\x07\x12\
\x00\x00\x02\x24\x00\x00\x00\x00\x00\x01\x00\x00\x18\xfc\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x79\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x02\x65\
\x00\x00\x00\xa4\x00\x00\x00\x00\x00\x01\x00\x00\x08\xf7\
\x00\x00\x01\x38\x00\x00\x00\x00\x00\x01\x00\x00\x0e\x91\
\x00\x00\x01\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x10\xb7\
\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x56\x00\x00\x00\x00\x00\x01\x00\x00\x04\x43\
\x00\x00\x01\xb2\x00\x00\x00\x00\x00\x01\x00\x00\x13\xd0\
\x00\x00\x01\x88\x00\x00\x00\x00\x00\x01\x00\x00\x12\xdf\
\x00\x00\x02\x00\x00\x00\x00\x00\x00\x01\x00\x00\x17\x3d\
\x00\x00\x01\xde\x00\x00\x00\x00\x00\x01\x00\x00\x15\x13\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
rpmvenv/rpmbuild.py
|
oleynikandrey/rpmvenv
| 150 |
81490
|
<filename>rpmvenv/rpmbuild.py
"""Functions for running the rpm build commands."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import glob
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
IGNORED_PATTERNS = (
'*.pyc',
'*.pyo',
'*.pyd',
'__pycache__',
)
class RpmProcessError(subprocess.CalledProcessError):
"""An exception thrown during the RPM build process.
This exception extends the subprocess CalledProcessError to add standard
out and standard error string fields.
"""
def __init__(self, returncode, cmd, output=None, stdout=None, stderr=None):
"""Initialize the exception with process information."""
super(RpmProcessError, self).__init__(returncode, cmd)
self.output = output or ''
self.stdout = stdout or ''
self.stderr = stderr or ''
def topdir():
"""Get the absolute path to a valid rpmbuild %_topdir."""
top = tempfile.mkdtemp(prefix='rpmvenv')
os.makedirs(os.path.join(top, 'SOURCES'))
os.makedirs(os.path.join(top, 'SPECS'))
os.makedirs(os.path.join(top, 'BUILD'))
os.makedirs(os.path.join(top, 'RPMS'))
os.makedirs(os.path.join(top, 'SRPMS'))
return top
def write_spec(top, spec):
"""Write a SPEC file to the SOURCES directory.
Args:
top: The absolute path to the %_topdir.
spec: The contents of the SPEC file.
Returns:
The absolute path to the SPEC file.
"""
path = os.path.join(top, 'SOURCES', 'package.spec')
with open(path, 'w') as specfile:
specfile.write(spec)
return path
def copy_source(top, source, name=None):
"""Copy the source directory into the SOURCES directory.
Args:
top: The absolute path to the %_topdir.
source: The absolute path to the source directory.
name: The name of the directory to place in SOURCES.
Returns:
The absolute path to the copy.
"""
name = name or os.path.basename(source)
path = os.path.join(top, 'SOURCES', name)
shutil.copytree(
source,
path,
ignore=shutil.ignore_patterns(*IGNORED_PATTERNS),
)
return path
def verbose_popen(cmd):
"""Run a command with streaming output.
Args:
cmd (str): A command to run with popen.
Raises:
CalledProcessError: If the returncode is not 0.
"""
proc = subprocess.Popen(shlex.split(cmd))
proc.wait()
if proc.returncode != 0:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=cmd,
)
def quiet_popen(cmd):
"""Run a command with captured output.
Args:
cmd (str): A command to run with popen.
Raises:
RpmProcessError: If the returncode is not 0.
"""
proc = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
raise RpmProcessError(
returncode=proc.returncode,
cmd=cmd,
output=err,
stdout=out,
stderr=err,
)
def build(specfile, top=None, verbose=False):
"""Run rpmbuild with options.
Args:
specfile: The absolute path to the SPEC file to build.
top: The %_topdir to use during the build. The default is a temporary
directory which is automatically generated.
verbose: Whether or not to stream the rpmbuild output in real time
or only during errors.
Returns:
The absolute path to the new RPM.
"""
top = top or topdir()
cmd = "rpmbuild -ba --define='_topdir {0}' {1}".format(
top,
specfile,
).encode('ascii')
# PY3 shlex only works with unicode strings. Convert as needed.
if sys.version_info[0] > 2:
cmd = cmd.decode('utf8')
if not verbose:
quiet_popen(cmd)
else:
verbose_popen(cmd)
return glob.glob(os.path.join(top, 'RPMS', '**', '*.rpm')).pop()
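# A minimal usage sketch (hypothetical paths and SPEC contents):
# top = topdir()
# specfile = write_spec(top, spec_text)
# copy_source(top, '/path/to/project')
# rpm_path = build(specfile, top=top, verbose=True)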
|
myia/utils/variables.py
|
strint/myia
| 222 |
81497
|
"""Define variables for use in patterns all over Myia."""
from ..ir import Graph
from .misc import Namespace
from .unify import SVar, Var, var
def constvar(cls=object):
"""Return a variable matching a Constant of the given type."""
def _is_c(n):
return n.is_constant(cls)
return var(_is_c)
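# e.g. constvar(Graph) yields a variable matching only Constant nodes that hold
# a Graph, as used for G/G1/G2 below.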
#####################
# Generic variables #
#####################
X = Var("X")
Y = Var("Y")
Z = Var("Z")
X1 = Var("X1")
X2 = Var("X2")
X3 = Var("X3")
X4 = Var("X4")
X5 = Var("X5")
#############
# Constants #
#############
C = constvar()
C1 = constvar()
C2 = constvar()
CNS = constvar(Namespace)
G = constvar(Graph)
G1 = constvar(Graph)
G2 = constvar(Graph)
NIL = var(lambda x: x.is_constant() and x.value == ())
######################
# Sequence variables #
######################
Xs = SVar(Var())
Ys = SVar(Var())
Cs = SVar(constvar())
__all__ = [
"X",
"Y",
"Z",
"X1",
"X2",
"X3",
"X4",
"X5",
"C",
"C1",
"C2",
"CNS",
"G",
"G1",
"G2",
"NIL",
"Xs",
"Ys",
"Cs",
"constvar",
]
|
vta/python/vta/top/nnvm_graphpack.py
|
mingwayzhang/tvm
| 286 |
81534
|
<reponame>mingwayzhang/tvm<gh_stars>100-1000
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An NNVM implementation of graph packing."""
import nnvm
from nnvm.compiler import graph_attr, graph_util
def _pack_batch_channel(data, dshape, bfactor, cfactor):
"""Pack the data channel dimension.
"""
assert dshape[0] % bfactor == 0
assert dshape[1] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // bfactor, bfactor,
dshape[1] // cfactor, cfactor,
dshape[2], dshape[3]))
data = nnvm.sym.transpose(
data, axes=(0, 2, 4, 5, 1, 3))
return data
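# Shape sketch: (N, C, H, W) -> (N//bfactor, C//cfactor, H, W, bfactor, cfactor)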
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension.
"""
data = nnvm.sym.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = nnvm.sym.reshape(data, shape=old_shape)
return data
def _pack_weight(data, dshape, cfactor):
"""Pack the weight into packed format.
"""
assert len(dshape) == 4
assert dshape[0] % cfactor == 0
assert dshape[1] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // cfactor, cfactor,
dshape[1] // cfactor, cfactor,
dshape[2], dshape[3]))
data = nnvm.sym.transpose(
data, axes=(0, 2, 4, 5, 1, 3))
return data
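# Shape sketch: (O, I, H, W) -> (O//cfactor, I//cfactor, H, W, cfactor, cfactor)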
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
"""Pack the weight into packed format.
"""
assert len(dshape) == 4
assert dshape[0] % cfactor == 0
assert dshape[1] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // cfactor, cfactor,
dshape[1] // cfactor, cfactor,
dshape[2], dshape[3]))
data = nnvm.sym.transpose(
data, axes=(2, 0, 4, 5, 3, 1))
return data
def _pack_bias(data, dshape, bfactor, cfactor):
"""Pack the bias parameter.
"""
assert len(dshape) == 3
assert dshape[0] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // cfactor,
cfactor, dshape[1],
dshape[2], 1))
data = nnvm.sym.transpose(
data, axes=(0, 2, 3, 4, 1))
# broadcast batch dimension to bfactor
data = nnvm.sym.broadcast_to(
data,
shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor))
return data
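# Shape sketch: (C, H, W) -> (C//cfactor, H, W, bfactor, cfactor)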
def _get_shape(sym, shape_dict):
"""Get the shape of a node.
"""
return graph_util.infer_shape(
nnvm.graph.create(sym), **shape_dict)[1][0]
def nnvm_graph_pack(graph,
shape_dict,
bfactor,
cfactor,
weight_bits,
start_name="max_pool2d0",
stop_name="global_avg_pool2d0"):
"""Pack the graph into batch&channel packed format.
Parameters
----------
graph : Graph
The input graph.
shape_dict : dict of str to shape
The input shape.
bfactor : int
The packing factor in batch
cfactor : int
The packing factor in channel
start_name: str, optional
Start packing from certain known node.
    stop_name: str, optional
        Stop packing at certain known node.
Returns
-------
graph : Graph
The transformed graph.
"""
graph = graph_attr.set_shape_inputs(graph, shape_dict)
graph = graph.apply("InferShape")
shape = graph.json_attr("shape")
gidx = graph.index
node_map = {}
dset = set()
start_pack = False
for nid, node in enumerate(gidx.nodes):
children = [node_map[e[0]] for e in node["inputs"]]
ishape = [shape[gidx.entry_id(e)] for e in node["inputs"]]
oshape = shape[gidx.entry_id(nid, 0)]
attrs = node.get("attrs", {})
node_name = node["name"]
op_name = node["op"]
get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
*c, name=n_n, **a)
if op_name == "null":
new_node = nnvm.symbol.Variable(node_name)
if start_name and node_name == start_name:
start_pack = True
new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
if start_pack and "_begin_state_" in node_name: # RNN -> CNN, pack
new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
elif node_name == start_name:
assert not start_pack
start_pack = True
new_node = get_clone(children, op_name, node_name, attrs)
new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
elif node_name == stop_name:
if start_pack:
start_pack = False
children[0] = _unpack_batch_channel(children[0], ishape[0])
new_node = getattr(nnvm.symbol, op_name)(
*children, name=node_name, **attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name == "conv2d" and attrs.get("out_dtype", None) == "int32":
assert 8 % weight_bits == 0
w_lanes = 8 // weight_bits
if start_pack:
attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
attrs["kernel_layout"] = "OIHW%do%di%dp" % (cfactor, cfactor, w_lanes)
data, weight = children
weight = _pack_weight(weight, ishape[1], cfactor)
# insert bit packing when necessary
if w_lanes != 1:
assert 8 % w_lanes == 0
weight = nnvm.sym.bitpack(weight, lanes=w_lanes)
new_node = nnvm.sym.conv2d(
data, weight, name=node_name, **attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name == "conv2d_transpose" and attrs.get("out_dtype", None) == "int32":
assert 8 % weight_bits == 0
w_lanes = 8 // weight_bits
if start_pack:
attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
attrs["kernel_layout"] = "IOHW%di%do%dp" % (cfactor, cfactor, w_lanes)
data, weight = children
weight = _pack_weight_conv2d_transpose(weight, ishape[1], cfactor)
new_node = nnvm.sym.conv2d_transpose(
data, weight, name=node_name, **attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name.startswith("broadcast_") and tuple(ishape[0]) == tuple(ishape[1]):
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name.startswith("broadcast") and len(ishape[1]) == 3:
if start_pack:
children[1] = _pack_bias(children[1], ishape[1], bfactor, cfactor)
new_node = getattr(nnvm.symbol, op_name)(
*children, name=node_name, **attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name.startswith("elementwise_add"):
new_node = get_clone(children, op_name, node_name, attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
dset.add(op_name)
node_map[nid] = new_node
assert len(graph.index.output_entries) == 1
ret = node_map[graph.index.output_entries[0][0]]
if start_pack:
oshape = shape[graph.index.output_entries[0][0]]
ret = _unpack_batch_channel(ret, oshape)
graph = nnvm.graph.create(ret)
graph = graph_attr.set_shape_inputs(graph, shape_dict)
graph = graph.apply("InferShape")
return graph
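if __name__ == "__main__":
    # Minimal NumPy sketch of the batch/channel packing performed above
    # (numpy is assumed available here; it is not otherwise used by this module).
    import numpy as np

    n, c, h, w = 4, 8, 2, 2
    bfactor, cfactor = 2, 4
    x = np.arange(n * c * h * w).reshape(n, c, h, w)
    packed = x.reshape(n // bfactor, bfactor, c // cfactor, cfactor, h, w)
    packed = packed.transpose(0, 2, 4, 5, 1, 3)
    # NCHW -> (N/b, C/c, H, W, b, c): the small batch/channel factors become
    # the two innermost axes, matching the "NCHW%dn%dc" layout string.
    assert packed.shape == (n // bfactor, c // cfactor, h, w, bfactor, cfactor)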
|
src/pythae/samplers/vamp_sampler/vamp_sampler_config.py
|
clementchadebec/benchmark_VAE
| 143 |
81544
|
from pydantic.dataclasses import dataclass
from ...samplers import BaseSamplerConfig
@dataclass
class VAMPSamplerConfig(BaseSamplerConfig):
"""This is the VAMP prior sampler configuration instance deriving from
:class:`BaseSamplerConfig`.
"""
pass
|
th_github/models.py
|
Leopere/django-th
| 1,069 |
81583
|
# coding: utf-8
from django.db import models
from django_th.models.services import Services
from django_th.models import TriggerService
class Github(Services):
"""
github model to be adapted for the new service
"""
# put whatever you need here
# eg title = models.CharField(max_length=80)
# but keep at least this one
repo = models.CharField(max_length=80) # owner
project = models.CharField(max_length=80) # repo
trigger = models.ForeignKey(TriggerService, on_delete=models.CASCADE)
class Meta:
app_label = 'th_github'
db_table = 'django_th_github'
def show(self):
"""
:return: string representing object
"""
return "My Github %s" % self.name
def __str__(self):
return self.name
|
Python/met_brewer/__init__.py
|
vitusbenson/MetBrewer
| 570 |
81585
|
from met_brewer.palettes import (
MET_PALETTES, COLORBLIND_PALETTES_NAMES, COLORBLIND_PALETTES,
met_brew, export, is_colorblind_friendly
)
__all__ = [
    "MET_PALETTES",
    "COLORBLIND_PALETTES_NAMES",
    "COLORBLIND_PALETTES",
    "met_brew",
    "export",
    "is_colorblind_friendly",
]
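# Usage sketch (palette name illustrative; call signature assumed from
# met_brewer.palettes):
#   colors = met_brew("Hokusai1")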
|
recipes/Python/578103_Singleton_parameter_based/recipe-578103.py
|
tdiprima/code
| 2,023 |
81591
|
<reponame>tdiprima/code<gh_stars>1000+
def singleton(theClass):
""" decorator for a class to make a singleton out of it """
classInstances = {}
def getInstance(*args, **kwargs):
""" creating or just return the one and only class instance.
The singleton depends on the parameters used in __init__ """
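        # str(kwargs) is used because a dict is unhashable and could not
        # otherwise be part of the cache key.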
key = (theClass, args, str(kwargs))
if key not in classInstances:
classInstances[key] = theClass(*args, **kwargs)
return classInstances[key]
return getInstance
# Example
@singleton
class A:
""" test class """
def __init__(self, key=None, subkey=None):
self.key = key
self.subkey = subkey
def __repr__(self):
return "A(id=%d, %s,%s)" % (id(self), self.key, self.subkey)
def tests():
""" some basic tests """
testCases = [ (None, None), (10, 20), (30, None), (None, 30) ]
instances = set()
instance1 = None
instance2 = None
for key, subkey in testCases:
        if key is None:
            if subkey is None: instance1, instance2 = A(), A()
            else: instance1, instance2 = A(subkey=subkey), A(subkey=subkey)
        else:
            if subkey is None: instance1, instance2 = A(key), A(key)
            else: instance1, instance2 = A(key, subkey=subkey), A(key, subkey=subkey)
print("instance1: %-25s" % instance1, " instance2: %-25s" % instance2)
assert instance1 == instance2
assert instance1.key == key and instance1.subkey == subkey
instances.add(instance1)
assert len(instances) == len(testCases)
tests()
|
parsifal/apps/invites/tests/test_manage_access_view.py
|
ShivamPytho/parsifal
| 342 |
81610
|
<reponame>ShivamPytho/parsifal
from django.core import mail
from django.test.testcases import TestCase
from django.urls import reverse
from parsifal.apps.activities.constants import ActivityTypes
from parsifal.apps.activities.models import Activity
from parsifal.apps.authentication.tests.factories import UserFactory
from parsifal.apps.invites.constants import InviteStatus
from parsifal.apps.invites.models import Invite
from parsifal.apps.invites.tests.factories import InviteFactory
from parsifal.utils.test import login_redirect_url
class TestManageAccessView(TestCase):
@classmethod
def setUpTestData(cls):
cls.invite = InviteFactory()
cls.co_author = UserFactory()
cls.invite.review.co_authors.add(cls.co_author)
cls.url = reverse(
"invites:manage_access",
args=(
cls.invite.review.author.username,
cls.invite.review.name,
),
)
def test_login_required(self):
response = self.client.get(self.url)
self.assertRedirects(response, login_redirect_url(self.url))
def test_main_author_required(self):
self.client.force_login(self.co_author)
response = self.client.get(self.url)
self.assertEqual(403, response.status_code)
def test_get_success(self):
self.client.force_login(self.invite.review.author)
response = self.client.get(self.url)
with self.subTest(msg="Test get status code"):
self.assertEqual(200, response.status_code)
parts = ("csrfmiddlewaretoken", 'name="invitee"', 'name="invitee_email"', self.invite.get_invitee_email())
for part in parts:
with self.subTest(msg="Test response body", part=part):
self.assertContains(response, part)
def test_post_success_invitee_email(self):
data = {
"invitee_email": "<EMAIL>",
}
self.client.force_login(self.invite.review.author)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post status code"):
self.assertEqual(302, response.redirect_chain[0][1])
with self.subTest(msg="Test post redirect status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test success message"):
self.assertContains(response, "An invitation was sent to <EMAIL>.")
with self.subTest(msg="Test invite created"):
self.assertTrue(
Invite.objects.filter(invitee_email="<EMAIL>", status=InviteStatus.PENDING).exists()
)
with self.subTest(msg="Test email sent"):
self.assertEqual(1, len(mail.outbox))
def test_post_success_invitee(self):
contact = UserFactory(email="<EMAIL>")
Activity.objects.create(
from_user=self.invite.review.author, to_user=contact, activity_type=ActivityTypes.FOLLOW
)
with self.subTest(msg="Test setup"):
self.assertFalse(self.invite.review.is_author_or_coauthor(contact))
data = {
"invitee": contact.pk,
}
self.client.force_login(self.invite.review.author)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post status code"):
self.assertEqual(302, response.redirect_chain[0][1])
with self.subTest(msg="Test post redirect status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test success message"):
self.assertContains(response, "An invitation was sent to <EMAIL>.")
with self.subTest(msg="Test invite created"):
self.assertTrue(
Invite.objects.filter(
invitee=contact, invitee_email="<EMAIL>", status=InviteStatus.PENDING
).exists()
)
with self.subTest(msg="Test email sent"):
self.assertEqual(1, len(mail.outbox))
|
django_extensions/management/commands/list_signals.py
|
KazakovDenis/django-extensions
| 4,057 |
81651
|
# -*- coding: utf-8 -*-
# Based on https://gist.github.com/voldmar/1264102
# and https://gist.github.com/runekaagaard/2eecf0a8367959dc634b7866694daf2c
import gc
import inspect
import weakref
from collections import defaultdict
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.models.signals import (
ModelSignal, pre_init, post_init, pre_save, post_save, pre_delete,
post_delete, m2m_changed, pre_migrate, post_migrate
)
from django.utils.encoding import force_str
MSG = '{module}.{name} #{line}'
SIGNAL_NAMES = {
pre_init: 'pre_init',
post_init: 'post_init',
pre_save: 'pre_save',
post_save: 'post_save',
pre_delete: 'pre_delete',
post_delete: 'post_delete',
m2m_changed: 'm2m_changed',
pre_migrate: 'pre_migrate',
post_migrate: 'post_migrate',
}
class Command(BaseCommand):
help = 'List all signals by model and signal type'
def handle(self, *args, **options):
all_models = apps.get_models(include_auto_created=True, include_swapped=True)
model_lookup = {id(m): m for m in all_models}
signals = [obj for obj in gc.get_objects() if isinstance(obj, ModelSignal)]
models = defaultdict(lambda: defaultdict(list))
for signal in signals:
signal_name = SIGNAL_NAMES.get(signal, 'unknown')
for receiver in signal.receivers:
lookup, receiver = receiver
if isinstance(receiver, weakref.ReferenceType):
receiver = receiver()
if receiver is None:
continue
receiver_id, sender_id = lookup
model = model_lookup.get(sender_id, '_unknown_')
if model:
models[model][signal_name].append(MSG.format(
name=receiver.__name__,
module=receiver.__module__,
line=inspect.getsourcelines(receiver)[1],
path=inspect.getsourcefile(receiver))
)
output = []
for key in sorted(models.keys(), key=str):
verbose_name = force_str(key._meta.verbose_name)
output.append('{}.{} ({})'.format(
key.__module__, key.__name__, verbose_name))
for signal_name in sorted(models[key].keys()):
lines = models[key][signal_name]
output.append(' {}'.format(signal_name))
for line in lines:
output.append(' {}'.format(line))
return '\n'.join(output)
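# Example output shape (names illustrative), following MSG and the
# formatting above:
#   myapp.models.Book (book)
#     post_save
#       myapp.signals.update_search_index #42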
|
python_modules/libraries/dagster-aws/dagster_aws/s3/solids.py
|
rpatil524/dagster
| 4,606 |
81672
|
# pylint: disable=unused-import
# Keep module for legacy backcompat
from .ops import S3Coordinate, file_handle_to_s3
|
setup.py
|
sighingnow/delocate
| 175 |
81693
|
<reponame>sighingnow/delocate<gh_stars>100-1000
#!/usr/bin/env python
""" setup script for delocate package """
import codecs
from os.path import join as pjoin
import versioneer
from setuptools import find_packages, setup
setup(
name="delocate",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Move macOS dynamic libraries into package",
author="<NAME>",
maintainer="<NAME>",
author_email="<EMAIL>",
url="http://github.com/matthew-brett/delocate",
packages=find_packages(),
python_requires=">=3.6",
install_requires=[
"machomachomangler; sys_platform == 'win32'",
"bindepend; sys_platform == 'win32'",
"wheel",
"typing_extensions",
],
package_data={
"delocate.tests": [
pjoin("data", "*.dylib"),
pjoin("data", "*.txt"),
pjoin("data", "*.bin"),
pjoin("data", "*.py"),
pjoin("data", "liba.a"),
pjoin("data", "a.o"),
pjoin("data", "*.whl"),
pjoin("data", "test-lib"),
pjoin("data", "*patch"),
pjoin("data", "make_libs.sh"),
pjoin("data", "icon.ico"),
],
"delocate": ["py.typed"],
},
entry_points={
"console_scripts": [
"delocate-{} = delocate.cmd.delocate_{}:main".format(name, name)
for name in (
"addplat",
"fuse",
"listdeps",
"patch",
"path",
"wheel",
)
]
},
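    # The comprehension above expands to console scripts such as:
    #   delocate-wheel = delocate.cmd.delocate_wheel:main
    #   delocate-listdeps = delocate.cmd.delocate_listdeps:main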
license="BSD license",
classifiers=[
"Intended Audience :: Developers",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Build Tools",
],
long_description=codecs.open("README.rst", "r", encoding="utf-8").read(),
)
|
pyxl/codec/html_tokenizer.py
|
adamserafini/pyxl
| 366 |
81708
|
<gh_stars>100-1000
"""
A naive but strict HTML tokenizer. Based directly on
http://www.w3.org/TR/2011/WD-html5-20110525/tokenization.html
In the ATTRIBUTE_VALUE and BEFORE_ATTRIBUTE_VALUE states, python tokens are accepted.
"""
import sys
from collections import OrderedDict
class State(object):
DATA = 1
# unused states: charrefs, RCDATA, script, RAWTEXT, PLAINTEXT
TAG_OPEN = 7
END_TAG_OPEN = 8
TAG_NAME = 9
# unused states: RCDATA, RAWTEXT, script
BEFORE_ATTRIBUTE_NAME = 34
ATTRIBUTE_NAME = 35
AFTER_ATTRIBUTE_NAME = 36
BEFORE_ATTRIBUTE_VALUE = 37
ATTRIBUTE_VALUE_DOUBLE_QUOTED = 38
ATTRIBUTE_VALUE_SINGLE_QUOTED = 39
ATTRIBUTE_VALUE_UNQUOTED = 40
# unused state: CHARREF_IN_ATTRIBUTE_VALUE = 41
AFTER_ATTRIBUTE_VALUE = 42
SELF_CLOSING_START_TAG = 43
# unused state: BOGUS_COMMENT_STATE = 44
MARKUP_DECLARATION_OPEN = 45
COMMENT_START = 46
COMMENT_START_DASH = 47
COMMENT = 48
COMMENT_END_DASH = 49
COMMENT_END = 50
# unused state: COMMENT_END_BANG = 51
DOCTYPE = 52
DOCTYPE_CONTENTS = 53 # Gross oversimplification. Not to spec.
# unused states: doctypes
CDATA_SECTION = 68
@classmethod
def state_name(cls, state_val):
        for k, v in cls.__dict__.items():
if v == state_val:
return k
assert False, "impossible state value %r!" % state_val
class Tag(object):
def __init__(self):
self.tag_name = None
self.attrs = OrderedDict()
self.endtag = False
self.startendtag = False
class ParseError(Exception):
pass
class BadCharError(Exception):
def __init__(self, state, char):
super(BadCharError, self).__init__("unexpected character %r in state %r" %
(char, State.state_name(state)))
class Unimplemented(Exception):
pass
class HTMLTokenizer(object):
def __init__(self):
self.state = State.DATA
# attribute_value is a list, where each element is either a string or a list of python
# tokens.
self.data = ""
self.tag = None
self.tag_name = None
self.attribute_name = None
self.attribute_value = None
self.markup_declaration_buffer = None
def handle_data(self, data):
assert False, "subclass should override"
def handle_starttag(self, tag_name, attrs):
assert False, "subclass should override"
def handle_startendtag(self, tag_name, attrs):
assert False, "subclass should override"
def handle_endtag(self, tag_name):
assert False, "subclass should override"
def handle_comment(self, tag_name):
assert False, "subclass should override"
def handle_doctype(self, data):
assert False, "subclass should override"
def handle_cdata(self, tag_name):
assert False, "subclass should override"
def emit_data(self):
self.handle_data(self.data)
self.data = ""
def emit_tag(self):
if self.tag.startendtag and self.tag.endtag:
raise ParseError("both startendtag and endtag!?")
if self.tag.startendtag:
self.handle_startendtag(self.tag.tag_name, self.tag.attrs)
elif self.tag.endtag:
self.handle_endtag(self.tag.tag_name)
else:
self.handle_starttag(self.tag.tag_name, self.tag.attrs)
def emit_comment(self):
self.handle_comment(self.data)
self.data = ""
def emit_doctype(self):
self.handle_doctype(self.data)
self.data = ""
def emit_cdata(self):
self.handle_cdata(self.data)
self.data = ""
def got_attribute(self):
if self.attribute_name in self.tag.attrs:
raise ParseError("repeat attribute name %r" % self.attribute_name)
self.tag.attrs[self.attribute_name] = self.attribute_value
self.attribute_name = None
self.attribute_value = None
def add_data_char(self, build, c):
""" For adding a new character to e.g. an attribute value """
if len(build) and type(build[-1]) == str:
build[-1] += c
else:
build.append(c)
def feed(self, c):
if self.state == State.DATA:
if c == '<':
self.emit_data()
self.state = State.TAG_OPEN
# Pass through; it's the browser's problem to understand these.
#elif c == '&':
# raise Unimplemented
else:
self.data += c
elif self.state == State.TAG_OPEN:
self.tag = Tag()
if c == '!':
self.markup_declaration_buffer = ""
self.state = State.MARKUP_DECLARATION_OPEN
elif c == '/':
self.state = State.END_TAG_OPEN
elif c.isalpha():
self.tag.tag_name = c
self.state = State.TAG_NAME
else:
raise BadCharError(self.state, c)
elif self.state == State.END_TAG_OPEN:
self.tag.endtag = True
if c.isalpha():
self.tag.tag_name = c
self.state = State.TAG_NAME
else:
raise BadCharError(self.state, c)
elif self.state == State.TAG_NAME:
if c in '\t\n\f ':
self.state = State.BEFORE_ATTRIBUTE_NAME
elif c == '/':
self.state = State.SELF_CLOSING_START_TAG
elif c == '>':
self.emit_tag()
self.state = State.DATA
else:
self.tag.tag_name += c
elif self.state == State.BEFORE_ATTRIBUTE_NAME:
if c in '\t\n\f ':
pass
elif c == '/':
self.state = State.SELF_CLOSING_START_TAG
elif c == '>':
self.emit_tag()
self.state = State.DATA
elif c in "\"'<=":
raise BadCharError(self.state, c)
else:
self.attribute_name = c.lower()
self.state = State.ATTRIBUTE_NAME
elif self.state == State.ATTRIBUTE_NAME:
if c in '\t\n\f ':
self.state = State.AFTER_ATTRIBUTE_NAME
elif c == '/':
self.got_attribute()
self.state = State.SELF_CLOSING_START_TAG
elif c == '=':
self.state = State.BEFORE_ATTRIBUTE_VALUE
elif c == '>':
self.emit_tag()
self.state = State.DATA
elif c in "\"'<":
raise BadCharError(self.state, c)
else:
self.attribute_name += c.lower()
elif self.state == State.AFTER_ATTRIBUTE_NAME:
if c in '\t\n\f ':
pass
elif c == '/':
self.got_attribute()
self.state = State.SELF_CLOSING_START_TAG
elif c == '=':
self.state = State.BEFORE_ATTRIBUTE_VALUE
elif c == '>':
self.got_attribute()
self.emit_tag()
self.state = State.DATA
elif c in "\"'<":
raise BadCharError(self.state, c)
elif self.state == State.BEFORE_ATTRIBUTE_VALUE:
if c in '\t\n\f ':
pass
elif c == '"':
self.attribute_value = []
self.state = State.ATTRIBUTE_VALUE_DOUBLE_QUOTED
elif c == '&':
self.attribute_value = []
self.state = State.ATTRIBUTE_VALUE_UNQUOTED
self.feed(c) # rehandle c
elif c == "'":
self.attribute_value = []
self.state = State.ATTRIBUTE_VALUE_SINGLE_QUOTED
elif c in '><=`':
raise BadCharError(self.state, c)
else:
self.attribute_value = [c]
self.state = State.ATTRIBUTE_VALUE_UNQUOTED
elif self.state == State.ATTRIBUTE_VALUE_DOUBLE_QUOTED:
if c == '"':
self.state = State.AFTER_ATTRIBUTE_VALUE
# Pass through; it's the browser's problem to understand these.
#elif c == '&':
# raise Unimplemented
else:
self.add_data_char(self.attribute_value, c)
elif self.state == State.ATTRIBUTE_VALUE_SINGLE_QUOTED:
if c == "'":
self.state = State.AFTER_ATTRIBUTE_VALUE
# Pass through; it's the browser's problem to understand these.
#elif c == '&':
# raise Unimplemented
else:
self.add_data_char(self.attribute_value, c)
elif self.state == State.ATTRIBUTE_VALUE_UNQUOTED:
if c in '\t\n\f ':
self.got_attribute()
self.state = State.BEFORE_ATTRIBUTE_NAME
elif c == '>':
self.got_attribute()
self.emit_tag()
self.state = State.DATA
elif c in "\"'<=`":
raise BadCharError(self.state, c)
# Pass through; it's the browser's problem to understand these.
#elif c == '&':
# raise Unimplemented
else:
self.add_data_char(self.attribute_value, c)
elif self.state == State.AFTER_ATTRIBUTE_VALUE:
self.got_attribute()
if c in '\t\n\f ':
self.state = State.BEFORE_ATTRIBUTE_NAME
elif c == '/':
self.state = State.SELF_CLOSING_START_TAG
elif c == '>':
self.emit_tag()
self.state = State.DATA
else:
raise BadCharError(self.state, c)
elif self.state == State.SELF_CLOSING_START_TAG:
self.tag.startendtag = True
if c == '>':
self.emit_tag()
self.state = State.DATA
else:
raise BadCharError(self.state, c)
elif self.state == State.MARKUP_DECLARATION_OPEN:
self.markup_declaration_buffer += c
if self.markup_declaration_buffer == "--":
self.data = ""
self.state = State.COMMENT_START
elif self.markup_declaration_buffer.lower() == "DOCTYPE".lower():
self.state = State.DOCTYPE
elif self.markup_declaration_buffer == "[CDATA[":
self.data = ""
self.cdata_buffer = ""
self.state = State.CDATA_SECTION
elif not ("--".startswith(self.markup_declaration_buffer) or
"DOCTYPE".lower().startswith(self.markup_declaration_buffer.lower()) or
"[CDATA[".startswith(self.markup_declaration_buffer)):
raise BadCharError(self.state, c)
elif self.state == State.COMMENT_START:
if c == "-":
self.state = State.COMMENT_START_DASH
elif c == ">":
raise BadCharError(self.state, c)
else:
self.data += c
self.state = State.COMMENT
elif self.state == State.COMMENT_START_DASH:
if c == "-":
self.state = State.COMMENT_END
elif c == ">":
raise BadCharError(self.state, c)
else:
self.data += "-" + c
self.state = State.COMMENT
elif self.state == State.COMMENT:
if c == "-":
self.state = State.COMMENT_END_DASH
else:
self.data += c
elif self.state == State.COMMENT_END_DASH:
if c == "-":
self.state = State.COMMENT_END
else:
self.data += "-" + c
self.state = State.COMMENT
elif self.state == State.COMMENT_END:
if c == ">":
self.emit_comment()
self.state = State.DATA
else:
raise BadCharError(self.state, c)
elif self.state == State.DOCTYPE:
if c in "\t\n\f ":
self.data = ""
self.state = State.DOCTYPE_CONTENTS
else:
raise BadCharError(self.state, c)
elif self.state == State.DOCTYPE_CONTENTS:
if c == ">":
self.emit_doctype()
self.state = State.DATA
else:
self.data += c
elif self.state == State.CDATA_SECTION:
self.cdata_buffer += c
if self.cdata_buffer == "]]>":
self.emit_cdata()
self.state = State.DATA
else:
while self.cdata_buffer and not "]]>".startswith(self.cdata_buffer):
self.data += self.cdata_buffer[0]
self.cdata_buffer = self.cdata_buffer[1:]
else:
assert False, "bad state! %r" % self.state
def feed_python(self, tokens):
if self.state == State.BEFORE_ATTRIBUTE_VALUE:
self.attribute_value = [tokens]
self.state = State.ATTRIBUTE_VALUE_UNQUOTED
elif self.state in [State.ATTRIBUTE_VALUE_DOUBLE_QUOTED,
State.ATTRIBUTE_VALUE_SINGLE_QUOTED,
State.ATTRIBUTE_VALUE_UNQUOTED]:
self.attribute_value.append(tokens)
else:
raise ParseError("python not allow in state %r" % State.state_name(self.state))
class HTMLTokenDumper(HTMLTokenizer):
    def handle_data(self, data):
        print("DATA %r" % data)
    def handle_starttag(self, tag_name, attrs):
        print("STARTTAG %r %r" % (tag_name, attrs))
    def handle_startendtag(self, tag_name, attrs):
        print("STARTENDTAG %r %r" % (tag_name, attrs))
    def handle_endtag(self, tag_name):
        print("ENDTAG %r" % tag_name)
def main(filename):
dumper = HTMLTokenDumper()
with open(filename) as f:
for line in f:
for c in line:
dumper.feed(c)
if __name__ == "__main__":
main(*sys.argv[1:])
|
src/pytest_recording/exceptions.py
|
kiwicom/pytest-recording
| 186 |
81715
|
<reponame>kiwicom/pytest-recording
class UsageError(Exception):
"""Error in plugin usage."""
__module__ = "builtins"
|
preql/__main__.py
|
erezsh/Preql
| 522 |
81734
|
<reponame>erezsh/Preql
import json
import argparse
from pathlib import Path
from itertools import chain
import time
from . import Preql, __version__, Signal
from . import settings
parser = argparse.ArgumentParser(description='Preql command-line interface (aka REPL)')
parser.add_argument('-i', '--interactive', action='store_true', default=False,
help="enter interactive mode after running the script")
parser.add_argument('-v', '--version', action='store_true', help="print version")
parser.add_argument('--install-jupyter', action='store_true', help="installs the Preql plugin for Jupyter notebook")
parser.add_argument('--print-sql', action='store_true', help="print the SQL code that's being executed")
parser.add_argument('-f', '--file', type=str, help='path to a Preql script to run')
parser.add_argument('-m', '--module', type=str, help='name of a Preql module to run')
parser.add_argument('--time', action='store_true', help='displays how long the script ran')
parser.add_argument('-c', '--config', type=str, help='path to a JSON configuration file for Preql (default: ~/.preql_conf.json)')
parser.add_argument('database', type=str, nargs='?', default=None,
help="database url (postgres://user:password@host:port/db_name")
parser.add_argument('--python-traceback', action='store_true',
help="Show the Python traceback when an exception causes the interpreter to quit")
def find_dot_preql():
cwd = Path.cwd()
for p in chain([cwd], cwd.parents):
dot_preql = p / ".preql"
if dot_preql.exists():
return dot_preql
def update_settings(path):
config = json.load(path.open())
if 'debug' in config:
settings.debug = config['debug']
if 'color_scheme' in config:
settings.color_theme.update(config['color_scheme'])
def main():
args = parser.parse_args()
if args.version:
print(__version__)
if args.install_jupyter:
from .jup_kernel.install import main as install_jupyter
install_jupyter([])
print("Install successful. To start working, run 'jupyter notebook' and create a new Preql notebook.")
return
if args.config:
update_settings(Path(args.config))
else:
config_path = Path.home() / '.preql_conf.json'
if config_path.exists():
update_settings(config_path)
kw = {'print_sql': args.print_sql}
if args.database:
kw['db_uri'] = args.database
kw['auto_create'] = True
p = Preql(**kw)
interactive = args.interactive
error_code = 0
start = time.time()
try:
if args.file:
p.load(args.file)
elif args.module:
p('import ' + args.module)
elif args.version or args.install_jupyter:
pass
else:
dot_preql = find_dot_preql()
if dot_preql:
print("Auto-running", dot_preql)
p._run_code(dot_preql.read_text(), dot_preql)
interactive = True
except Signal as e:
p._display.print_exception(e)
error_code = -1
if args.python_traceback:
raise
except KeyboardInterrupt:
print("Interrupted (Ctrl+C)")
end = time.time()
if args.time:
        print('Script took %.2f seconds to run' % (end - start))
if interactive:
p.load_all_tables()
p.start_repl()
else:
return error_code
if __name__ == '__main__':
main()
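# Usage sketch (flags as defined by the parser above; the URI is illustrative):
#   python -m preql -i --print-sql sqlite:///my.db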
|
test/test_neg_examples.py
|
michael-harris/zincbase
| 298 |
81748
|
<reponame>michael-harris/zincbase
"""Test negative examples using Countries.
The main idea here is that if we explicitly enter some false facts (signalling
to the KB that they are false, it should make less-wrong predictions
for them, versus just going by its own synthetic negative examples.)
It may have the side effect of pushing UP the probability of other wrong
triples, see e.g. "canada in asia" below.
"""
import context
from zincbase import KB
kb = KB()
kb.seed(555)
kb.from_csv('./assets/countries_s1_train.csv', delimiter='\t')
rule_num = kb.store('~locatedin(canada, africa)')
b = list(kb.query('locatedin(canada, X)'))
assert len(b) == 1; assert b[0]['X'] == 'northern_america'
assert kb.delete_rule(rule_num)
kb.build_kg_model(cuda=False, embedding_size=100)
kb.train_kg_model(steps=500, batch_size=512, neg_ratio=0.01)
canada_in_africa_naive = kb.estimate_triple_prob('canada', 'locatedin', 'africa')
canada_in_asia_naive = kb.estimate_triple_prob('canada', 'locatedin', 'asia')
austria_neighbors_spain_naive = kb.estimate_triple_prob('austria', 'neighbor', 'spain')
austria_neighbors_france_naive = kb.estimate_triple_prob('austria', 'neighbor', 'france')
kb = KB()
kb.seed(555)
kb.from_csv('./assets/countries_s1_train.csv', delimiter='\t')
kb.store('~locatedin(canada, africa)')
kb.store('~neighbor(austria, spain)')
kb.build_kg_model(cuda=False, embedding_size=100)
kb.train_kg_model(steps=500, batch_size=512, neg_ratio=0.1)
canada_in_africa_explicit = kb.estimate_triple_prob('canada', 'locatedin', 'africa')
canada_in_asia_explicit = kb.estimate_triple_prob('canada', 'locatedin', 'asia')
austria_neighbors_spain_explicit = kb.estimate_triple_prob('austria', 'neighbor', 'spain')
austria_neighbors_france_explicit = kb.estimate_triple_prob('austria', 'neighbor', 'france')
assert canada_in_africa_naive > canada_in_africa_explicit
assert austria_neighbors_spain_naive > austria_neighbors_spain_explicit
print('All negative example tests passed.')
|
Basic/Check if A String is Palindrome or Not/SolutionByIshleen.py
|
rajethanm4/Programmers-Community
| 261 |
81773
|
# Program that checks whether the given string is a palindrome.
def function(string):
    if string == string[::-1]:
print("This is a Palindrome String")
else:
print("This is Not a Palindrome String")
string = input("Please enter your own String : ")
function(string)
|
pyvisa/ctwrapper/types.py
|
jpsecher/pyvisa
| 393 |
81786
|
# -*- coding: utf-8 -*-
"""VISA VPP-4.3 data types (VPP-4.3.2 spec, section 3) using ctypes constants.
This file is part of PyVISA.
All data types that are defined by VPP-4.3.2.
The module exports all data types including the pointer and array types. This
means "ViUInt32" and such.
:copyright: 2014-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import ctypes as _ctypes
from .cthelper import FUNCTYPE
# Part One: Type Assignments for VISA and Instrument Drivers, see spec table
# 3.1.1.
#
# Remark: The pointer and probably also the array variants are of no
# significance in Python because there is no native call-by-reference.
# However, as long as I'm not fully sure about this, they won't hurt.
def _type_pair(ctypes_type):
return ctypes_type, _ctypes.POINTER(ctypes_type)
def _type_triplet(ctypes_type):
return _type_pair(ctypes_type) + (_ctypes.POINTER(ctypes_type),)
ViUInt64, ViPUInt64, ViAUInt64 = _type_triplet(_ctypes.c_uint64)
ViInt64, ViPInt64, ViAInt64 = _type_triplet(_ctypes.c_int64)
ViUInt32, ViPUInt32, ViAUInt32 = _type_triplet(_ctypes.c_uint32)
ViInt32, ViPInt32, ViAInt32 = _type_triplet(_ctypes.c_int32)
ViUInt16, ViPUInt16, ViAUInt16 = _type_triplet(_ctypes.c_ushort)
ViInt16, ViPInt16, ViAInt16 = _type_triplet(_ctypes.c_short)
ViUInt8, ViPUInt8, ViAUInt8 = _type_triplet(_ctypes.c_ubyte)
ViInt8, ViPInt8, ViAInt8 = _type_triplet(_ctypes.c_byte)
ViAddr, ViPAddr, ViAAddr = _type_triplet(_ctypes.c_void_p)
ViChar, ViPChar, ViAChar = _type_triplet(_ctypes.c_char)
ViByte, ViPByte, ViAByte = _type_triplet(_ctypes.c_ubyte)
ViBoolean, ViPBoolean, ViABoolean = _type_triplet(ViUInt16)
ViReal32, ViPReal32, ViAReal32 = _type_triplet(_ctypes.c_float)
ViReal64, ViPReal64, ViAReal64 = _type_triplet(_ctypes.c_double)
class ViString(object):
@classmethod
def from_param(cls, obj):
if isinstance(obj, str):
return bytes(obj, "ascii")
return obj
class ViAString(object):
@classmethod
def from_param(cls, obj):
return _ctypes.POINTER(obj)
ViPString = ViString
# This follows visa.h definition, but involves a lot of manual conversion.
# ViBuf, ViPBuf, ViABuf = ViPByte, ViPByte, _ctypes.POINTER(ViPByte)
ViBuf, ViPBuf, ViABuf = ViPString, ViPString, ViAString
def buffer_to_text(buf) -> str:
return buf.value.decode("ascii")
ViRsrc = ViString
ViPRsrc = ViString
ViARsrc = ViAString
ViKeyId, ViPKeyId = ViString, ViPString
ViStatus, ViPStatus, ViAStatus = _type_triplet(ViInt32)
ViVersion, ViPVersion, ViAVersion = _type_triplet(ViUInt32)
_ViObject, ViPObject, ViAObject = _type_triplet(ViUInt32)
_ViSession, ViPSession, ViASession = _type_triplet(ViUInt32)
class ViObject(_ViObject): # type: ignore
@classmethod
def from_param(cls, obj):
if obj is None:
raise ValueError("Session cannot be None. The resource might be closed.")
return _ViObject.from_param(obj)
ViSession = ViObject
ViAttr = ViUInt32
ViConstString = _ctypes.POINTER(ViChar)
# Part Two: Type Assignments for VISA only, see spec table 3.1.2. The
# difference to the above is of no significance in Python, so I use it here
# only for easier synchronisation with the spec.
ViAccessMode, ViPAccessMode = _type_pair(ViUInt32)
ViBusAddress, ViPBusAddress = _type_pair(ViUInt32)
ViBusAddress64, ViPBusAddress64 = _type_pair(ViUInt64)
ViBusSize = ViUInt32
ViAttrState, ViPAttrState = _type_pair(ViUInt32)
# The following is weird, taken from news:<EMAIL>
ViVAList = _ctypes.POINTER(_ctypes.c_char)
ViEventType, ViPEventType, ViAEventType = _type_triplet(ViUInt32)
ViPAttr = _ctypes.POINTER(ViAttr)
ViAAttr = ViPAttr
ViEventFilter = ViUInt32
ViFindList, ViPFindList = _type_pair(ViObject)
ViEvent, ViPEvent = _type_pair(ViObject)
ViJobId, ViPJobId = _type_pair(ViUInt32)
# Class of callback functions for event handling, first type is result type
ViHndlr = FUNCTYPE(ViStatus, ViSession, ViEventType, ViEvent, ViAddr)
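# Usage sketch: these types mirror the C prototypes when declaring foreign
# functions with ctypes, e.g. for ViStatus viOpenDefaultRM(ViPSession sesn):
#
#   lib.viOpenDefaultRM.restype = ViStatus
#   lib.viOpenDefaultRM.argtypes = [ViPSession]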
|
src/dispatch/plugins/dispatch_test/storage.py
|
roor0/dispatch
| 3,417 |
81791
|
from dispatch.plugins.bases import StoragePlugin
class TestStoragePlugin(StoragePlugin):
title = "Dispatch Test Plugin - Storage"
slug = "test-storage"
def get(self, **kwargs):
return
def create(self, items, **kwargs):
return
def update(self, items, **kwargs):
return
def delete(self, items, **kwargs):
return
def list(self, **kwargs):
return
def add_participant(self, items, **kwargs):
return
def remove_participant(self, items, **kwargs):
return
def add_file(self, **kwargs):
return
def delete_file(self, **kwargs):
return
def move_file(self, **kwargs):
return
def list_files(self, **kwargs):
return
|
homeassistant/components/hp_ilo/__init__.py
|
domwillcode/home-assistant
| 30,023 |
81801
|
<reponame>domwillcode/home-assistant<filename>homeassistant/components/hp_ilo/__init__.py<gh_stars>1000+
"""The HP Integrated Lights-Out (iLO) component."""
|
examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/utils/collect_env.py
|
intelkevinputnam/lpot-docs
| 567 |
81822
|
<reponame>intelkevinputnam/lpot-docs<gh_stars>100-1000
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
return "\n Pillow ({})".format(PIL.__version__)
def collect_env_info():
env_str = get_pretty_env_info()
env_str += get_pil_version()
return env_str
|
qiskit/algorithms/phase_estimators/ipe.py
|
t-imamichi/qiskit-core
| 1,456 |
81823
|
<reponame>t-imamichi/qiskit-core
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Iterative Quantum Phase Estimation Algorithm."""
from typing import Optional, Union
import numpy
import qiskit
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.circuit.classicalregister import ClassicalRegister
from qiskit.providers import Backend
from qiskit.utils import QuantumInstance
from .phase_estimator import PhaseEstimator
from .phase_estimator import PhaseEstimatorResult
class IterativePhaseEstimation(PhaseEstimator):
"""Run the Iterative quantum phase estimation (QPE) algorithm.
Given a unitary circuit and a circuit preparing an eigenstate, return the phase of the
eigenvalue as a number in :math:`[0,1)` using the iterative phase estimation algorithm.
[1]: Dobsicek et al. (2006), Arbitrary accuracy iterative phase estimation algorithm as a two
qubit benchmark, `arxiv/quant-ph/0610214 <https://arxiv.org/abs/quant-ph/0610214>`_
"""
def __init__(
self,
num_iterations: int,
quantum_instance: Optional[Union[QuantumInstance, Backend]] = None,
) -> None:
"""Args:
num_iterations: The number of iterations (rounds) of the phase estimation to run.
quantum_instance: The quantum instance on which the circuit will be run.
Raises:
ValueError: if num_iterations is not greater than zero.
"""
if isinstance(quantum_instance, Backend):
quantum_instance = QuantumInstance(quantum_instance)
self._quantum_instance = quantum_instance
if num_iterations <= 0:
raise ValueError("`num_iterations` must be greater than zero.")
self._num_iterations = num_iterations
def construct_circuit(
self,
unitary: QuantumCircuit,
state_preparation: QuantumCircuit,
k: int,
omega: float = 0,
measurement: bool = False,
) -> QuantumCircuit:
"""Construct the kth iteration Quantum Phase Estimation circuit.
For details of parameters, see Fig. 2 in https://arxiv.org/pdf/quant-ph/0610214.pdf.
Args:
unitary: The circuit representing the unitary operator whose eigenvalue (via phase)
will be measured.
state_preparation: The circuit that prepares the state whose eigenphase will be
measured. If this parameter is omitted, no preparation circuit
will be run and input state will be the all-zero state in the
computational basis.
k: the iteration idx.
omega: the feedback angle.
measurement: Boolean flag to indicate if measurement should
be included in the circuit.
Returns:
QuantumCircuit: the quantum circuit per iteration
"""
k = self._num_iterations if k is None else k
# The auxiliary (phase measurement) qubit
phase_register = QuantumRegister(1, name="a")
eigenstate_register = QuantumRegister(unitary.num_qubits, name="q")
qc = QuantumCircuit(eigenstate_register)
qc.add_register(phase_register)
if isinstance(state_preparation, QuantumCircuit):
qc.append(state_preparation, eigenstate_register)
elif state_preparation is not None:
qc += state_preparation.construct_circuit("circuit", eigenstate_register)
# hadamard on phase_register[0]
qc.h(phase_register[0])
# controlled-U
# TODO: We may want to allow flexibility in how the power is computed
# For example, it may be desirable to compute the power via Trotterization, if
# we are doing Trotterization anyway.
unitary_power = unitary.power(2 ** (k - 1)).control()
qc = qc.compose(unitary_power, list(range(1, unitary.num_qubits + 1)) + [0])
qc.p(omega, phase_register[0])
# hadamard on phase_register[0]
qc.h(phase_register[0])
if measurement:
c = ClassicalRegister(1, name="c")
qc.add_register(c)
qc.measure(phase_register, c)
return qc
def _estimate_phase_iteratively(self, unitary, state_preparation):
"""
Main loop of iterative phase estimation.
"""
omega_coef = 0
# k runs from the number of iterations back to 1
for k in range(self._num_iterations, 0, -1):
omega_coef /= 2
if self._quantum_instance.is_statevector:
qc = self.construct_circuit(
unitary, state_preparation, k, -2 * numpy.pi * omega_coef, measurement=False
)
result = self._quantum_instance.execute(qc)
complete_state_vec = result.get_statevector(qc)
ancilla_density_mat = qiskit.quantum_info.partial_trace(
complete_state_vec, range(unitary.num_qubits)
)
ancilla_density_mat_diag = numpy.diag(ancilla_density_mat)
max_amplitude = max(
ancilla_density_mat_diag.min(), ancilla_density_mat_diag.max(), key=abs
)
x = numpy.where(ancilla_density_mat_diag == max_amplitude)[0][0]
else:
qc = self.construct_circuit(
unitary, state_preparation, k, -2 * numpy.pi * omega_coef, measurement=True
)
measurements = self._quantum_instance.execute(qc).get_counts(qc)
x = 1 if measurements.get("1", 0) > measurements.get("0", 0) else 0
omega_coef = omega_coef + x / 2
return omega_coef
# pylint: disable=arguments-differ
def estimate(
self, unitary: QuantumCircuit, state_preparation: QuantumCircuit
) -> "IterativePhaseEstimationResult":
"""
Estimate the eigenphase of the input unitary and initial-state pair.
Args:
unitary: The circuit representing the unitary operator whose eigenvalue (via phase)
will be measured.
state_preparation: The circuit that prepares the state whose eigenphase will be
measured. If this parameter is omitted, no preparation circuit
will be run and input state will be the all-zero state in the
computational basis.
Returns:
Estimated phase in an IterativePhaseEstimationResult object.
"""
phase = self._estimate_phase_iteratively(unitary, state_preparation)
return IterativePhaseEstimationResult(self._num_iterations, phase)
class IterativePhaseEstimationResult(PhaseEstimatorResult):
"""Phase Estimation Result."""
def __init__(self, num_iterations: int, phase: float) -> None:
"""
Args:
num_iterations: number of iterations used in the phase estimation.
phase: the estimated phase.
"""
self._num_iterations = num_iterations
self._phase = phase
@property
def phase(self) -> float:
r"""Return the estimated phase as a number in :math:`[0.0, 1.0)`.
1.0 corresponds to a phase of :math:`2\pi`. It is assumed that the input vector is an
eigenvector of the unitary so that the peak of the probability density occurs at the bit
string that most closely approximates the true phase.
"""
return self._phase
@property
def num_iterations(self) -> int:
r"""Return the number of iterations used in the estimation algorithm."""
return self._num_iterations
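if __name__ == "__main__":
    # Minimal usage sketch (assumes qiskit-aer is installed; values are
    # illustrative): estimate the eigenphase of a T gate acting on |1>,
    # whose exact phase is 1/8.
    from qiskit import Aer

    unitary = QuantumCircuit(1)
    unitary.p(numpy.pi / 4, 0)  # T gate
    state_prep = QuantumCircuit(1)
    state_prep.x(0)  # |1> is the eigenstate with eigenphase 1/8

    ipe = IterativePhaseEstimation(
        num_iterations=3, quantum_instance=Aer.get_backend("aer_simulator")
    )
    print(ipe.estimate(unitary, state_prep).phase)  # expected: 0.125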
|
django_dynamic_fixture/tests/test_django_helper.py
|
Kaniabi/django-dynamic-fixture
| 190 |
81834
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.db import models
import pytest
from django_dynamic_fixture import N, G
from django_dynamic_fixture.models_test import *
from django_dynamic_fixture.django_helper import *
class DjangoHelperAppsTest(TestCase):
def test_get_apps_must_return_all_installed_apps(self):
assert len(get_apps()) >= 1
def test_get_apps_may_be_filtered_by_app_names(self):
apps = get_apps(application_labels=['django_dynamic_fixture'])
assert len(apps) == 1
def test_get_apps_may_ignore_some_apps(self):
apps = len(get_apps(exclude_application_labels=['django_dynamic_fixture']))
assert len(get_apps()) - apps == 1
def test_app_name_must_be_valid(self):
with pytest.raises(Exception):
get_apps(application_labels=['x'])
with pytest.raises(Exception):
get_apps(exclude_application_labels=['x'])
def test_get_app_name_must(self):
import django_dynamic_fixture.models as ddf
assert get_app_name(ddf) == 'django_dynamic_fixture'
def test_get_models_of_an_app_must(self):
ddf = get_apps(application_labels=['django_dynamic_fixture'])[0]
models_ddf = get_models_of_an_app(ddf)
assert len(models_ddf) > 0
assert ModelWithNumbers in models_ddf
class DjangoHelperModelsTest(TestCase):
def test_get_model_name(self):
class MyModel_test_get_model_name(models.Model): pass
assert get_model_name(MyModel_test_get_model_name) == 'MyModel_test_get_model_name'
def test_get_unique_model_name(self):
class MyModel_test_get_unique_model_name(models.Model): pass
assert get_unique_model_name(MyModel_test_get_unique_model_name) == 'django_dynamic_fixture.tests.test_django_helper.MyModel_test_get_unique_model_name'
def test_get_fields_from_model(self):
class Model4GetFields_test_get_fields_from_model(models.Model):
integer = models.IntegerField()
fields = get_fields_from_model(Model4GetFields_test_get_fields_from_model)
assert get_field_by_name_or_raise(Model4GetFields_test_get_fields_from_model, 'id') in fields
assert get_field_by_name_or_raise(Model4GetFields_test_get_fields_from_model, 'integer') in fields
def test_get_local_fields(self):
class ModelForGetLocalFields_test_get_local_fields(models.Model):
integer = models.IntegerField()
fields = get_local_fields(ModelForGetLocalFields_test_get_local_fields)
assert get_field_by_name_or_raise(ModelForGetLocalFields_test_get_local_fields, 'id') in fields
assert get_field_by_name_or_raise(ModelForGetLocalFields_test_get_local_fields, 'integer') in fields
def test_get_field_names_of_model(self):
class Model4GetFieldNames_test_get_field_names_of_model(models.Model):
smallinteger = models.SmallIntegerField()
fields = get_field_names_of_model(Model4GetFieldNames_test_get_field_names_of_model)
assert 'smallinteger' in fields
assert 'unknown' not in fields
def test_get_many_to_many_fields_from_model(self):
class ModelRelated_test_get_many_to_many_fields_from_model(models.Model): pass
class ModelWithM2M_test_get_many_to_many_fields_from_model(models.Model):
manytomany = models.ManyToManyField('ModelRelated_test_get_many_to_many_fields_from_model', related_name='m2m')
fields = get_many_to_many_fields_from_model(ModelWithM2M_test_get_many_to_many_fields_from_model)
assert get_field_by_name_or_raise(ModelWithM2M_test_get_many_to_many_fields_from_model, 'manytomany') in fields
assert get_field_by_name_or_raise(ModelWithM2M_test_get_many_to_many_fields_from_model, 'id') not in fields
def test_is_model_class(self):
class MyModel_test_is_model_class(models.Model): pass
assert is_model_class(MyModel_test_is_model_class) == True
class X(object): pass
assert is_model_class(X) == False
def test_is_model_abstract(self):
class AbstractModel_test_is_model_abstract(models.Model):
class Meta:
abstract = True
assert is_model_abstract(AbstractModel_test_is_model_abstract)
class ConcreteModel_test_is_model_abstract(models.Model):
class Meta:
abstract = False
assert is_model_abstract(ConcreteModel_test_is_model_abstract) == False
def test_is_model_managed(self):
class NotManagedModel_test_is_model_managed(models.Model):
class Meta:
managed = False
assert is_model_managed(NotManagedModel_test_is_model_managed) == False
class ManagedModel_test_is_model_managed(models.Model):
class Meta:
managed = True
assert is_model_managed(ManagedModel_test_is_model_managed)
def test_model_has_the_field(self):
class ModelWithWithoutFields_test_model_has_the_field(models.Model):
integer = models.IntegerField()
selfforeignkey = models.ForeignKey('self', null=True, on_delete=models.DO_NOTHING)
manytomany = models.ManyToManyField('self', related_name='m2m')
assert model_has_the_field(ModelWithWithoutFields_test_model_has_the_field, 'integer')
assert model_has_the_field(ModelWithWithoutFields_test_model_has_the_field, 'selfforeignkey')
assert model_has_the_field(ModelWithWithoutFields_test_model_has_the_field, 'manytomany')
assert model_has_the_field(ModelWithWithoutFields_test_model_has_the_field, 'x') == False
class DjangoHelperFieldsTest(TestCase):
def test_get_unique_field_name(self):
class Model4GetUniqueFieldName_test_get_unique_field_name(models.Model):
integer = models.IntegerField()
field = get_field_by_name_or_raise(Model4GetUniqueFieldName_test_get_unique_field_name, 'integer')
assert get_unique_field_name(field) == 'django_dynamic_fixture.tests.test_django_helper.Model4GetUniqueFieldName_test_get_unique_field_name.integer'
def test_get_related_model(self):
class ModelRelated_test_get_related_model(models.Model): pass
class Model4GetRelatedModel_test_get_related_model(models.Model):
fk = models.ForeignKey(ModelRelated_test_get_related_model, on_delete=models.DO_NOTHING)
assert get_related_model(get_field_by_name_or_raise(Model4GetRelatedModel_test_get_related_model, 'fk')) == \
ModelRelated_test_get_related_model
def test_field_is_a_parent_link(self):
class ModelParent_test_get_related_model(models.Model): pass
class Model4FieldIsParentLink_test_get_related_model(ModelParent):
o2o_with_parent_link = models.OneToOneField(ModelParent_test_get_related_model, parent_link=True, related_name='my_custom_ref_x', on_delete=models.DO_NOTHING)
class Model4FieldIsParentLink2(ModelParent):
o2o_without_parent_link = models.OneToOneField(ModelParent_test_get_related_model, parent_link=False, related_name='my_custom_ref_y', on_delete=models.DO_NOTHING)
# FIXME
# assert field_is_a_parent_link(get_field_by_name_or_raise(Model4FieldIsParentLink, 'o2o_with_parent_link'))
assert field_is_a_parent_link(get_field_by_name_or_raise(Model4FieldIsParentLink2, 'o2o_without_parent_link')) == False
def test_field_has_choices(self):
class Model4FieldHasChoices_test_get_related_model(models.Model):
with_choices = models.IntegerField(choices=((1, 1), (2, 2)))
without_choices = models.IntegerField()
assert field_has_choices(get_field_by_name_or_raise(Model4FieldHasChoices_test_get_related_model, 'with_choices'))
assert field_has_choices(get_field_by_name_or_raise(Model4FieldHasChoices_test_get_related_model, 'without_choices')) == False
def test_field_has_default_value(self):
class Model4FieldHasDefault_test_field_has_default_value(models.Model):
with_default = models.IntegerField(default=1)
without_default = models.IntegerField()
assert field_has_default_value(get_field_by_name_or_raise(Model4FieldHasDefault_test_field_has_default_value, 'with_default'))
assert field_has_default_value(get_field_by_name_or_raise(Model4FieldHasDefault_test_field_has_default_value, 'without_default')) == False
def test_field_is_unique(self):
class Model4FieldMustBeUnique_test_field_is_unique(models.Model):
unique = models.IntegerField(unique=True)
not_unique = models.IntegerField()
assert field_is_unique(get_field_by_name_or_raise(Model4FieldMustBeUnique_test_field_is_unique, 'unique'))
assert field_is_unique(get_field_by_name_or_raise(Model4FieldMustBeUnique_test_field_is_unique, 'not_unique')) == False
def test_is_key_field(self):
class ModelForKeyField_test_is_key_field(models.Model):
integer = models.IntegerField()
assert is_key_field(get_field_by_name_or_raise(ModelForKeyField_test_is_key_field, 'id'))
assert is_key_field(get_field_by_name_or_raise(ModelForKeyField_test_is_key_field, 'integer')) == False
def test_is_relationship_field(self):
class ModelForRelationshipField_test_is_relationship_field(models.Model):
fk = models.ForeignKey('self', on_delete=models.DO_NOTHING)
one2one = models.OneToOneField('self', on_delete=models.DO_NOTHING)
assert is_relationship_field(get_field_by_name_or_raise(ModelForRelationshipField_test_is_relationship_field, 'fk'))
assert is_relationship_field(get_field_by_name_or_raise(ModelForRelationshipField_test_is_relationship_field, 'one2one'))
assert is_relationship_field(get_field_by_name_or_raise(ModelForRelationshipField_test_is_relationship_field, 'id')) == False
def test_is_file_field(self):
class ModelForFileField_test_is_file_field(models.Model):
filefield = models.FileField()
assert is_file_field(get_field_by_name_or_raise(ModelForFileField_test_is_file_field, 'filefield'))
assert is_file_field(get_field_by_name_or_raise(ModelForFileField_test_is_file_field, 'id')) == False
class PrintFieldValuesTest(TestCase):
def test_model_not_saved_do_not_raise_an_exception(self):
instance = N(ModelWithNumbers)
print_field_values(instance)
def test_model_saved_do_not_raise_an_exception(self):
instance = G(ModelWithNumbers)
print_field_values(instance)
def test_print_accept_list_of_models_too(self):
instances = G(ModelWithNumbers, n=2)
print_field_values(instances)
print_field_values([G(ModelWithNumbers), G(ModelWithNumbers)])
def test_print_accept_a_queryset_too(self):
G(ModelWithNumbers, n=2)
print_field_values(ModelWithNumbers.objects.all())
|
src/pymap3d/__init__.py
|
scivision/pymap3d
| 108 |
81849
|
"""
PyMap3D provides coordinate transforms and geodesy functions with a similar API
to the Matlab Mapping Toolbox, but was of course independently derived.
For all functions, the default units are:
distance : float
METERS
angles : float
DEGREES
time : datetime.datetime
UTC time of observation
These functions may be used with any planetary body, provided the appropriate
reference ellipsoid is defined. The default ellipsoid is WGS-84
deg : bool = True
    True means degrees; False means radians.
Most functions accept NumPy arrays of any shape, as well as compatible data types
including AstroPy, Pandas and Xarray that have Numpy-like data properties.
For clarity, we omit all these types in the docs, and just specify the scalar type.
Other languages
---------------
Companion packages exist for:
* Matlab / GNU Octave: [Matmap3D](https://github.com/geospace-code/matmap3d)
* Fortran: [Maptran3D](https://github.com/geospace-code/maptran3d)
"""
__version__ = "2.9.0"
from .aer import ecef2aer, aer2ecef, geodetic2aer, aer2geodetic
from .enu import enu2geodetic, geodetic2enu, aer2enu, enu2aer
from .ned import ned2ecef, ned2geodetic, geodetic2ned, ecef2nedv, ned2aer, aer2ned, ecef2ned
from .ecef import (
geodetic2ecef,
ecef2geodetic,
eci2geodetic,
geodetic2eci,
ecef2enuv,
enu2ecef,
ecef2enu,
enu2uvw,
uvw2enu,
)
from .sidereal import datetime2sidereal, greenwichsrt
from .ellipsoid import Ellipsoid
from .timeconv import str2dt
from .spherical import spherical2geodetic, geodetic2spherical
try:
from .azelradec import radec2azel, azel2radec
from .eci import eci2ecef, ecef2eci
from .aer import eci2aer, aer2eci
except ImportError:
from .vallado import radec2azel, azel2radec
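# Usage sketch (coordinates illustrative):
#
#   import pymap3d as pm
#   x, y, z = pm.geodetic2ecef(42.0, -82.0, 200.0)         # lat, lon [deg], alt [m]
#   az, el, srange = pm.geodetic2aer(42.1, -81.9, 1100.0,  # target
#                                    42.0, -82.0, 200.0)   # observer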
|
tests/storage/test_local.py
|
operatorai/modelstore
| 151 |
81856
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
from modelstore.storage.local import FileSystemStorage
# pylint: disable=unused-import
from tests.storage.test_utils import (
TEST_FILE_CONTENTS,
TEST_FILE_NAME,
file_contains_expected_contents,
remote_file_path,
remote_path,
temp_file,
)
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
@pytest.fixture
def fs_model_store(tmp_path):
return FileSystemStorage(root_path=str(tmp_path))
def test_validate(fs_model_store):
assert fs_model_store.validate()
assert os.path.exists(fs_model_store.root_dir)
def test_push(temp_file, remote_file_path, fs_model_store):
result = fs_model_store._push(temp_file, remote_file_path)
assert result == os.path.join(fs_model_store.root_dir, remote_file_path)
def test_pull(temp_file, tmp_path, remote_file_path, fs_model_store):
# Push the file to storage
remote_destination = fs_model_store._push(temp_file, remote_file_path)
# Pull the file back from storage
local_destination = os.path.join(tmp_path, TEST_FILE_NAME)
result = fs_model_store._pull(remote_destination, tmp_path)
assert result == local_destination
assert os.path.exists(local_destination)
assert file_contains_expected_contents(local_destination)
def test_read_json_objects_ignores_non_json(
tmp_path, remote_path, fs_model_store
):
# Create files with different suffixes
for file_type in ["txt", "json"]:
source = os.path.join(tmp_path, f"test-file-source.{file_type}")
with open(source, "w") as out:
out.write(json.dumps({"key": "value"}))
# Push the file to storage
remote_destination = os.path.join(
remote_path, f"test-file-destination.{file_type}"
)
fs_model_store._push(source, remote_destination)
# Read the json files at the prefix
items = fs_model_store._read_json_objects(remote_path)
assert len(items) == 1
def test_read_json_object_fails_gracefully(
temp_file, remote_file_path, fs_model_store
):
# Push a file that doesn't contain JSON to storage
remote_path = fs_model_store._push(temp_file, remote_file_path)
# Read the json files at the prefix
item = fs_model_store._read_json_object(remote_path)
assert item is None
def test_list_versions_missing_domain(fs_model_store):
versions = fs_model_store.list_versions("domain-that-doesnt-exist")
assert len(versions) == 0
def test_storage_location(fs_model_store):
# Asserts that the location meta data is correctly formatted
prefix = "/path/to/file"
exp = {
"type": "file_system",
"path": prefix,
}
assert fs_model_store._storage_location(prefix) == exp
@pytest.mark.parametrize(
"meta_data,should_raise,result",
[
(
{
"path": "/path/to/file",
},
False,
"/path/to/file",
),
],
)
def test_get_location(fs_model_store, meta_data, should_raise, result):
# Asserts that pulling the location out of meta data is correct
if should_raise:
with pytest.raises(ValueError):
fs_model_store._get_storage_location(meta_data)
else:
assert fs_model_store._get_storage_location(meta_data) == result
@pytest.mark.parametrize(
"state_name,should_create,expect_exists",
[
("state-1", False, False),
("state-2", True, True),
],
)
def test_state_exists(fs_model_store, state_name, should_create, expect_exists):
if should_create:
fs_model_store.create_model_state(state_name)
assert fs_model_store.state_exists(state_name) == expect_exists
|
test/test_pyiter.py
|
ssameerr/pytubes
| 166 |
81869
|
import itertools
import pytest
import tubes
def test_static_tube_takes_a_list():
tube = tubes.Each([1, 2, 3])
assert list(tube) == [1, 2, 3]
def test_static_tube_takes_an_iter():
tube = tubes.Each(itertools.count(10)).first(3)
assert list(tube) == [10, 11, 12]
def test_static_tube_with_strings():
    tube = tubes.Each(['a', 'b', 'c'])
    assert list(tube) == ['a', 'b', 'c']
def test_static_tube_with_strings_to_types():
tube = tubes.Each(['a', 'b', 'c'])
assert list(tube.to(str)) == ['a', 'b', 'c']
assert list(tube.to(bytes)) == [b'a', b'b', b'c']
def test_static_tube_with_encoding():
tube = tubes.Each(['£', '😃', ''])
assert list(tube.to(str)) == ['£', '😃', '']
assert list(tube.to(bytes)) == [b'\xc2\xa3', b'\xf0\x9f\x98\x83', b'']
with pytest.raises(UnicodeEncodeError):
list(tube.to(bytes, codec='ascii'))
|
Server/integrations/privacyidea/privacyidea.py
|
premeau/oxAuth
| 380 |
81870
|
<reponame>premeau/oxAuth
# -*- coding: utf-8 -*-
#
# privacyIDEA is a Multi-Factor-Management system that supports
# a wide variety of token types like smartphone apps, key fob tokens,
# yubikeys, u2f, fido2, email, sms...
# The administrator of an organization can manage the 2nd factors of the
# users centrally in privacyIDEA and connect any application with privacyIDEA
# to secure the login process.
#
# This authentication script adds flexible multi-factor authentication
# to Gluu. See:
# https://privacyidea.org
# Get enterprise support at:
# https://netknights.it/en/produkte/privacyidea/
#
# License: AGPLv3
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
__doc__ = """This script enables Gluu to communicate with the privacyIDEA server
and have privacyIDEA verify the second factor.
"""
__version__ = "1.0.0"
from org.gluu.oxauth.security import Identity
from org.gluu.oxauth.service import AuthenticationService, SessionIdService
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.service import UserService
from org.gluu.util import StringHelper
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
from java.util import Arrays
import sys
PI_USER_AGENT = "privacyIDEA-Gluu"
GLUU_API_VERSION = 11
def logFromSDK(message):
print("privacyIDEA. JavaSDK: " + message)
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
print("__init__")
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print("privacyIDEA. init")
self.pi = None
sdk_path = "/opt/java_sdk.jar"
if configurationAttributes.containsKey("sdk_path"):
sdk_path = configurationAttributes.get("sdk_path").getValue2()
sys.path.append(sdk_path)
try:
from org.privacyidea import Challenge
from org.privacyidea import PrivacyIDEA
from org.privacyidea import PIResponse
except ImportError:
print("privacyIDEA. Java SDK import not found! Make sure the jar is located at '{}'.".format(sdk_path))
            # returning success here allows an error message to be displayed in the authenticate function
# because self.pi will be None
return True
if not configurationAttributes.containsKey("privacyidea_url"):
print("privacyIDEA. Missing mandatory configuration value 'privacyidea_url'!")
return True
privacyidea_url = configurationAttributes.get("privacyidea_url").getValue2()
builder = PrivacyIDEA.Builder(privacyidea_url, PI_USER_AGENT)
if configurationAttributes.containsKey("log_from_sdk"):
builder.setSimpleLog(logFromSDK)
if configurationAttributes.containsKey("sslverify"):
sslverify = configurationAttributes.get("sslverify").getValue2()
builder.setSSLVerify(sslverify != "0")
if configurationAttributes.containsKey("realm"):
realm = configurationAttributes.get("realm").getValue2()
builder.setRealm(realm)
else:
print("privacyIDEA. Config param 'realm' not set")
self.disableGluuPass = False
if configurationAttributes.containsKey("disablegluupass"):
self.disableGluuPass = configurationAttributes.get("disablegluupass").getValue2() == "1"
# Load configuration for optional trigger challenge or send password in first step
self.sendPassword = False
self.triggerChallenge = False
serviceAccountName = None
        serviceAccountPass = None
serviceAccountRealm = None
if configurationAttributes.containsKey("triggerchallenges"):
self.triggerChallenge = configurationAttributes.get("triggerchallenges").getValue2() == "1"
if configurationAttributes.containsKey("sendpassword"):
self.sendPassword = configurationAttributes.get("sendpassword").getValue2() == "1"
if configurationAttributes.containsKey("serviceaccountname"):
serviceAccountName = configurationAttributes.get("serviceaccountname").getValue2()
if configurationAttributes.containsKey("serviceaccountpass"):
serviceAccountPass = configurationAttributes.get("serviceaccountpass").getValue2()
if serviceAccountName is not None and serviceAccountPass is not None and self.triggerChallenge:
builder.setServiceAccount(serviceAccountName, serviceAccountPass)
elif self.triggerChallenge:
print("Trigger challenge enabled but no service account set!")
self.triggerChallenge = False
if configurationAttributes.containsKey("serviceaccountrealm"):
serviceAccountRealm = configurationAttributes.get("serviceaccountrealm").getValue2()
builder.setServiceAccountRealm(serviceAccountRealm)
self.pi = builder.build()
self.sessionIdservice = CdiUtil.bean(SessionIdService)
print("privacyIDEA. init done")
return True
def authenticate(self, configurationAttributes, requestParameters, step):
#print("Authenticate step={} with sendpass={} and triggerchallenge={}".format(step, self.sendPassword, self.triggerChallenge))
fm = CdiUtil.bean(FacesMessages)
fm.clear()
fm.setKeepMessages()
if self.pi is None:
fm.add(FacesMessage.SEVERITY_ERROR, "Failed to communicate to privacyIDEA. Possible misconfiguration. Please have the administrator check the log files.")
return False
identity = CdiUtil.bean(Identity)
if step == 1:
credentials = identity.getCredentials()
username = credentials.getUsername()
            password = credentials.getPassword()
if username:
if self.sendPassword:
response = self.pi.validateCheck(username, password)
if response:
# First check if what was entered is sufficient to log in
if response.getValue():
logged_in = self.login(username, password)
if logged_in:
self.addToSession("auth_success", True)
return logged_in
# If not, check if transaction was triggered
elif response.getTransactionID():
self.evaluateTriggeredChallenges(response, identity)
else:
print("privacyIDEA. Empty response from server")
fm.add(FacesMessage.SEVERITY_ERROR, "No response from the privacyIDEA Server. Please check the connection!")
elif self.triggerChallenge:
response = self.pi.triggerChallenges(username)
if response is None:
fm.add(FacesMessage.SEVERITY_ERROR, "Failed to communicate to privacyIDEA. Possible misconfiguration. Please have the administrator check the log files.")
print("Service account misconfiguration or no response from the privacyIDEA server!")
elif response.getTransactionID():
self.evaluateTriggeredChallenges(response, identity)
else:
# Setup for just OTP in second step
identity.setWorkingParameter("otp_available", "1")
identity.setWorkingParameter("transaction_message", "Please enter the OTP:")
self.addToSession("currentUser", username)
self.addToSession("currentPassword", password)
return True
else:
#print("privacyIDEA. Username is empty")
fm.add(FacesMessage.SEVERITY_ERROR, "Please enter a username!")
return False
else:
# Get the user from step 1
currentUser = self.getFromSession("currentUser")
currentPassword = self.getFromSession("currentPassword")
if currentUser and currentPassword:
self.login(currentUser, currentPassword)
else:
print("privacyIDEA. No user found in session for second step")
fm.add(FacesMessage.SEVERITY_ERROR, "Session data got lost. Please try to restart the authentication!")
return False
            mode = "otp"
            try:
                # Persist the mode between the script and the js
                mode = requestParameters.get("modeField")[0].strip()
                identity.setWorkingParameter("mode", mode)
            except TypeError:
                print("privacyIDEA. Mode not found in request parameters, defaulting to 'otp'")
txid = self.getFromSession("transaction_id")
# If mode is push: poll for the transactionID to see if the user confirmed on the smartphone
if mode == "push":
if not txid:
print("privacyIDEA. Transaction ID not found in session, but it is mandatory for polling!")
fm.add(FacesMessage.SEVERITY_ERROR, "Your transaction id could not be found. Please try to restart the authentication!")
return False
if self.pi.pollTransaction(txid):
# If polling is successful, the authentication has to be finished by a call to validateCheck
# with the username, NO otp and the transactionID
response = self.pi.validateCheck(currentUser, "", txid)
return response.getValue()
elif mode == "otp":
                otp = None
                try:
                    otp = requestParameters.get("otp")[0]
                except TypeError:
                    print("privacyIDEA. Unable to obtain OTP from requestParameters, but it is required!")
                    fm.add(FacesMessage.SEVERITY_ERROR, "Your input could not be read. Please try to restart the authentication!")
if otp:
# Either do validate/check with transaction id if there is one in the session or just with the input
if txid:
resp = self.pi.validateCheck(currentUser, otp, txid)
else:
resp = self.pi.validateCheck(currentUser, otp)
if resp:
return resp.getValue()
return False
def evaluateTriggeredChallenges(self, response, identity):
identity.setWorkingParameter("transaction_message", response.getMessage())
self.addToSession("transaction_id", response.getTransactionID())
# Check if push is available
tttList = response.getTriggeredTokenTypes()
if tttList.contains("push"):
identity.setWorkingParameter("push_available", "1")
tttList.remove("push")
# Check if an input field is needed for any other token type
if tttList.size() > 0:
identity.setWorkingParameter("otp_available", "1")
def login(self, username, password):
#print("Login with user={} and pass={}, verifyGluuPassword={}".format(username, password, self.verifyGluuPassword))
authenticationService = CdiUtil.bean(AuthenticationService)
if not self.disableGluuPass:
logged_in = authenticationService.authenticate(username, password)
else:
logged_in = authenticationService.authenticate(username)
return logged_in
def addToSession(self, key, value):
#print("addToSession: {}, {}".format(key, value))
session = self.sessionIdservice.getSessionId()
session.getSessionAttributes().put(key, value)
self.sessionIdservice.updateSessionId(session)
def getFromSession(self, key):
#print("getFromSession: {}".format(key))
session = self.sessionIdservice.getSessionId()
return session.getSessionAttributes().get(key) if session else None
def prepareForStep(self, configurationAttributes, requestParameters, step):
#print("prepareForStep {}, params={}".format(step, requestParameters))
if step == 1: return True
# Set the initial state for our template
identity = CdiUtil.bean(Identity)
identity.setWorkingParameter("mode", "otp")
return True
def getExtraParametersForStep(self, configurationAttributes, step):
#print("getExtraParametersForStep {}".format(step))
return Arrays.asList("transaction_message", "push_available", "otp_available", "mode")
def getCountAuthenticationSteps(self, configurationAttributes):
#print("getCountAuthenticationSteps")
if self.getFromSession("auth_success"):
#print("Auth success in session, returning 1")
return 1
else:
#print("Auth success not in session, returning 2")
return 2
def getPageForStep(self, configurationAttributes, step):
#print("getPageForStep {}".format(step))
return "" if step == 1 else "/auth/privacyidea/privacyidea.xhtml"
def getNextStep(self, configurationAttributes, requestParameters, step):
#print("getNextStep {}".format(step))
return -1
def destroy(self, configurationAttributes):
#print("destroy")
return True
def getApiVersion(self):
#print("getApiVersion = {}".format(SCRIPT_API_VERSION))
return GLUU_API_VERSION
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
#print("isValidAuthenticationMethod")
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
#print("getAlternativeAuthenticationMethod")
return None
def getAuthenticationMethodClaims(self, requestParameters):
#print("getAuthenticationMethodClaims")
return None
def logout(self, configurationAttributes, requestParameters):
#print("logout")
return True
|
ztag/annotations/FtpSharp.py
|
justinbastress/ztag
| 107 |
81875
|
<filename>ztag/annotations/FtpSharp.py
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag.annotation import Type
from ztag.annotation import Manufacturer
from ztag import protocols
import ztag.test
class FtpSharp(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
    manufact_re = re.compile(
        r"^220 SHARP ((MX)|(AR))-[0-9A-Z]+ Ver \d+(\.[0-9a-z]+)+ FTP server",
        re.IGNORECASE
    )
    product_re = re.compile(r"SHARP (.+) Ver", re.IGNORECASE)
    version_re = re.compile(
        r"Ver (\d+(?:\.\d+[a-z]?)*) FTP",
        re.IGNORECASE
    )
tests = {
"FtpSharp_1": {
"global_metadata": {
"device_type": Type.GENERIC_PRINTER,
"manufacturer": Manufacturer.SHARP,
"product": "MX-5110N"
},
"local_metadata": {
"version": "01.05.00.0m.80"
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
if self.manufact_re.search(banner):
meta.global_metadata.device_type = Type.GENERIC_PRINTER
meta.global_metadata.manufacturer = Manufacturer.SHARP
product = self.product_re.search(banner).group(1)
meta.global_metadata.product = product
match = self.version_re.search(banner)
meta.local_metadata.version = match.group(1)
return meta
""" Tests
"220 SHARP MX-3100N Ver 01.05.00.0b FTP server.\r\n"
"220 SHARP MX-2010U Ver 01.05.00.2k.56 FTP server.\r\n"
"220 SHARP MX-2300N Ver 01.02.00.0i FTP server.\r\n"
"220 SHARP MX-5001N Ver 01.05.00.0k FTP server.\r\n"
"220 SHARP MX-M502N Ver 01.05.00.0m FTP server.\r\n"
"220 SHARP MX-C312 Ver 01.05.00.0m FTP server.\r\n"
"220 SHARP MX-4140N Ver 01.06.00.0f.01 FTP server.\r\n"
"220 SHARP MX-5110N Ver 01.05.00.0m.80 FTP server.\r\n"
"220 SHARP MX-M450N Ver 01.04.00.0g FTP server.\r\n"
"220 SHARP MX-C312 Ver 01.05.00.0m FTP server.\r\n"
"220 SHARP AR-M257 Ver 01.04.00.0e FTP server.\r\n"
"220 SHARP MX-M550N Ver 01.04.00.0c FTP server.\r\n"
"220 SHARP MX-C300W Ver 02.03.E1.00 FTP server.\r\n"
"220 SHARP MX-M452N Ver 01.05.00.0k FTP server.\r\n"
"220 SHARP MX-M452N Ver 01.05.00.0k FTP server.\r\n"
"220 SHARP MX-2010U Ver 01.05.00.2k.51 FTP server.\r\n"
"220 SHARP MX-2010U Ver 01.05.00.2k.56 FTP server.\r\n"
"220 SHARP MX-2615N Ver 01.05.00.0q.06 FTP server.\r\n"
"220 SHARP MX-M450U Ver 01.04.00.0e FTP server.\r\n"
"220 SHARP MX-4101N Ver 01.05.00.0k FTP server.\r\n"
"220 SHARP MX-M452N Ver 01.05.00.0k FTP server.\r\n"
"220 SHARP MX-4112N Ver 01.05.00.0o.12 FTP server.\r\n"
"220 SHARP MX-2300N Ver 01.02.00.0d FTP server.\r\n"
"220 SHARP MX-2314N Ver 01.05.00.0q.06 FTP server.\r\n"
"220 SHARP MX-3501N Ver 01.02.00.0e FTP server.\r\n"
"220 SHARP MX-6240N Ver 01.06.00.00.107 FTP server.\r\n"
"220 SHARP MX-2600FN Ver 01.05.00.0m FTP server.\r\n"
"220 SHARP MX-2300N Ver 01.02.00.0i FTP server.\r\n"
"220 SHARP MX-B400P Ver 01.05.00.0k FTP server.\r\n"
"220 SHARP MX-5112N Ver 01.05.00.0o.12 FTP server.\r\n"
"220 SHARP MX-2610N Ver 01.05.00.0m.93.U FTP server.\r\n"
"""
|
043_face_landmark/lib/helper/init.py
|
IgiArdiyanto/PINTO_model_zoo
| 1,529 |
81884
|
<filename>043_face_landmark/lib/helper/init.py
import tensorflow as tf
def init(*args):
    if len(args) == 1:
        use_pb = True
        pb_path = args[0]
    else:
        use_pb = False
        meta_path = args[0]
        restore_model_path = args[1]
    def ini_ckpt():
        graph = tf.Graph()
        # enter the graph context so the session and the restored variables
        # live in *this* graph, not in the process-wide default graph
        with graph.as_default():
            configProto = tf.ConfigProto()
            configProto.gpu_options.allow_growth = True
            sess = tf.Session(config=configProto)
            saver = tf.train.import_meta_graph(meta_path)
            saver.restore(sess, restore_model_path)
        print("Model restored!")
        return (graph, sess)
    def init_pb(model_path):
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.2
        compute_graph = tf.Graph()
        # import the frozen GraphDef into its own graph for the same reason
        with compute_graph.as_default():
            sess = tf.Session(config=config)
            with tf.gfile.GFile(model_path, 'rb') as fid:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(fid.read())
                tf.import_graph_def(graph_def, name='')
        return (compute_graph, sess)
    if use_pb:
        model = init_pb(pb_path)
    else:
        model = ini_ckpt()
    graph, sess = model
    return graph, sess
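# Usage sketch (paths are placeholders): pass one argument for a frozen
# graph, or two for a checkpoint:
#   graph, sess = init("keypoints.pb")
#   graph, sess = init("model.meta", "model.ckpt-10000")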
|
test/example-jit.py
|
KennethNielsen/llvmpy
| 140 |
81903
|
<reponame>KennethNielsen/llvmpy<filename>test/example-jit.py
#!/usr/bin/env python
# Import the llvm-py modules.
from llvm import *
from llvm.core import *
from llvm.ee import * # new import: ee = Execution Engine
import logging
import unittest
class TestExampleJIT(unittest.TestCase):
def test_example_jit(self):
# Create a module, as in the previous example.
my_module = Module.new('my_module')
ty_int = Type.int() # by default 32 bits
ty_func = Type.function(ty_int, [ty_int, ty_int])
f_sum = my_module.add_function(ty_func, "sum")
f_sum.args[0].name = "a"
f_sum.args[1].name = "b"
bb = f_sum.append_basic_block("entry")
builder = Builder.new(bb)
tmp = builder.add(f_sum.args[0], f_sum.args[1], "tmp")
builder.ret(tmp)
# Create an execution engine object. This will create a JIT compiler
# on platforms that support it, or an interpreter otherwise.
ee = ExecutionEngine.new(my_module)
        # The arguments need to be passed as "GenericValue" objects.
arg1_value = 100
arg2_value = 42
arg1 = GenericValue.int(ty_int, arg1_value)
arg2 = GenericValue.int(ty_int, arg2_value)
# Now let's compile and run!
retval = ee.run_function(f_sum, [arg1, arg2])
# The return value is also GenericValue. Let's print it.
logging.debug("returned %d", retval.as_int())
self.assertEqual(retval.as_int(), (arg1_value + arg2_value))
if __name__ == '__main__':
unittest.main()
|
tests/test_match.py
|
themadsens/qfc
| 600 |
81908
|
<filename>tests/test_match.py<gh_stars>100-1000
import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..'))
from qfc.core import filter_files, get_weight
def _equals(marks_list1, marks_list2):
l1 = sorted(marks_list1)
l2 = sorted(marks_list2)
if len(l1) != len(l2):
return False
for i,_ in enumerate(l1):
if l1[i] != l2[i]:
return False
return True
def test_filter_files():
files = [
'/',
'/a/',
'/b/',
'/a/b',
'/a/b/c',
'/b/a/',
'/b/a/c',
'd',
'da'
]
assert(_equals(filter_files(files,''), ['/','d','da']))
assert(_equals(filter_files(files,'/'), ['/']))
assert(_equals(filter_files(files,'a'), ['/a/', '/b/a/', 'da']))
def test_weight():
assert(get_weight('a','') == 1001)
assert(get_weight('a/','') == 1000)
assert(get_weight('a/b/','') == 2000)
assert(get_weight('a/b/c','') == 3001)
assert(get_weight('a','a') == 1001)
assert(get_weight('ab','a') == 1021)
assert(get_weight('bab','a') == 1111)
assert(get_weight('a_b','a') == 1011)
assert(get_weight('root/a_b','a') == 2011)
assert(get_weight('root/a_b_c_d_e_f_g_h_i_j_k','k') == 2091)
assert(get_weight('a/b/c/d/e/f/g/h/i/j/k','k') == 10001)
assert(get_weight('a/B/','b') == 2000)
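# Observations inferred from the assertions above (not documented qfc API):
# weights grow by roughly 1000 per path component, files score +1 over
# directories, and matches at the start of a component or after a word
# separator add a smaller penalty than mid-word matches, so they presumably
# rank better (lower weight).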
|
release/stubs/System/Windows/Forms/VisualStyles.py
|
htlcnn/ironpython-stubs
| 182 |
81910
|
# encoding: utf-8
# module System.Windows.Forms.VisualStyles calls itself VisualStyles
# from System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class BackgroundType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the source of a visual style element's background.
enum BackgroundType, values: BorderFill (1), ImageFile (0), None (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
BorderFill = None
ImageFile = None
None = None
value__ = None
class BooleanProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the Boolean properties of a visual style element.
enum BooleanProperty, values: AlwaysShowSizingBar (2208), AutoSize (2202), BackgroundFill (2205), BorderOnly (2203), Composited (2204), GlyphOnly (2207), GlyphTransparent (2206), IntegralSizing (2211), MirrorImage (2209), SourceGrow (2212), SourceShrink (2213), Transparent (2201), UniformSizing (2210)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
AlwaysShowSizingBar = None
AutoSize = None
BackgroundFill = None
BorderOnly = None
Composited = None
GlyphOnly = None
GlyphTransparent = None
IntegralSizing = None
MirrorImage = None
SourceGrow = None
SourceShrink = None
Transparent = None
UniformSizing = None
value__ = None
class BorderType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the border type of a visual style element with a filled-border background.
enum BorderType, values: Ellipse (2), Rectangle (0), RoundedRectangle (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Ellipse = None
Rectangle = None
RoundedRectangle = None
value__ = None
class CheckBoxState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a check box that is drawn with visual styles.
enum CheckBoxState, values: CheckedDisabled (8), CheckedHot (6), CheckedNormal (5), CheckedPressed (7), MixedDisabled (12), MixedHot (10), MixedNormal (9), MixedPressed (11), UncheckedDisabled (4), UncheckedHot (2), UncheckedNormal (1), UncheckedPressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
CheckedDisabled = None
CheckedHot = None
CheckedNormal = None
CheckedPressed = None
MixedDisabled = None
MixedHot = None
MixedNormal = None
MixedPressed = None
UncheckedDisabled = None
UncheckedHot = None
UncheckedNormal = None
UncheckedPressed = None
value__ = None
class ColorProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the color properties of a visual style element.
enum ColorProperty, values: AccentColorHint (3823), BorderColor (3801), BorderColorHint (3822), EdgeDarkShadowColor (3807), EdgeFillColor (3808), EdgeHighlightColor (3805), EdgeLightColor (3804), EdgeShadowColor (3806), FillColor (3802), FillColorHint (3821), GlowColor (3816), GlyphTextColor (3819), GlyphTransparentColor (3820), GradientColor1 (3810), GradientColor2 (3811), GradientColor3 (3812), GradientColor4 (3813), GradientColor5 (3814), ShadowColor (3815), TextBorderColor (3817), TextColor (3803), TextShadowColor (3818), TransparentColor (3809)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
AccentColorHint = None
BorderColor = None
BorderColorHint = None
EdgeDarkShadowColor = None
EdgeFillColor = None
EdgeHighlightColor = None
EdgeLightColor = None
EdgeShadowColor = None
FillColor = None
FillColorHint = None
GlowColor = None
GlyphTextColor = None
GlyphTransparentColor = None
GradientColor1 = None
GradientColor2 = None
GradientColor3 = None
GradientColor4 = None
GradientColor5 = None
ShadowColor = None
TextBorderColor = None
TextColor = None
TextShadowColor = None
TransparentColor = None
value__ = None
class ComboBoxState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a combo box that is drawn with visual styles.
enum ComboBoxState, values: Disabled (4), Hot (2), Normal (1), Pressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Disabled = None
Hot = None
Normal = None
Pressed = None
value__ = None
class ContentAlignment(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies how text is aligned in a window caption.
enum ContentAlignment, values: Center (1), Left (0), Right (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Center = None
Left = None
Right = None
value__ = None
class EdgeEffects(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual effects that can be applied to the edges of a visual style element.
enum (flags) EdgeEffects, values: FillInterior (2048), Flat (4096), Mono (32768), None (0), Soft (16384)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
FillInterior = None
Flat = None
Mono = None
None = None
Soft = None
value__ = None
class Edges(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies which edges of a visual style element to draw.
enum (flags) Edges, values: Bottom (8), Diagonal (16), Left (1), Right (4), Top (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Bottom = None
Diagonal = None
Left = None
Right = None
Top = None
value__ = None
class EdgeStyle(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the styles that can be applied to the edges of a visual style element.
enum EdgeStyle, values: Bump (9), Etched (6), Raised (5), Sunken (10)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Bump = None
Etched = None
Raised = None
Sunken = None
value__ = None
class EnumProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the enumerated type properties of a visual style element.
enum EnumProperty, values: BackgroundType (4001), BorderType (4002), ContentAlignment (4006), FillType (4003), GlyphFontSizingType (4014), GlyphType (4012), HorizontalAlignment (4005), IconEffect (4009), ImageLayout (4011), ImageSelectType (4013), OffsetType (4008), SizingType (4004), TextShadowType (4010), TrueSizeScalingType (4015), VerticalAlignment (4007)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
BackgroundType = None
BorderType = None
ContentAlignment = None
FillType = None
GlyphFontSizingType = None
GlyphType = None
HorizontalAlignment = None
IconEffect = None
ImageLayout = None
ImageSelectType = None
OffsetType = None
SizingType = None
TextShadowType = None
TrueSizeScalingType = None
value__ = None
VerticalAlignment = None
class FilenameProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the names of the image files that are used to draw a visual style element.
enum FilenameProperty, values: GlyphImageFile (3008), ImageFile (3001), ImageFile1 (3002), ImageFile2 (3003), ImageFile3 (3004), ImageFile4 (3005), ImageFile5 (3006), StockImageFile (3007)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
GlyphImageFile = None
ImageFile = None
ImageFile1 = None
ImageFile2 = None
ImageFile3 = None
ImageFile4 = None
ImageFile5 = None
StockImageFile = None
value__ = None
class FillType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the interior of visual style elements with a filled border background.
enum FillType, values: HorizontalGradient (2), RadialGradient (3), Solid (0), TileImage (4), VerticalGradient (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
HorizontalGradient = None
RadialGradient = None
Solid = None
TileImage = None
value__ = None
VerticalGradient = None
class FontProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the font properties of a visual style element.
enum FontProperty, values: GlyphFont (2601)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
GlyphFont = None
value__ = None
class GlyphFontSizingType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies when the visual style selects a different glyph font size.
enum GlyphFontSizingType, values: Dpi (2), None (0), Size (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Dpi = None
None = None
Size = None
value__ = None
class GlyphType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the type of glyph for elements with a bitmap background.
enum GlyphType, values: FontGlyph (2), ImageGlyph (1), None (0)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
FontGlyph = None
ImageGlyph = None
None = None
value__ = None
class GroupBoxState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a group box that is drawn with visual styles.
enum GroupBoxState, values: Disabled (2), Normal (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Disabled = None
Normal = None
value__ = None
class HitTestCode(Enum, IComparable, IFormattable, IConvertible):
"""
Describes the location of a point in the background specified by a visual style.
enum HitTestCode, values: Bottom (15), BottomLeft (16), BottomRight (17), Client (1), Left (10), Nowhere (0), Right (11), Top (12), TopLeft (13), TopRight (14)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Bottom = None
BottomLeft = None
BottomRight = None
Client = None
Left = None
Nowhere = None
Right = None
Top = None
TopLeft = None
TopRight = None
value__ = None
class HitTestOptions(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the options that can be used when performing a hit test on the background specified by a visual style.
enum (flags) HitTestOptions, values: BackgroundSegment (0), Caption (4), FixedBorder (2), ResizingBorder (240), ResizingBorderBottom (128), ResizingBorderLeft (16), ResizingBorderRight (64), ResizingBorderTop (32), SizingTemplate (256), SystemSizingMargins (512)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
BackgroundSegment = None
Caption = None
FixedBorder = None
ResizingBorder = None
ResizingBorderBottom = None
ResizingBorderLeft = None
ResizingBorderRight = None
ResizingBorderTop = None
SizingTemplate = None
SystemSizingMargins = None
value__ = None
class HorizontalAlign(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the horizontal alignment for visual style elements with a fixed size.
enum HorizontalAlign, values: Center (1), Left (0), Right (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Center = None
Left = None
Right = None
value__ = None
class IconEffect(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual effect that the visual style will apply to an icon.
enum IconEffect, values: Alpha (4), Glow (1), None (0), Pulse (3), Shadow (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Alpha = None
Glow = None
None = None
Pulse = None
Shadow = None
value__ = None
class ImageOrientation(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies how multiple images are arranged in a single image file.
enum ImageOrientation, values: Horizontal (1), Vertical (0)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Horizontal = None
value__ = None
Vertical = None
class ImageSelectType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies when the visual style selects a different multiple-image file to draw an element.
enum ImageSelectType, values: Dpi (2), None (0), Size (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Dpi = None
None = None
Size = None
value__ = None
class IntegerProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the integer properties of a visual style element.
enum IntegerProperty, values: AlphaLevel (2402), AlphaThreshold (2415), BorderSize (2403), GlyphIndex (2418), GradientRatio1 (2406), GradientRatio2 (2407), GradientRatio3 (2408), GradientRatio4 (2409), GradientRatio5 (2410), Height (2417), ImageCount (2401), MinDpi1 (2420), MinDpi2 (2421), MinDpi3 (2422), MinDpi4 (2423), MinDpi5 (2424), ProgressChunkSize (2411), ProgressSpaceSize (2412), RoundCornerHeight (2405), RoundCornerWidth (2404), Saturation (2413), TextBorderSize (2414), TrueSizeStretchMark (2419), Width (2416)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
AlphaLevel = None
AlphaThreshold = None
BorderSize = None
GlyphIndex = None
GradientRatio1 = None
GradientRatio2 = None
GradientRatio3 = None
GradientRatio4 = None
GradientRatio5 = None
Height = None
ImageCount = None
MinDpi1 = None
MinDpi2 = None
MinDpi3 = None
MinDpi4 = None
MinDpi5 = None
ProgressChunkSize = None
ProgressSpaceSize = None
RoundCornerHeight = None
RoundCornerWidth = None
Saturation = None
TextBorderSize = None
TrueSizeStretchMark = None
value__ = None
Width = None
class MarginProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the margin properties of a visual style element.
enum MarginProperty, values: CaptionMargins (3603), ContentMargins (3602), SizingMargins (3601)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
CaptionMargins = None
ContentMargins = None
SizingMargins = None
value__ = None
class OffsetType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies where an offset is applied to a window element.
enum OffsetType, values: AboveLastButton (12), BelowLastButton (13), BottomLeft (3), BottomMiddle (5), BottomRight (4), LeftOfCaption (8), LeftOfLastButton (10), MiddleLeft (6), MiddleRight (7), RightOfCaption (9), RightOfLastButton (11), TopLeft (0), TopMiddle (2), TopRight (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
AboveLastButton = None
BelowLastButton = None
BottomLeft = None
BottomMiddle = None
BottomRight = None
LeftOfCaption = None
LeftOfLastButton = None
MiddleLeft = None
MiddleRight = None
RightOfCaption = None
RightOfLastButton = None
TopLeft = None
TopMiddle = None
TopRight = None
value__ = None
class PointProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the point properties of a visual style element.
enum PointProperty, values: MinSize (3403), MinSize1 (3404), MinSize2 (3405), MinSize3 (3406), MinSize4 (3407), MinSize5 (3408), Offset (3401), TextShadowOffset (3402)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
MinSize = None
MinSize1 = None
MinSize2 = None
MinSize3 = None
MinSize4 = None
MinSize5 = None
Offset = None
TextShadowOffset = None
value__ = None
class PushButtonState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a button that is drawn with visual styles.
enum PushButtonState, values: Default (5), Disabled (4), Hot (2), Normal (1), Pressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Default = None
Disabled = None
Hot = None
Normal = None
Pressed = None
value__ = None
class RadioButtonState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of an option button (also known as a radio button) that is drawn with visual styles.
enum RadioButtonState, values: CheckedDisabled (8), CheckedHot (6), CheckedNormal (5), CheckedPressed (7), UncheckedDisabled (4), UncheckedHot (2), UncheckedNormal (1), UncheckedPressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
CheckedDisabled = None
CheckedHot = None
CheckedNormal = None
CheckedPressed = None
UncheckedDisabled = None
UncheckedHot = None
UncheckedNormal = None
UncheckedPressed = None
value__ = None
class ScrollBarArrowButtonState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a scroll arrow that is drawn with visual styles.
enum ScrollBarArrowButtonState, values: DownDisabled (8), DownHot (6), DownNormal (5), DownPressed (7), LeftDisabled (12), LeftHot (10), LeftNormal (9), LeftPressed (11), RightDisabled (16), RightHot (14), RightNormal (13), RightPressed (15), UpDisabled (4), UpHot (2), UpNormal (1), UpPressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
DownDisabled = None
DownHot = None
DownNormal = None
DownPressed = None
LeftDisabled = None
LeftHot = None
LeftNormal = None
LeftPressed = None
RightDisabled = None
RightHot = None
RightNormal = None
RightPressed = None
UpDisabled = None
UpHot = None
UpNormal = None
UpPressed = None
value__ = None
class ScrollBarSizeBoxState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a scroll bar sizing handle that is drawn with visual styles.
enum ScrollBarSizeBoxState, values: LeftAlign (2), RightAlign (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
LeftAlign = None
RightAlign = None
value__ = None
class ScrollBarState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a scroll bar that is drawn with visual styles.
enum ScrollBarState, values: Disabled (4), Hot (2), Normal (1), Pressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Disabled = None
Hot = None
Normal = None
Pressed = None
value__ = None
class SizingType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies how elements with a bitmap background will adjust to fill a bounds.
enum SizingType, values: FixedSize (0), Stretch (1), Tile (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
FixedSize = None
Stretch = None
Tile = None
value__ = None
class StringProperty(Enum, IComparable, IFormattable, IConvertible):
"""
Identifies the string properties of a visual style element.
enum StringProperty, values: Text (3201)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Text = None
value__ = None
class TabItemState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a tab item that is drawn with visual styles.
enum TabItemState, values: Disabled (4), Hot (2), Normal (1), Selected (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Disabled = None
Hot = None
Normal = None
Selected = None
value__ = None
class TextBoxState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a text box that is drawn with visual styles.
enum TextBoxState, values: Assist (7), Disabled (4), Hot (2), Normal (1), Readonly (6), Selected (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Assist = None
Disabled = None
Hot = None
Normal = None
Readonly = None
Selected = None
value__ = None
class TextMetrics(object):
""" Provides basic information about the font specified by a visual style for a particular element. """
Ascent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the ascent of characters in the font.
Get: Ascent(self: TextMetrics) -> int
Set: Ascent(self: TextMetrics) = value
"""
AverageCharWidth = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the average width of characters in the font.
Get: AverageCharWidth(self: TextMetrics) -> int
Set: AverageCharWidth(self: TextMetrics) = value
"""
BreakChar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the character used to define word breaks for text justification.
Get: BreakChar(self: TextMetrics) -> Char
Set: BreakChar(self: TextMetrics) = value
"""
CharSet = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the character set of the font.
Get: CharSet(self: TextMetrics) -> TextMetricsCharacterSet
Set: CharSet(self: TextMetrics) = value
"""
DefaultChar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the character to be substituted for characters not in the font.
Get: DefaultChar(self: TextMetrics) -> Char
Set: DefaultChar(self: TextMetrics) = value
"""
Descent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the descent of characters in the font.
Get: Descent(self: TextMetrics) -> int
Set: Descent(self: TextMetrics) = value
"""
DigitizedAspectX = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the horizontal aspect of the device for which the font was designed.
Get: DigitizedAspectX(self: TextMetrics) -> int
Set: DigitizedAspectX(self: TextMetrics) = value
"""
DigitizedAspectY = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the vertical aspect of the device for which the font was designed.
Get: DigitizedAspectY(self: TextMetrics) -> int
Set: DigitizedAspectY(self: TextMetrics) = value
"""
ExternalLeading = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the amount of extra leading that the application adds between rows.
Get: ExternalLeading(self: TextMetrics) -> int
Set: ExternalLeading(self: TextMetrics) = value
"""
FirstChar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the first character defined in the font.
Get: FirstChar(self: TextMetrics) -> Char
Set: FirstChar(self: TextMetrics) = value
"""
Height = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the height of characters in the font.
Get: Height(self: TextMetrics) -> int
Set: Height(self: TextMetrics) = value
"""
InternalLeading = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the amount of leading inside the bounds set by the System.Windows.Forms.VisualStyles.TextMetrics.Height property.
Get: InternalLeading(self: TextMetrics) -> int
Set: InternalLeading(self: TextMetrics) = value
"""
Italic = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets a value indicating whether the font is italic.
Get: Italic(self: TextMetrics) -> bool
Set: Italic(self: TextMetrics) = value
"""
LastChar = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the last character defined in the font.
Get: LastChar(self: TextMetrics) -> Char
Set: LastChar(self: TextMetrics) = value
"""
MaxCharWidth = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the width of the widest character in the font.
Get: MaxCharWidth(self: TextMetrics) -> int
Set: MaxCharWidth(self: TextMetrics) = value
"""
Overhang = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the extra width per string that may be added to some synthesized fonts.
Get: Overhang(self: TextMetrics) -> int
Set: Overhang(self: TextMetrics) = value
"""
PitchAndFamily = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets information about the pitch, technology, and family of a physical font.
Get: PitchAndFamily(self: TextMetrics) -> TextMetricsPitchAndFamilyValues
Set: PitchAndFamily(self: TextMetrics) = value
"""
StruckOut = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets a value indicating whether the font specifies a horizontal line through the characters.
Get: StruckOut(self: TextMetrics) -> bool
Set: StruckOut(self: TextMetrics) = value
"""
Underlined = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets a value indicating whether the font is underlined.
Get: Underlined(self: TextMetrics) -> bool
Set: Underlined(self: TextMetrics) = value
"""
Weight = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the weight of the font.
Get: Weight(self: TextMetrics) -> int
Set: Weight(self: TextMetrics) = value
"""
class TextMetricsCharacterSet(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the character set of the font specified by a visual style for a particular element.
enum TextMetricsCharacterSet, values: Ansi (0), Arabic (178), Baltic (186), ChineseBig5 (136), Default (1), EastEurope (238), Gb2312 (134), Greek (161), Hangul (129), Hebrew (177), Johab (130), Mac (77), Oem (255), Russian (204), ShiftJis (128), Symbol (2), Thai (222), Turkish (162), Vietnamese (163)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Ansi = None
Arabic = None
Baltic = None
ChineseBig5 = None
Default = None
EastEurope = None
Gb2312 = None
Greek = None
Hangul = None
Hebrew = None
Johab = None
Mac = None
Oem = None
Russian = None
ShiftJis = None
Symbol = None
Thai = None
Turkish = None
value__ = None
Vietnamese = None
class TextMetricsPitchAndFamilyValues(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies information about the pitch, technology, and family of the font specified by a visual style for a particular element.
enum (flags) TextMetricsPitchAndFamilyValues, values: Device (8), FixedPitch (1), TrueType (4), Vector (2)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Device = None
FixedPitch = None
TrueType = None
value__ = None
Vector = None
class TextShadowType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the type of shadow to add to text.
enum TextShadowType, values: Continuous (2), None (0), Single (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Continuous = None
    locals()['None'] = None  # CLR member is literally named 'None'; direct assignment is a Python syntax error
Single = None
value__ = None
class ThemeSizeType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the size of the visual style part to retrieve.
enum ThemeSizeType, values: Draw (2), Minimum (0), True (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Draw = None
Minimum = None
True = None
value__ = None
class ToolBarState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a toolbar item that is drawn with visual styles.
enum ToolBarState, values: Checked (5), Disabled (4), Hot (2), HotChecked (6), Normal (1), Pressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Checked = None
Disabled = None
Hot = None
HotChecked = None
Normal = None
Pressed = None
value__ = None
class TrackBarThumbState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the visual state of a track bar slider (also known as a thumb) that is drawn with visual styles.
enum TrackBarThumbState, values: Disabled (5), Hot (2), Normal (1), Pressed (3)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Disabled = None
Hot = None
Normal = None
Pressed = None
value__ = None
class TrueSizeScalingType(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the scaling type of a visual style element with a fixed size.
enum TrueSizeScalingType, values: Dpi (2), None (0), Size (1)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Dpi = None
    locals()['None'] = None  # CLR member is literally named 'None'; direct assignment is a Python syntax error
Size = None
value__ = None
class VerticalAlignment(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the vertical alignment for visual style elements with a fixed size.
enum VerticalAlignment, values: Bottom (2), Center (1), Top (0)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Bottom = None
Center = None
Top = None
value__ = None
class VisualStyleElement(object):
""" Identifies a control or user interface (UI) element that is drawn with visual styles. """
@staticmethod
def CreateElement(className, part, state):
"""
CreateElement(className: str, part: int, state: int) -> VisualStyleElement
Creates a new visual style element from the specified class, part, and state values.
className: A string that represents the class name of the visual style element to be created.
part: A value that represents the part of the visual style element to be created.
state: A value that represents the state of the visual style element to be created.
Returns: A System.Windows.Forms.VisualStyles.VisualStyleElement with the
System.Windows.Forms.VisualStyles.VisualStyleElement.ClassName,
System.Windows.Forms.VisualStyles.VisualStyleElement.Part, and
System.Windows.Forms.VisualStyles.VisualStyleElement.State properties initialized to the
className, part, and state parameters.
"""
pass
ClassName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the class name of the visual style element that this System.Windows.Forms.VisualStyles.VisualStyleElement represents.
Get: ClassName(self: VisualStyleElement) -> str
"""
Part = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating the part of the visual style element that this System.Windows.Forms.VisualStyles.VisualStyleElement represents.
Get: Part(self: VisualStyleElement) -> int
"""
State = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating the state of the visual style element that this System.Windows.Forms.VisualStyles.VisualStyleElement represents.
Get: State(self: VisualStyleElement) -> int
"""
Button = None
ComboBox = None
ExplorerBar = None
Header = None
ListView = None
Menu = None
MenuBand = None
Page = None
ProgressBar = None
Rebar = None
ScrollBar = None
Spin = None
StartPanel = None
Status = None
Tab = None
TaskBand = None
Taskbar = None
TaskbarClock = None
TextBox = None
ToolBar = None
ToolTip = None
TrackBar = None
TrayNotify = None
TreeView = None
Window = None
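# --- Editor's illustrative sketch (not part of the generated stubs) ---------
# Two equivalent ways to obtain an element, assuming the real
# System.Windows.Forms assembly is loaded (the attributes above are only
# placeholders in this skeleton). Part 1 / state 1 are the standard "BUTTON"
# theme-class values for a push button in its normal state.
def _example_elements():
    from System.Windows.Forms.VisualStyles import VisualStyleElement
    predefined = VisualStyleElement.Button.PushButton.Normal
    handmade = VisualStyleElement.CreateElement("BUTTON", 1, 1)
    return predefined, handmade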
class VisualStyleInformation(object):
""" Provides information about the current visual style of the operating system. """
Author = 'MSX'
ColorScheme = 'NormalColor'
Company = ''
ControlHighlightHot = None
Copyright = ''
Description = ''
DisplayName = 'Aero style'
IsEnabledByUser = True
IsSupportedByOS = True
MinimumColorDepth = 0
Size = 'NormalSize'
SupportsFlatMenus = False
TextControlBorder = None
Url = ''
Version = ''
__all__ = []
class VisualStyleRenderer(object):
"""
Provides methods for drawing and getting information about a System.Windows.Forms.VisualStyles.VisualStyleElement. This class cannot be inherited.
VisualStyleRenderer(element: VisualStyleElement)
VisualStyleRenderer(className: str, part: int, state: int)
"""
def DrawBackground(self, dc, bounds, clipRectangle=None):
"""
DrawBackground(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, clipRectangle: Rectangle)
Draws the background image of the current visual style element within the specified bounding
rectangle and clipped to the specified clipping rectangle.
dc: The System.Drawing.IDeviceContext used to draw the background image.
bounds: A System.Drawing.Rectangle in which the background image is drawn.
clipRectangle: A System.Drawing.Rectangle that defines a clipping rectangle for the drawing operation.
DrawBackground(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle)
Draws the background image of the current visual style element within the specified bounding
rectangle.
dc: The System.Drawing.IDeviceContext used to draw the background image.
bounds: A System.Drawing.Rectangle in which the background image is drawn.
"""
pass
def DrawEdge(self, dc, bounds, edges, style, effects):
"""
DrawEdge(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, edges: Edges, style: EdgeStyle, effects: EdgeEffects) -> Rectangle
Draws one or more edges of the specified bounding rectangle.
dc: The System.Drawing.IDeviceContext used to draw the edges.
bounds: The System.Drawing.Rectangle whose bounds define the edges to draw.
edges: A bitwise combination of the System.Windows.Forms.VisualStyles.Edges values.
style: A bitwise combination of the System.Windows.Forms.VisualStyles.EdgeStyle values.
effects: A bitwise combination of the System.Windows.Forms.VisualStyles.EdgeEffects values.
Returns: A System.Drawing.Rectangle that represents the interior of the bounds parameter, minus the edges
that were drawn.
"""
pass
def DrawImage(self, g, bounds, *__args):
"""
DrawImage(self: VisualStyleRenderer, g: Graphics, bounds: Rectangle, imageList: ImageList, imageIndex: int)
Draws the image from the specified System.Windows.Forms.ImageList within the specified bounds.
g: The System.Drawing.Graphics used to draw the image.
bounds: A System.Drawing.Rectangle in which the image is drawn.
imageList: An System.Windows.Forms.ImageList that contains the System.Drawing.Image to draw.
imageIndex: The index of the System.Drawing.Image within imageList to draw.
DrawImage(self: VisualStyleRenderer, g: Graphics, bounds: Rectangle, image: Image)
Draws the specified image within the specified bounds.
g: The System.Drawing.Graphics used to draw the image.
bounds: A System.Drawing.Rectangle in which the image is drawn.
image: The System.Drawing.Image to draw.
"""
pass
def DrawParentBackground(self, dc, bounds, childControl):
"""
DrawParentBackground(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, childControl: Control)
Draws the background of a control's parent in the specified area.
dc: The System.Drawing.IDeviceContext used to draw the background of the parent of childControl.
This object typically belongs to the child control.
        bounds: A System.Drawing.Rectangle in which to draw the parent control's background. This rectangle
        should be in the child control's coordinates.
childControl: The control whose parent's background will be drawn.
"""
pass
def DrawText(self, dc, bounds, textToDraw, drawDisabled=None, flags=None):
"""
DrawText(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, textToDraw: str, drawDisabled: bool, flags: TextFormatFlags)
Draws text in the specified bounding rectangle with the option of displaying disabled text and
applying other text formatting.
dc: The System.Drawing.IDeviceContext used to draw the text.
bounds: A System.Drawing.Rectangle in which to draw the text.
textToDraw: The text to draw.
drawDisabled: true to draw grayed-out text; otherwise, false.
flags: A bitwise combination of the System.Windows.Forms.TextFormatFlags values.
DrawText(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, textToDraw: str, drawDisabled: bool)
Draws text in the specified bounds with the option of displaying disabled text.
dc: The System.Drawing.IDeviceContext used to draw the text.
bounds: A System.Drawing.Rectangle in which to draw the text.
textToDraw: The text to draw.
drawDisabled: true to draw grayed-out text; otherwise, false.
DrawText(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, textToDraw: str)
Draws text in the specified bounds using default formatting.
dc: The System.Drawing.IDeviceContext used to draw the text.
bounds: A System.Drawing.Rectangle in which to draw the text.
textToDraw: The text to draw.
"""
pass
def GetBackgroundContentRectangle(self, dc, bounds):
"""
GetBackgroundContentRectangle(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle) -> Rectangle
Returns the content area for the background of the current visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
bounds: A System.Drawing.Rectangle that contains the entire background area of the current visual style
element.
Returns: A System.Drawing.Rectangle that contains the content area for the background of the current
visual style element.
"""
pass
def GetBackgroundExtent(self, dc, contentBounds):
"""
GetBackgroundExtent(self: VisualStyleRenderer, dc: IDeviceContext, contentBounds: Rectangle) -> Rectangle
Returns the entire background area for the current visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
contentBounds: A System.Drawing.Rectangle that contains the content area of the current visual style element.
Returns: A System.Drawing.Rectangle that contains the entire background area of the current visual style
element.
"""
pass
def GetBackgroundRegion(self, dc, bounds):
"""
GetBackgroundRegion(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle) -> Region
Returns the region for the background of the current visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
bounds: A System.Drawing.Rectangle that contains the entire background area of the current visual style
element.
Returns: The System.Drawing.Region that contains the background of the current visual style element.
"""
pass
def GetBoolean(self, prop):
"""
GetBoolean(self: VisualStyleRenderer, prop: BooleanProperty) -> bool
Returns the value of the specified Boolean property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.BooleanProperty values that specifies which
property value to retrieve for the current visual style element.
Returns: true if the property specified by the prop parameter is true for the current visual style
element; otherwise, false.
"""
pass
def GetColor(self, prop):
"""
GetColor(self: VisualStyleRenderer, prop: ColorProperty) -> Color
Returns the value of the specified color property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.ColorProperty values that specifies which property
value to retrieve for the current visual style element.
Returns: A System.Drawing.Color that contains the value of the property specified by the prop parameter
for the current visual style element.
"""
pass
def GetEnumValue(self, prop):
"""
GetEnumValue(self: VisualStyleRenderer, prop: EnumProperty) -> int
Returns the value of the specified enumerated type property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.EnumProperty values that specifies which property
value to retrieve for the current visual style element.
Returns: The integer value of the property specified by the prop parameter for the current visual style
element.
"""
pass
def GetFilename(self, prop):
"""
GetFilename(self: VisualStyleRenderer, prop: FilenameProperty) -> str
Returns the value of the specified file name property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.FilenameProperty values that specifies which
property value to retrieve for the current visual style element.
Returns: A System.String that contains the value of the property specified by the prop parameter for the
current visual style element.
"""
pass
def GetFont(self, dc, prop):
"""
GetFont(self: VisualStyleRenderer, dc: IDeviceContext, prop: FontProperty) -> Font
Returns the value of the specified font property for the current visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
prop: One of the System.Windows.Forms.VisualStyles.FontProperty values that specifies which property
value to retrieve for the current visual style element.
Returns: A System.Drawing.Font that contains the value of the property specified by the prop parameter
for the current visual style element.
"""
pass
def GetInteger(self, prop):
"""
GetInteger(self: VisualStyleRenderer, prop: IntegerProperty) -> int
Returns the value of the specified integer property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.IntegerProperty values that specifies which
property value to retrieve for the current visual style element.
Returns: The integer value of the property specified by the prop parameter for the current visual style
element.
"""
pass
def GetMargins(self, dc, prop):
"""
GetMargins(self: VisualStyleRenderer, dc: IDeviceContext, prop: MarginProperty) -> Padding
Returns the value of the specified margins property for the current visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
prop: One of the System.Windows.Forms.VisualStyles.MarginProperty values that specifies which property
value to retrieve for the current visual style element.
Returns: A System.Windows.Forms.Padding that contains the value of the property specified by the prop
parameter for the current visual style element.
"""
pass
def GetPartSize(self, dc, *__args):
"""
GetPartSize(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, type: ThemeSizeType) -> Size
Returns the value of the specified size property of the current visual style part using the
specified drawing bounds.
dc: The System.Drawing.IDeviceContext this operation will use.
bounds: A System.Drawing.Rectangle that contains the area in which the part will be drawn.
type: One of the System.Windows.Forms.VisualStyles.ThemeSizeType values that specifies which size
value to retrieve for the part.
Returns: A System.Drawing.Size that contains the size specified by the type parameter for the current
visual style part.
GetPartSize(self: VisualStyleRenderer, dc: IDeviceContext, type: ThemeSizeType) -> Size
Returns the value of the specified size property of the current visual style part.
dc: The System.Drawing.IDeviceContext this operation will use.
type: One of the System.Windows.Forms.VisualStyles.ThemeSizeType values that specifies which size
value to retrieve for the part.
Returns: A System.Drawing.Size that contains the size specified by the type parameter for the current
visual style part.
"""
pass
def GetPoint(self, prop):
"""
GetPoint(self: VisualStyleRenderer, prop: PointProperty) -> Point
Returns the value of the specified point property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.PointProperty values that specifies which property
value to retrieve for the current visual style element.
Returns: A System.Drawing.Point that contains the value of the property specified by the prop parameter
for the current visual style element.
"""
pass
def GetString(self, prop):
"""
GetString(self: VisualStyleRenderer, prop: StringProperty) -> str
Returns the value of the specified string property for the current visual style element.
prop: One of the System.Windows.Forms.VisualStyles.StringProperty values that specifies which property
value to retrieve for the current visual style element.
Returns: A System.String that contains the value of the property specified by the prop parameter for the
current visual style element.
"""
pass
def GetTextExtent(self, dc, *__args):
"""
GetTextExtent(self: VisualStyleRenderer, dc: IDeviceContext, bounds: Rectangle, textToDraw: str, flags: TextFormatFlags) -> Rectangle
Returns the size and location of the specified string when drawn with the font of the current
visual style element within the specified initial bounding rectangle.
dc: The System.Drawing.IDeviceContext this operation will use.
bounds: A System.Drawing.Rectangle used to control the flow and wrapping of the text.
textToDraw: The string to measure.
flags: A bitwise combination of the System.Windows.Forms.TextFormatFlags values.
Returns: A System.Drawing.Rectangle that contains the area required to fit the rendered text.
GetTextExtent(self: VisualStyleRenderer, dc: IDeviceContext, textToDraw: str, flags: TextFormatFlags) -> Rectangle
Returns the size and location of the specified string when drawn with the font of the current
visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
textToDraw: The string to measure.
flags: A bitwise combination of the System.Windows.Forms.TextFormatFlags values.
Returns: A System.Drawing.Rectangle that contains the area required to fit the rendered text.
"""
pass
def GetTextMetrics(self, dc):
"""
GetTextMetrics(self: VisualStyleRenderer, dc: IDeviceContext) -> TextMetrics
Retrieves information about the font specified by the current visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
Returns: A System.Windows.Forms.VisualStyles.TextMetrics that provides information about the font
specified by the current visual style element.
"""
pass
def HitTestBackground(self, *__args):
"""
HitTestBackground(self: VisualStyleRenderer, dc: IDeviceContext, backgroundRectangle: Rectangle, hRgn: IntPtr, pt: Point, options: HitTestOptions) -> HitTestCode
Returns a hit test code indicating whether the point is contained in the background of the
current visual style element and within the specified region.
dc: The System.Drawing.IDeviceContext this operation will use.
backgroundRectangle: A System.Drawing.Rectangle that contains the background of the current visual style element.
hRgn: A Windows handle to a System.Drawing.Region that specifies the bounds of the hit test area
within the background.
pt: The System.Drawing.Point to test.
options: A bitwise combination of the System.Windows.Forms.VisualStyles.HitTestOptions values.
Returns: A System.Windows.Forms.VisualStyles.HitTestCode that describes where pt is located in the
background of the current visual style element.
HitTestBackground(self: VisualStyleRenderer, g: Graphics, backgroundRectangle: Rectangle, region: Region, pt: Point, options: HitTestOptions) -> HitTestCode
Returns a hit test code indicating whether the point is contained in the background of the
current visual style element and within the specified bounds.
g: The System.Drawing.Graphics this operation will use.
backgroundRectangle: A System.Drawing.Rectangle that contains the background of the current visual style element.
region: A System.Drawing.Region that specifies the bounds of the hit test area within the background.
pt: The System.Drawing.Point to test.
options: A bitwise combination of the System.Windows.Forms.VisualStyles.HitTestOptions values.
Returns: A System.Windows.Forms.VisualStyles.HitTestCode that describes where pt is located in the
background of the current visual style element, if at all.
HitTestBackground(self: VisualStyleRenderer, dc: IDeviceContext, backgroundRectangle: Rectangle, pt: Point, options: HitTestOptions) -> HitTestCode
Returns a hit test code indicating whether a point is contained in the background of the current
visual style element.
dc: The System.Drawing.IDeviceContext this operation will use.
backgroundRectangle: A System.Drawing.Rectangle that contains the background of the current visual style element.
pt: The System.Drawing.Point to test.
options: A bitwise combination of the System.Windows.Forms.VisualStyles.HitTestOptions values.
Returns: A System.Windows.Forms.VisualStyles.HitTestCode that describes where pt is located in the
background of the current visual style element.
"""
pass
def IsBackgroundPartiallyTransparent(self):
"""
IsBackgroundPartiallyTransparent(self: VisualStyleRenderer) -> bool
Indicates whether the background of the current visual style element has any semitransparent or
alpha-blended pieces.
Returns: true if the background of the current visual style element has any semitransparent or
alpha-blended pieces; otherwise, false.
"""
pass
@staticmethod
def IsElementDefined(element):
"""
IsElementDefined(element: VisualStyleElement) -> bool
Determines whether the specified visual style element is defined by the current visual style.
element: A System.Windows.Forms.VisualStyles.VisualStyleElement whose class and part combination will be
verified.
Returns: true if the combination of the System.Windows.Forms.VisualStyles.VisualStyleElement.ClassName
and System.Windows.Forms.VisualStyles.VisualStyleElement.Part properties of element are defined;
otherwise, false.
"""
pass
def SetParameters(self, *__args):
"""
SetParameters(self: VisualStyleRenderer, className: str, part: int, state: int)
Sets this System.Windows.Forms.VisualStyles.VisualStyleRenderer to the visual style element
represented by the specified class, part, and state values.
className: The new value of the System.Windows.Forms.VisualStyles.VisualStyleRenderer.Class property.
part: The new value of the System.Windows.Forms.VisualStyles.VisualStyleRenderer.Part property.
state: The new value of the System.Windows.Forms.VisualStyles.VisualStyleRenderer.State property.
SetParameters(self: VisualStyleRenderer, element: VisualStyleElement)
Sets this System.Windows.Forms.VisualStyles.VisualStyleRenderer to the visual style element
represented by the specified System.Windows.Forms.VisualStyles.VisualStyleElement.
element: A System.Windows.Forms.VisualStyles.VisualStyleElement that specifies the new values of the
System.Windows.Forms.VisualStyles.VisualStyleRenderer.Class,
System.Windows.Forms.VisualStyles.VisualStyleRenderer.Part, and
System.Windows.Forms.VisualStyles.VisualStyleRenderer.State properties.
"""
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type, element: VisualStyleElement)
__new__(cls: type, className: str, part: int, state: int)
"""
pass
Class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the class name of the current visual style element.
Get: Class(self: VisualStyleRenderer) -> str
"""
Handle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a unique identifier for the current class of visual style elements.
Get: Handle(self: VisualStyleRenderer) -> IntPtr
"""
LastHResult = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the last error code returned by the native visual styles (UxTheme) API methods encapsulated by the System.Windows.Forms.VisualStyles.VisualStyleRenderer class.
Get: LastHResult(self: VisualStyleRenderer) -> int
"""
Part = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the part of the current visual style element.
Get: Part(self: VisualStyleRenderer) -> int
"""
State = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the state of the current visual style element.
Get: State(self: VisualStyleRenderer) -> int
"""
IsSupported = True
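# --- Editor's illustrative sketch (not part of the generated stubs) ---------
# Typical render path: confirm visual styles are active and the element is
# defined by the current theme before constructing a renderer, then draw into
# a Graphics surface supplied by a Paint handler.
def _example_render(graphics, bounds):
    from System.Windows.Forms.VisualStyles import (
        VisualStyleRenderer, VisualStyleElement, ThemeSizeType)
    element = VisualStyleElement.Button.PushButton.Normal
    if not (VisualStyleRenderer.IsSupported
            and VisualStyleRenderer.IsElementDefined(element)):
        return None  # fall back to classic, non-themed drawing
    renderer = VisualStyleRenderer(element)
    renderer.DrawBackground(graphics, bounds)
    renderer.DrawText(graphics, bounds, "OK")
    return renderer.GetPartSize(graphics, ThemeSizeType.Draw)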
class VisualStyleState(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies how visual styles are applied to the current application.
enum VisualStyleState, values: ClientAndNonClientAreasEnabled (3), ClientAreaEnabled (2), NonClientAreaEnabled (1), NoneEnabled (0)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
ClientAndNonClientAreasEnabled = None
ClientAreaEnabled = None
NonClientAreaEnabled = None
NoneEnabled = None
value__ = None
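# --- Editor's illustrative sketch (not part of the generated stubs) ---------
# VisualStyleState is consumed through Application.VisualStyleState, for
# example to restrict themed rendering to client areas only:
def _example_restrict_theming():
    from System.Windows.Forms import Application
    from System.Windows.Forms.VisualStyles import VisualStyleState
    Application.VisualStyleState = VisualStyleState.ClientAreaEnabled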
|
sleepypuppy/collector/views.py
|
soffensive/sleepy-puppy
| 952 |
81912
|
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from flask import request
from flask import render_template, make_response
from flask_mail import Message
from sleepypuppy import app, db, flask_mail, csrf_protect
from sleepypuppy.admin.payload.models import Payload
from sleepypuppy.admin.capture.models import Capture
from sleepypuppy.admin.collector.models import GenericCollector
from sleepypuppy.admin.access_log.models import AccessLog
from sleepypuppy.admin.assessment.models import Assessment
from sleepypuppy.admin.user.models import User
from flask import Response
from urlparse import urlparse
@app.route('/x', methods=['GET'])
def x_collector(payload=1):
"""
Determine the payload associated with the request.
If accesslog is enabled for the payload, record the event
and email users subscribed to the payload's assessment.
"""
the_payload = Payload.query.filter_by(
id=int(request.args.get('u', 1))).first()
assessment_id = request.args.get('a', 1)
# consider only looking up payload one time for performance
the_assessment = Assessment.query.filter_by(
id=int(assessment_id)).first()
try:
if the_assessment.access_log_enabled:
            referrer = request.headers.get("Referer", None)  # HTTP's historically misspelled "Referer" header
user_agent = request.headers.get("User-Agent", None)
ip_address = request.access_route[-1]
client_info = AccessLog(
the_payload.id, the_assessment.name, referrer, user_agent, ip_address)
db.session.add(client_info)
db.session.commit()
email_subscription(the_payload.id, the_assessment, None, client_info, 'access_log')
except Exception as err:
app.logger.warn("assessment not found, can't check access log.")
app.logger.warn(err)
# Log for recording access log records
if request.args.get('u', 1):
return collector(request.args.get('u', 1))
@app.route('/loader.js', methods=['GET'])
def collector(payload=1):
"""
Render Puppyscript payload with unique identifier and hosts for callback.
Enforce snooze and run_once directives.
"""
payload = request.args.get('u', 1)
assessment = request.args.get('a', 1)
try:
the_assessment = Assessment.query.filter_by(id=int(assessment)).first()
if the_assessment.snooze:
return ''
if the_assessment.run_once and Capture.query.filter_by(payload=int(payload), assessment=the_assessment.name).first():
return ''
if the_assessment.run_once and GenericCollector.query.filter_by(payload=int(payload), assessment=the_assessment.name).first():
return ''
except Exception as err:
app.logger.warn(err)
# Render the template and include payload, hostname, callback_protocol,
# assessment.
# If you need to expose additional server side
# information for your JavaScripts, do it here.
try:
headers = {'Content-Type': 'text/javascript'}
return make_response(render_template(
'loader.js',
payload=payload,
assessment=the_assessment.id,
hostname=app.config['CALLBACK_HOSTNAME'],
callback_protocol=app.config.get('CALLBACK_PROTOCOL', 'https')),
200,
headers
)
except:
app.logger.warn("Assessment not found, defaulting to General.")
# If the assessment doesn't exist, default to general
headers = {'Content-Type': 'text/javascript'}
return make_response(render_template(
'loader.js',
payload=payload,
assessment=1,
hostname=app.config['CALLBACK_HOSTNAME'],
callback_protocol=app.config.get('CALLBACK_PROTOCOL', 'https')),
200,
headers
)
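# Editor's illustrative sketch: both collector endpoints are driven by the
# 'u' (payload id) and 'a' (assessment id) query parameters, so a tester's
# injected markup typically looks like the string built below. The fallback
# hostname is an example value, not project configuration.
def _example_payload_markup(payload_id=1, assessment_id=1):
    return '<script src="{}://{}/x?u={}&a={}"></script>'.format(
        app.config.get('CALLBACK_PROTOCOL', 'https'),
        app.config.get('CALLBACK_HOSTNAME', 'sleepypuppy.example.com'),
        payload_id, assessment_id)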
def email_subscription(payload, the_assessment, url, client_info, model):
"""
Email notifications for captures, generic collections, and access log
"""
email_list = []
notify_jobs = Payload.query.filter_by(id=payload).first()
user_notify = User.query.all()
for user in user_notify:
user_subscriptions = []
for assessment in user.assessments:
user_subscriptions.append(assessment.id)
if the_assessment.id in user_subscriptions:
email_list.append(user.email)
import cgi
if model == "capture":
subject = "[Sleepy Puppy] - Capture Received From: {}".format(
cgi.escape(url, quote=True)
)
html = "<b>Associated Assessment: </b>{}<br/>".format(
cgi.escape(the_assessment.name, quote=True)
)
html += "<b>URL: </b>{}<br/>".format(
cgi.escape(url, quote=True)
)
html += "<b>Payload: </b>{}<br/>".format(
cgi.escape(notify_jobs.payload, quote=True)
)
if notify_jobs.notes is not None:
html += "<b>Notes: </b>{}<br/>".format(
cgi.escape(notify_jobs.notes, quote=True)
)
html += "<b>Capture: </b>{}://{}/capture/?flt1_0={}&flt3_14={}".format(
app.config.get('CALLBACK_PROTOCOL', 'https'),
app.config.get('HOSTNAME', 'localhost'),
payload, the_assessment.name)
elif model == "access_log":
subject = "[Sleepy Puppy] - Access Log Request Received For Assessment(s): {}".format(
cgi.escape(the_assessment.name, quote=True)
)
html = "<b>Associated Assessment: </b>{}<br/>".format(
cgi.escape(the_assessment.name, quote=True)
)
html += "<b>Referer: </b>{}<br/>".format(
cgi.escape(client_info.referrer or "", quote=True)
)
html += "<b>User Agent: </b>{}<br/>".format(
cgi.escape(client_info.user_agent or "", quote=True)
)
html += "<b>IP Address: </b>{}<br/>".format(
cgi.escape(client_info.ip_address, quote=True)
)
html += "<b>AccessLog: </b>{}://{}/accesslog/?flt1_7={}&flt2_14={}".format(
app.config.get('CALLBACK_PROTOCOL', 'https'),
app.config.get('HOSTNAME', 'localhost'),
payload, the_assessment.name)
elif model == "generic_collector":
subject = "[Sleepy Puppy] - Generic Collector Received From: {}".format(
cgi.escape(client_info.url, quote=True)
)
html = "<b>Associated Assessment: </b>{}<br/>".format(
cgi.escape(the_assessment.name, quote=True)
)
html += "<b>Puppyscript Name: </b>{}<br/>".format(
cgi.escape(client_info.puppyscript_name or "", quote=True)
)
html += "<b>Url: </b>{}<br/>".format(
cgi.escape(client_info.url or "", quote=True)
)
html += "<b>Referer: </b>{}<br/>".format(
cgi.escape(client_info.referrer or "", quote=True)
)
html += "<b>Generic Collector: </b>{}://{}/genericcollector/?flt1_0={}&flt2_7={}".format(
app.config.get('CALLBACK_PROTOCOL', 'https'),
app.config.get('HOSTNAME', 'localhost'),
payload,
the_assessment.name)
# If there are people to email, email them that a capture was received
if email_list:
if app.config["EMAILS_USE_SES"]:
import boto.ses
try:
ses_region = app.config.get('SES_REGION', 'us-east-1')
ses = boto.ses.connect_to_region(ses_region)
            except Exception as e:
                import traceback
                app.logger.debug(e)
                app.logger.warn(traceback.format_exc())
return
for email in email_list:
try:
ses.send_email(
app.config['MAIL_SENDER'],
subject,
html,
email,
format="html"
)
app.logger.debug("Emailed {} - {} ".format(email, subject))
                except Exception as e:
                    m = "Failed to send notification to {} from {} with subject: {}\n{}".format(
                        email,
                        app.config['MAIL_SENDER'],
                        subject,
                        e
                    )
app.logger.debug(m)
else:
msg = Message(
subject,
sender=app.config['MAIL_SENDER'],
recipients=email_list
)
msg.html = html
try:
flask_mail.send(msg)
        except Exception as err:
            app.logger.debug(err)
@csrf_protect.exempt
@app.route('/generic_callback', methods=['POST', 'GET'])
def get_generic_callback():
"""
Method to handle generic callbacks from arbitrary puppyscripts.
Expects
Method: POST
Data: payload, puppyscript_name, data
Optional Data: referrer, url
"""
response = Response()
if request.method == 'POST':
try:
app.logger.info("request.form.get('payload', 0): {}".format(
request.form.get('payload', 0)))
puppyscript_name = urllib.unquote(
unicode(request.form.get('puppyscript_name', '')))
# If they don't set a url or referrer, ignore it
url = urllib.unquote(unicode(request.form.get('uri', '')))
referrer = urllib.unquote(
unicode(request.form.get('referrer', '')))
try:
if app.config.get('ALLOWED_DOMAINS'):
domain = urlparse(url).netloc.split(':')[0]
if domain not in app.config.get('ALLOWED_DOMAINS'):
app.logger.info(
"Ignoring Capture from unapproved domain: [{}]".format(domain))
return response
except Exception as e:
app.logger.warn("Exception in /generic_callback when parsing url {}\n\n{}".format(Exception, str(e))) # noqa
data = urllib.unquote(unicode(request.form.get('data', '')))
payload = Payload.query.filter_by(
id=int(request.form.get('payload', 0))).first()
assessment = Assessment.query.filter_by(
id=int(request.form.get('assessment', 0))).first()
# If it's a rogue capture, log it anyway.
if payload is None or assessment is None:
client_info = GenericCollector(
0, 0, puppyscript_name, url, referrer, data)
else:
# Create the capture with associated assessment/payload
client_info = GenericCollector(
payload.id, assessment.name, puppyscript_name, url, referrer, data)
db.session.add(client_info)
db.session.commit()
# Email users subscribed to the Payload's Assessment
email_subscription(
payload.id, assessment, url, client_info, 'generic_collector')
except Exception as e:
app.logger.warn(
"Exception in /generic_callback {}\n\n{}".format(Exception, str(e)))
import traceback
traceback.print_exc()
return response
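# A minimal client sketch for /generic_callback (an illustration, not part of
# the app; hostname and values are hypothetical):
#
#   import requests
#   requests.post('https://sleepypuppy.example.com/generic_callback', data={
#       'payload': 1, 'assessment': 1, 'puppyscript_name': 'demo',
#       'uri': 'http://victim.example/page', 'referrer': '',
#       'data': 'collected-value'})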
# Disable CSRF protection on callback posts
@csrf_protect.exempt
@app.route('/callbacks', methods=['POST', 'GET'])
def get_callbacks():
"""
Method to handle Capture creation.
The Default Puppyscript provides all the expected parameters
for this endpoint.
If you need to modify the default captures, provide the following:
Method: POST
Data: assessment(payload.id will work here), url, referrer, cookies, user_agent, payload,
screenshot, dom
"""
response = Response()
app.logger.info("Inside /callbacks")
if request.method == 'POST':
try:
app.logger.info("request.form.get('payload', 0): {}".format(
request.form.get('payload', 0)))
url = urllib.unquote(unicode(request.form.get('uri', '')))
if app.config.get('ALLOWED_DOMAINS'):
domain = urlparse(url).netloc.split(':')[0]
if domain not in app.config.get('ALLOWED_DOMAINS'):
app.logger.info(
"Ignoring Capture from unapproved domain: [{}]".format(domain))
return response
referrer = urllib.unquote(
unicode(request.form.get('referrer', '')))
cookies = urllib.unquote(unicode(request.form.get('cookies', '')))
user_agent = urllib.unquote(
unicode(request.form.get('user_agent', '')))
payload = Payload.query.filter_by(
id=int(request.form.get('payload', 0))).first()
assessment = Assessment.query.filter_by(
id=int(request.form.get('assessment', 0))).first()
screenshot = unicode(request.form.get('screenshot', ''))
dom = urllib.unquote(unicode(request.form.get('dom', '')))[:65535]
# If it's a rogue capture, log it anyway.
if payload is None or assessment is None:
client_info = Capture("Not found",
url,
referrer,
cookies,
user_agent,
0,
screenshot,
dom)
else:
# Create the capture with associated assessment/payload
client_info = Capture(assessment.name,
url,
referrer,
cookies,
user_agent,
payload.id,
screenshot,
dom)
db.session.add(client_info)
db.session.commit()
# Email users subscribed to the Payload's Assessment
email_subscription(
payload.id, assessment, url, client_info, 'capture')
except Exception as e:
app.logger.warn(
"Exception in /callbacks {}\n\n{}".format(Exception, str(e)))
import traceback
traceback.print_exc()
return response
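# A minimal client sketch for /callbacks (an illustration; field names follow
# the docstring above, hostname and values are hypothetical):
#
#   import requests
#   requests.post('https://sleepypuppy.example.com/callbacks', data={
#       'payload': 1, 'assessment': 1, 'uri': 'http://victim.example/page',
#       'referrer': '', 'cookies': '', 'user_agent': 'Mozilla/5.0',
#       'screenshot': '', 'dom': '<html></html>'})
#
# If ALLOWED_DOMAINS is configured, the 'uri' domain must be approved or the
# capture is silently dropped.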
|
src/amuse/community/ph4/util/initialize_system.py
|
rknop/amuse
| 131 |
81914
|
<gh_stars>100-1000
import collections
import getopt
import numpy
import os
import random
import sys
import unittest
import pickle
from time import clock
from time import gmtime
from time import mktime
from amuse.community.ph4.interface import ph4 as grav
from amuse.community.smalln.interface import SmallN
from amuse.community.kepler.interface import Kepler
from amuse.couple import multiples
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import quantities
from amuse import datamodel
from amuse.datamodel import particle_attributes as pa
from amuse.rfi.core import is_mpd_running
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution_nbody
from amuse import io
from utils import *
def make_nbody(number_of_stars = 100, time = 0.0,
n_workers = 1, use_gpu = 1, gpu_worker = 1,
salpeter = 0,
delta_t = 1.0 | nbody_system.time,
timestep_parameter = 0.1,
softening_length = 0.0 | nbody_system.length,
random_seed = 1234):
# Make an N-body system, print out some statistics on it, and save
# it in a restart file. The restart file name is of the form
# 't=nnnn.n.xxx', where the default time is 0.0.
if random_seed <= 0:
numpy.random.seed()
random_seed = numpy.random.randint(1, pow(2,31)-1)
numpy.random.seed(random_seed)
print("random seed =", random_seed)
init_smalln()
# Note that there are actually three GPU options:
#
# 1. use the GPU code and allow GPU use (default)
# 2. use the GPU code but disable GPU use (-g)
# 3. use the non-GPU code (-G)
if gpu_worker == 1:
try:
gravity = grav(number_of_workers = n_workers,
redirection = "none", mode = "gpu")
except Exception as ex:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
else:
gravity = grav(number_of_workers = n_workers,
redirection = "none")
gravity.initialize_code()
gravity.parameters.set_defaults()
#-----------------------------------------------------------------
# Make a standard N-body system.
print("making a Plummer model")
stars = new_plummer_model(number_of_stars)
id = numpy.arange(number_of_stars)
stars.id = id+1
print("setting particle masses and radii")
if salpeter == 0:
print('equal masses')
total_mass = 1.0 | nbody_system.mass
scaled_mass = total_mass / number_of_stars
else:
print('salpeter mass function')
mmin = 0.5 | nbody_system.mass
mmax = 10.0 | nbody_system.mass
scaled_mass = new_salpeter_mass_distribution_nbody(number_of_stars,
mass_min = mmin,
mass_max = mmax)
stars.mass = scaled_mass
print("centering stars")
stars.move_to_center()
print("scaling stars to virial equilibrium")
stars.scale_to_standard(smoothing_length_squared
= gravity.parameters.epsilon_squared)
# Set dynamical radii (assuming virial equilibrium and standard
# units). Note that this choice should be refined, and updated
# as the system evolves. Probably the choice of radius should be
# made entirely in the multiples module. TODO. In these units,
# M = 1 and <v^2> = 0.5, so the mean 90-degree turnaround impact
# parameter is
#
# b_90 = G (m_1+m_2) / vrel^2
# = 2 <m> / 2<v^2>
# = 2 / N for equal masses
#
# Taking r_i = m_i / 2<v^2> = m_i in virial equilibrium means
# that, approximately, "contact" means a 90-degree deflection (r_1
# + r_2 = b_90). A more conservative choice with r_i less than
# this value will isolate encounters better, but also place more
# load on the large-N dynamical module.
stars.radius = 0.5*stars.mass.number | nbody_system.length
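    # Worked example of the estimate above (equal masses): with N = 1000,
    # <m> = 1/N gives b_90 = 2/N = 2.0e-3 length units, while the radii set
    # here give r_1 + r_2 = 2*(0.5/N) = 1.0e-3, i.e. half of b_90 -- the
    # "more conservative" choice described in the comment above.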
time = 0.0 | nbody_system.time
# print "IDs:", stars.id.number
print("recentering stars")
stars.move_to_center()
sys.stdout.flush()
#-----------------------------------------------------------------
if softening_length < 0.0 | nbody_system.length:
# Use ~interparticle spacing. Assuming standard units here. TODO
softening_length = 0.5*float(number_of_stars)**(-0.3333333) \
| nbody_system.length
print('softening length =', softening_length)
gravity.parameters.timestep_parameter = timestep_parameter
gravity.parameters.epsilon_squared = softening_length*softening_length
gravity.parameters.use_gpu = use_gpu
print('')
print("adding particles")
# print stars
sys.stdout.flush()
gravity.particles.add_particles(stars)
gravity.commit_particles()
print('')
print("number_of_stars =", number_of_stars)
sys.stdout.flush()
# Channel to copy values from the code to the set in memory.
channel = gravity.particles.new_channel_to(stars)
stopping_condition = gravity.stopping_conditions.collision_detection
stopping_condition.enable()
# -----------------------------------------------------------------
# Create the coupled code and integrate the system to the desired
# time, managing interactions internally.
kep = init_kepler(stars[0], stars[1])
multiples_code = multiples.Multiples(gravity, new_smalln, kep)
multiples_code.neighbor_perturbation_limit = 0.1
multiples_code.neighbor_veto = True
print('')
print('multiples_code.initial_scale_factor =', \
multiples_code.initial_scale_factor)
print('multiples_code.neighbor_perturbation_limit =', \
multiples_code.neighbor_perturbation_limit)
print('multiples_code.neighbor_veto =', \
multiples_code.neighbor_veto)
print('multiples_code.final_scale_factor =', \
multiples_code.final_scale_factor)
print('multiples_code.initial_scatter_factor =', \
multiples_code.initial_scatter_factor)
print('multiples_code.final_scatter_factor =', \
multiples_code.final_scatter_factor)
print('multiples_code.retain_binary_apocenter =', \
multiples_code.retain_binary_apocenter)
print('multiples_code.wide_perturbation_limit =', \
multiples_code.wide_perturbation_limit)
# Take a dummy step, just in case...
multiples_code.evolve_model(time)
# Copy values from the module to the set in memory.
channel.copy()
# Copy the index (ID) as used in the module to the id field in
# memory. The index is not copied by default, as different
# codes may have different indices for the same particle and
# we don't want to overwrite silently.
channel.copy_attribute("index_in_code", "id")
pre = "%%% "
E0,cpu0 = print_log(pre, time, multiples_code)
sys.stdout.flush()
# file = 't='+'{:07.2f}'.format(time.number) # fails in Python 2.6
file = 't=%07.2f'%time.number
write_state_to_file(time, stars, gravity, multiples_code,
file, delta_t, E0, cpu0)
tree_copy = multiples_code.root_to_tree.copy()
del multiples_code
sys.stdout.flush()
gravity.stop()
kep.stop()
stop_smalln()
print('')
if __name__ == '__main__':
# Defaults:
N = 1000
time = 0.0 | nbody_system.time
delta_t = 1.0 | nbody_system.time
n_workers = 1
use_gpu = 1
gpu_worker = 1
salpeter = 0
timestep_parameter = 0.1
softening_length = 0.0 | nbody_system.length
random_seed = -1
try:
opts, args = getopt.getopt(sys.argv[1:], "n:st:")
except getopt.GetoptError as err:
print(str(err))
sys.exit(1)
for o, a in opts:
if o == "-n":
N = int(a)
elif o == "-s":
salpeter = 1
elif o == "-t":
time = float(a) | nbody_system.time
else:
print("unexpected argument", o)
assert is_mpd_running()
make_nbody(N, time, n_workers, use_gpu, gpu_worker,
salpeter, delta_t, timestep_parameter, softening_length,
random_seed)
|
packages/pyright-internal/src/tests/samples/memberAccess17.py
|
Jasha10/pyright
| 3,934 |
81932
|
# This sample tests the case where a __getattr__ method override
# differentiates based on the name of the accessed member.
from typing import Any, overload, Literal
class Obj:
@overload
def __getattr__(self, name: Literal["foo"]) -> int:
...
@overload
def __getattr__(self, name: Literal["bar"]) -> str:
...
def __getattr__(self, name: str) -> Any:
if name == "foo":
return 1
return "1"
obj = Obj()
b1 = obj.foo
reveal_type(b1, expected_text="int")
b2 = getattr(obj, "foo")
reveal_type(b2, expected_text="Any")
c1 = obj.bar
reveal_type(c1, expected_text="str")
c2 = getattr(obj, "bar")
reveal_type(c2, expected_text="Any")
|
extstats/utils/elastic.py
|
bakl/chrome-extensions-archive
| 408 |
81984
|
from datetime import datetime
from elasticsearch_dsl import DocType, String, Date, Integer, Float
from elasticsearch_dsl.connections import connections
# Define a default Elasticsearch client
connections.create_connection(hosts=['localhost'])
class Extension(DocType):
name = String()
url = String()
description = String()
user_count = Integer()
review_count = Float()
review_score = Float()
class Meta:
index = 'exts'
# create the mappings in elasticsearch
Extension.init()
import json
exts = json.load(open('data/PAGES.json'))
# TODO source code extract
# rob query: all ext with this permission in manifest and this regex in source code
# https://www.elastic.co/guide/en/elasticsearch/guide/current/nested-query.html
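# NOTE: extract_sources() is not defined or imported in this module as
# written. A minimal placeholder is sketched below so the script runs; the
# real implementation presumably extracts the extension's source files
# (this stub is an assumption, not the original code).
def extract_sources(ext_id):
    """Placeholder: return a list of source-code strings for an extension."""
    return []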
for ext in exts:
print(ext['name'])
sources = extract_sources(ext['id'])
# create and save
ext = Extension(meta={'id': ext['ext_id']},
name=ext['name'],
sources=sources,
url=ext['url'],
review_count=ext['aggregateRating.properties.ratingCount'],
review_score=ext['aggregateRating.properties.ratingValue'],
description=ext['full_description'],
user_count=int(ext['user_count']))
ext.save()
# Display cluster health
print(connections.get_connection().cluster.health())
|
spikeinterface/widgets/depthamplitude.py
|
JuliaSprenger/spikeinterface
| 116 |
81987
|
<filename>spikeinterface/widgets/depthamplitude.py
import numpy as np
from matplotlib import pyplot as plt
from .basewidget import BaseWidget
from ..toolkit import get_template_extremum_channel, get_template_extremum_amplitude
from .utils import get_unit_colors
class UnitsDepthAmplitudeWidget(BaseWidget):
def __init__(self, waveform_extractor, peak_sign='neg', depth_axis=1,
unit_colors=None, figure=None, ax=None):
BaseWidget.__init__(self, figure, ax)
self.we = waveform_extractor
self.peak_sign = peak_sign
self.depth_axis = depth_axis
if unit_colors is None:
unit_colors = get_unit_colors(self.we.sorting)
self.unit_colors = unit_colors
def plot(self):
ax = self.ax
we = self.we
unit_ids = we.sorting.unit_ids
channels_index = get_template_extremum_channel(we, peak_sign=self.peak_sign, outputs='index')
probe = we.recording.get_probe()
channel_depth = probe.contact_positions[:, self.depth_axis]
unit_depth = [channel_depth[channels_index[unit_id]] for unit_id in unit_ids]
unit_amplitude = get_template_extremum_amplitude(we, peak_sign=self.peak_sign)
unit_amplitude = np.abs([unit_amplitude[unit_id] for unit_id in unit_ids])
colors = [self.unit_colors[unit_id] for unit_id in unit_ids]
num_spikes = np.zeros(len(unit_ids))
for i, unit_id in enumerate(unit_ids):
for segment_index in range(we.sorting.get_num_segments()):
st = we.sorting.get_unit_spike_train(unit_id=unit_id, segment_index=segment_index)
num_spikes[i] += st.size
size = num_spikes / max(num_spikes) * 120
ax.scatter(unit_amplitude, unit_depth, color=colors, s=size)
ax.set_aspect(3)
ax.set_xlabel('amplitude')
ax.set_ylabel('depth [um]')
ax.set_xlim(0, max(unit_amplitude) * 1.2)
def plot_units_depth_vs_amplitude(*args, **kwargs):
W = UnitsDepthAmplitudeWidget(*args, **kwargs)
W.plot()
return W
plot_units_depth_vs_amplitude.__doc__ = UnitsDepthAmplitudeWidget.__doc__
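# A minimal usage sketch (hedged: the waveform folder path is hypothetical,
# and the loader call assumes a WaveformExtractor saved to disk):
#
#   from spikeinterface.core import WaveformExtractor
#   we = WaveformExtractor.load_from_folder('waveforms')
#   w = plot_units_depth_vs_amplitude(we, peak_sign='neg')
#   w.figure.savefig('units_depth_amplitude.png')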
|
dnachisel/DnaOptimizationProblem/__init__.py
|
simone-pignotti/DnaChisel
| 124 |
81988
|
<reponame>simone-pignotti/DnaChisel
from .NoSolutionError import NoSolutionError
from .DnaOptimizationProblem import DnaOptimizationProblem
from .CircularDnaOptimizationProblem import CircularDnaOptimizationProblem
__all__ = [
"NoSolutionError",
"DnaOptimizationProblem",
"CircularDnaOptimizationProblem"
]
|
parlai/tasks/msc/constants.py
|
twstewart42/ParlAI
| 9,228 |
81992
|
<reponame>twstewart42/ParlAI
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
INITIAL_DATA_TO_COMPLETE = [
'valid_0',
'valid_1',
'valid_10',
'valid_100',
'valid_101',
'valid_102',
'valid_103',
'valid_105',
'valid_106',
'valid_107',
'valid_108',
'valid_109',
'valid_11',
'valid_110',
'valid_111',
'valid_112',
'valid_115',
'valid_116',
'valid_117',
'valid_119',
'valid_12',
'valid_120',
'valid_121',
'valid_122',
'valid_123',
'valid_124',
'valid_125',
'valid_13',
'valid_130',
'valid_131',
'valid_133',
'valid_136',
'valid_138',
'valid_139',
'valid_14',
'valid_140',
'valid_141',
'valid_143',
'valid_144',
'valid_145',
'valid_146',
'valid_147',
'valid_148',
'valid_15',
'valid_152',
'valid_153',
'valid_154',
'valid_155',
'valid_156',
'valid_158',
'valid_160',
'valid_162',
'valid_163',
'valid_166',
'valid_169',
'valid_17',
'valid_171',
'valid_172',
'valid_174',
'valid_175',
'valid_176',
'valid_177',
'valid_178',
'valid_18',
'valid_181',
'valid_182',
'valid_184',
'valid_187',
'valid_19',
'valid_190',
'valid_191',
'valid_192',
'valid_193',
'valid_194',
'valid_196',
'valid_2',
'valid_20',
'valid_202',
'valid_203',
'valid_205',
'valid_206',
'valid_207',
'valid_208',
'valid_212',
'valid_214',
'valid_215',
'valid_216',
'valid_217',
'valid_219',
'valid_223',
'valid_225',
'valid_227',
'valid_228',
'valid_23',
'valid_230',
'valid_231',
'valid_232',
'valid_233',
'valid_234',
'valid_236',
]
COMMON_CONFIG = {
'task': 'msc:SessionBaseMsc',
'num_examples': -1,
'label_speaker_id': 'their',
'session_id': 4,
'datatype': 'valid',
}
MODEL_OPT = {
'BST90M': {
'previous_persona_type': 'none',
'num_previous_sessions_msg': 10,
'include_time_gap': False,
}
}
UI_OPT = {'BST90M': {'previous_persona_type': 'both', 'include_time_gap': False}}
|
1-XD_XD/code/v9s.py
|
sethahrenbach/BuildingDetectors_Round2
| 196 |
82017
|
# -*- coding: utf-8 -*-
"""
v9s model
* Input: v5_im
Author: Kohei <<EMAIL>>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import argparse
import math
import glob
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.transform
import skimage.morphology
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
MODEL_NAME = 'v9s'
ORIGINAL_SIZE = 650
INPUT_SIZE = 256
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
BASE_DIR = "/data/train"
WORKING_DIR = "/data/working"
IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# Parameters
MIN_POLYGON_AREA = 30
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Train/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
Path(BASE_DIR) /
Path("{prefix:s}_Test_public/") /
Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# Preprocessing result
FMT_BANDCUT_TH_PATH = IMAGE_DIR + "/bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut{}.csv"
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# Logger
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)
if __name__ == '__main__':
logger.addHandler(handler)
logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
"""
Directory name to AOI number
Usage:
>>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
2
"""
dir_name = Path(datapath).name
if dir_name.startswith('AOI_2_Vegas'):
return 2
elif dir_name.startswith('AOI_3_Paris'):
return 3
elif dir_name.startswith('AOI_4_Shanghai'):
return 4
elif dir_name.startswith('AOI_5_Khartoum'):
return 5
else:
raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
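    # Strip interior rings ("holes") from a WKT polygon embedded in a CSV
    # output line: keep the exterior ring (text before the first '), (') and
    # the trailing CSV fields (after '))",'), dropping the hole coordinates.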
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
def __load_band_cut_th(band_fn, bandsz=3):
df = pd.read_csv(band_fn, index_col='area_id')
all_band_cut_th = {area_id: {} for area_id in range(2, 6)}
for area_id, row in df.iterrows():
for chan_i in range(bandsz):
all_band_cut_th[area_id][chan_i] = dict(
min=row['chan{}_min'.format(chan_i)],
max=row['chan{}_max'.format(chan_i)],
)
return all_band_cut_th
def _calc_fscore_per_aoi(area_id):
prefix = area_id_to_prefix(area_id)
truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
cmd = [
'java',
'-jar',
'/root/visualizer-2.0/visualizer.jar',
'-truth',
truth_file,
'-solution',
poly_file,
'-no-gui',
'-band-triplets',
'/root/visualizer-2.0/data/band-triplets.txt',
'-image-dir',
'pass',
]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = proc.communicate()
    lines = stdout_data.decode('utf8').split('\n')[-10:]
"""
Overall F-score : 0.85029
AOI_2_Vegas:
TP : 27827
FP : 4999
FN : 4800
Precision: 0.847712
Recall : 0.852883
F-score : 0.85029
"""
if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
overall_fscore = 0
tp = 0
fp = 0
fn = 0
precision = 0
recall = 0
fscore = 0
elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
assert lines[0].startswith("Overall F-score : ")
assert lines[2].startswith("AOI_")
assert lines[3].strip().startswith("TP")
assert lines[4].strip().startswith("FP")
assert lines[5].strip().startswith("FN")
assert lines[6].strip().startswith("Precision")
assert lines[7].strip().startswith("Recall")
assert lines[8].strip().startswith("F-score")
        overall_fscore = float(re.findall(r"([\d\.]+)", lines[0])[0])
        tp = int(re.findall(r"(\d+)", lines[3])[0])
        fp = int(re.findall(r"(\d+)", lines[4])[0])
        fn = int(re.findall(r"(\d+)", lines[5])[0])
        precision = float(re.findall(r"([\d\.]+)", lines[6])[0])
        recall = float(re.findall(r"([\d\.]+)", lines[7])[0])
        fscore = float(re.findall(r"([\d\.]+)", lines[8])[0])
else:
logger.warn("Unexpected data >>> " + stdout_data.decode('utf8'))
raise RuntimeError("Unsupported format")
return {
'overall_fscore': overall_fscore,
'tp': tp,
'fp': fp,
'fn': fn,
'precision': precision,
'recall': recall,
'fscore': fscore,
}
def prefix_to_area_id(prefix):
area_dict = {
'AOI_2_Vegas': 2,
'AOI_3_Paris': 3,
'AOI_4_Shanghai': 4,
'AOI_5_Khartoum': 5,
}
    return area_dict[prefix]
def area_id_to_prefix(area_id):
area_dict = {
2: 'AOI_2_Vegas',
3: 'AOI_3_Paris',
4: 'AOI_4_Shanghai',
5: 'AOI_5_Khartoum',
}
return area_dict[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
prefix = area_id_to_prefix(area_id)
fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
best_row = pd.read_csv(fn_hist).sort_values(
by='fscore',
ascending=False,
).iloc[0]
param = dict(
fn_epoch=int(best_row['zero_base_epoch']),
min_poly_area=int(best_row['min_area_th']),
)
return param
def get_resized_raster_3chan_image(image_id, band_cut_th=None):
fn = train_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
return values
def get_resized_raster_3chan_image_test(image_id, band_cut_th=None):
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_cut_th[chan_i]['min']
max_val = band_cut_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
values = np.swapaxes(values, 0, 2)
values = np.swapaxes(values, 0, 1)
values = skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
return values
def image_mask_resized_from_summary(df, image_id):
im_mask = np.zeros((650, 650))
for idx, row in df[df.ImageId == image_id].iterrows():
shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
if shape_obj.exterior is not None:
coords = list(shape_obj.exterior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 1
interiors = shape_obj.interiors
for interior in interiors:
coords = list(interior.coords)
x = [round(float(pp[0])) for pp in coords]
y = [round(float(pp[1])) for pp in coords]
yy, xx = skimage.draw.polygon(y, x, (650, 650))
im_mask[yy, xx] = 0
im_mask = skimage.transform.resize(im_mask, (INPUT_SIZE, INPUT_SIZE))
im_mask = (im_mask > 0.5).astype(np.uint8)
return im_mask
def train_test_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_cut_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_TRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_3chan_image_test(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def valtrain_test_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_cut_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_VALTRAIN_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_IM_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_3chan_image(image_id, band_cut_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTRAIN_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
fn = FMT_VALTEST_MASK_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im_mask = image_mask_resized_from_summary(df_summary, image_id)
atom = tb.Atom.from_dtype(im_mask.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
filters=filters)
ds[:] = im_mask
def train_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(
FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_TRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_TEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_8chan_image_test(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
def valtrain_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn = FMT_VALTRAIN_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
fn = FMT_VALTEST_MUL_STORE.format(prefix)
logger.info("Prepare image container: {}".format(fn))
with tb.open_file(fn, 'w') as f:
for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
im = get_resized_raster_8chan_image(
image_id, band_rgb_th, band_mul_th)
atom = tb.Atom.from_dtype(im.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, image_id, atom, im.shape,
filters=filters)
ds[:] = im
def _load_train_summary_data(area_id):
prefix = area_id_to_prefix(area_id)
fn = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df = pd.read_csv(fn)
return df
def split_val_train_test(area_id):
prefix = area_id_to_prefix(area_id)
df = _load_train_summary_data(area_id)
df_agg = df.groupby('ImageId').agg('first')
image_id_list = df_agg.index.tolist()
np.random.shuffle(image_id_list)
sz_valtrain = int(len(image_id_list) * 0.7)
sz_valtest = len(image_id_list) - sz_valtrain
pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index=False)
pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index=False)
def train_image_id_to_mspec_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_mspec_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_MSPEC_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def train_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TRAIN_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def test_image_id_to_path(image_id):
prefix = image_id_to_prefix(image_id)
fn = FMT_TEST_RGB_IMAGE_PATH.format(
prefix=prefix,
image_id=image_id)
return fn
def image_id_to_prefix(image_id):
prefix = image_id.split('img')[0][:-1]
return prefix
def calc_multiband_cut_threshold(area_id):
rows = []
band_cut_th = __calc_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(FMT_BANDCUT_TH_PATH.format(prefix), index=False)
def __calc_multiband_cut_threshold(area_id):
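    # Estimate per-channel clipping thresholds (2nd/98th percentiles of the
    # non-zero pixel values) from up to 500 val-train and 500 val-test RGB
    # images for this AOI.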
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(3)}
band_cut_th = {k: dict(max=0, min=0) for k in range(3)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                ) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(3):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                ) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
for i_chan in range(3):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(
band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(
band_values[i_chan], 2)
return band_cut_th
def calc_mul_multiband_cut_threshold(area_id):
rows = []
band_cut_th = __calc_mul_multiband_cut_threshold(area_id)
prefix = area_id_to_prefix(area_id)
row = dict(prefix=area_id_to_prefix(area_id))
row['area_id'] = area_id
for chan_i in band_cut_th.keys():
row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
rows.append(row)
pd.DataFrame(rows).to_csv(
FMT_MUL_BANDCUT_TH_PATH.format(prefix),
index=False)
def __calc_mul_multiband_cut_threshold(area_id):
prefix = area_id_to_prefix(area_id)
band_values = {k: [] for k in range(8)}
band_cut_th = {k: dict(max=0, min=0) for k in range(8)}
image_id_list = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                ) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
image_id_list = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(
prefix=prefix)).ImageId.tolist()
for image_id in tqdm.tqdm(image_id_list[:500]):
image_fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(image_fn, 'r') as f:
values = f.read().astype(np.float32)
for i_chan in range(8):
values_ = values[i_chan].ravel().tolist()
values_ = np.array(
[v for v in values_ if v != 0]
                ) # Remove censored (zero-valued) pixels
band_values[i_chan].append(values_)
for i_chan in range(8):
band_values[i_chan] = np.concatenate(
band_values[i_chan]).ravel()
band_cut_th[i_chan]['max'] = scipy.percentile(
band_values[i_chan], 98)
band_cut_th[i_chan]['min'] = scipy.percentile(
band_values[i_chan], 2)
return band_cut_th
def get_unet():
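    # Standard U-Net: a four-level conv/max-pool encoder (32 -> 512 filters)
    # mirrored by an upsampling decoder with skip connections, ending in a
    # 1x1 sigmoid convolution that emits a per-pixel building-mask
    # probability over the 8-channel 256x256 input.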
conv_params = dict(activation='relu', border_mode='same')
merge_params = dict(mode='concat', concat_axis=1)
inputs = Input((8, 256, 256))
conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
adam = Adam()
model = Model(input=inputs, output=conv10)
model.compile(optimizer=adam,
loss='binary_crossentropy',
metrics=['accuracy', jaccard_coef, jaccard_coef_int])
return model
def jaccard_coef(y_true, y_pred):
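    # Soft Jaccard (IoU) metric computed on the raw sigmoid outputs;
    # `smooth` avoids division by zero when both masks are empty.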
smooth = 1e-12
intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1e-12
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def generate_test_batch(area_id,
batch_size=64,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_TEST_MUL_STORE.format(prefix)
image_id_list = df_test.ImageId.tolist()
if enable_tqdm:
pbar = tqdm.tqdm(total=len(image_id_list))
while 1:
total_sz = len(image_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im:
for i_batch in range(n_batch):
target_image_ids = image_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_image_ids) == 0:
continue
X_test = []
y_test = []
for image_id in target_image_ids:
im = np.array(f_im.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
y_test.append(mask)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_test = X_test - immean
if enable_tqdm:
pbar.update(y_test.shape[0])
yield (X_test, y_test)
if enable_tqdm:
pbar.close()
def get_resized_raster_8chan_image_test(image_id, band_rgb_th, band_mul_th):
"""
RGB + multispectral (total: 8 channels)
"""
im = []
fn = test_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_rgb_th[chan_i]['min']
max_val = band_rgb_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
fn = test_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
usechannels = [1, 2, 5, 6, 7]
for chan_i in usechannels:
min_val = band_mul_th[chan_i]['min']
max_val = band_mul_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
im = np.array(im) # (ch, w, h)
im = np.swapaxes(im, 0, 2) # -> (h, w, ch)
im = np.swapaxes(im, 0, 1) # -> (w, h, ch)
return im
def get_resized_raster_8chan_image(image_id, band_rgb_th, band_mul_th):
"""
RGB + multispectral (total: 8 channels)
"""
im = []
fn = train_image_id_to_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
for chan_i in range(3):
min_val = band_rgb_th[chan_i]['min']
max_val = band_rgb_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
fn = train_image_id_to_mspec_path(image_id)
with rasterio.open(fn, 'r') as f:
values = f.read().astype(np.float32)
usechannels = [1, 2, 5, 6, 7]
for chan_i in usechannels:
min_val = band_mul_th[chan_i]['min']
max_val = band_mul_th[chan_i]['max']
values[chan_i] = np.clip(values[chan_i], min_val, max_val)
values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
im.append(skimage.transform.resize(
values[chan_i],
(INPUT_SIZE, INPUT_SIZE)))
im = np.array(im) # (ch, w, h)
im = np.swapaxes(im, 0, 2) # -> (h, w, ch)
im = np.swapaxes(im, 0, 1) # -> (w, h, ch)
return im
def _get_train_mul_data(area_id):
"""
RGB + multispectral (total: 8 channels)
"""
prefix = area_id_to_prefix(area_id)
fn_train = FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_train = []
fn_im = FMT_TRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
X_train = np.array(X_train)
y_train = []
fn_mask = FMT_TRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_train.append(mask)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_train, y_train
def _get_test_mul_data(area_id):
"""
RGB + multispectral (total: 8 channels)
"""
prefix = area_id_to_prefix(area_id)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_test = []
fn_im = FMT_TEST_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
X_test = np.array(X_test)
return X_test
def _get_valtest_mul_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_val = []
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def _get_valtrain_mul_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_val = []
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def get_mul_mean_image(area_id):
prefix = area_id_to_prefix(area_id)
with tb.open_file(FMT_MULMEAN.format(prefix), 'r') as f:
im_mean = np.array(f.get_node('/mulmean'))
return im_mean
def preproc_stage3(area_id):
prefix = area_id_to_prefix(area_id)
if not Path(FMT_VALTEST_MUL_STORE.format(prefix)).exists():
valtrain_test_mul_image_prep(area_id)
if not Path(FMT_TEST_MUL_STORE.format(prefix)).exists():
train_test_mul_image_prep(area_id)
# mean image for subtract preprocessing
X1, _ = _get_train_mul_data(area_id)
X2 = _get_test_mul_data(area_id)
X = np.vstack([X1, X2])
print(X.shape)
X_mean = X.mean(axis=0)
fn = FMT_MULMEAN.format(prefix)
logger.info("Prepare mean image: {}".format(fn))
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(X_mean.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'mulmean', atom, X_mean.shape,
filters=filters)
ds[:] = X_mean
def _internal_test_predict_best_param(area_id,
save_pred=True):
prefix = area_id_to_prefix(area_id)
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
min_th = param['min_poly_area']
# Prediction phase
logger.info("Prediction phase: {}".format(prefix))
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
fn = FMT_TESTPRED_PATH.format(prefix)
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet()
model.load_weights(fn_model)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_test_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=True,
),
val_samples=len(df_test),
)
del model
# Save prediction result
if save_pred:
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
filters=filters)
ds[:] = y_pred
return y_pred
def _internal_test(area_id, enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
y_pred = _internal_test_predict_best_param(area_id, save_pred=False)
param = _get_model_parameter(area_id)
min_th = param['min_poly_area']
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_image_list = df_test.index.tolist()
for idx, image_id in tqdm.tqdm(enumerate(test_image_list),
total=len(test_image_list)):
df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
def validate_score(area_id):
"""
Calc competition score
"""
prefix = area_id_to_prefix(area_id)
# Prediction phase
if not Path(FMT_VALTESTPRED_PATH.format(prefix)).exists():
X_val, y_val = _get_valtest_mul_data(area_id)
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
model = get_unet()
model.load_weights(FMT_VALMODEL_PATH.format(prefix))
y_pred = model.predict(X_val - X_mean, batch_size=8, verbose=1)
del model
# Save prediction result
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
filters=filters)
ds[:] = y_pred
# Postprocessing phase
if not Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists():
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
print(y_pred.shape)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
for idx, image_id in enumerate(df_test.index.tolist()):
df_poly = mask_to_poly(y_pred[idx][0])
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio))
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# update fn_out
with open(fn_out, 'r') as f:
lines = f.readlines()
with open(fn_out, 'w') as f:
f.write(lines[0])
for line in lines[1:]:
line = _remove_interiors(line)
f.write(line)
# Validation solution file
if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_"
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def validate_all_score():
header_line = []
lines = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
assert Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists()
with open(FMT_VALTESTTRUTH_PATH.format(prefix), 'r') as f:
header_line = f.readline()
lines += f.readlines()
with open(FMT_VALTESTTRUTH_OVALL_PATH, 'w') as f:
f.write(header_line)
for line in lines:
f.write(line)
# Predicted polygons
header_line = []
lines = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
assert Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists()
with open(FMT_VALTESTPOLY_PATH.format(prefix), 'r') as f:
header_line = f.readline()
lines += f.readlines()
with open(FMT_VALTESTPOLY_OVALL_PATH, 'w') as f:
f.write(header_line)
for line in lines:
f.write(line)
def generate_valtest_batch(area_id,
batch_size=8,
immean=None,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
image_id_list = df_train.ImageId.tolist()
if enable_tqdm:
pbar = tqdm.tqdm(total=len(image_id_list))
while 1:
total_sz = len(image_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_image_ids = image_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_image_ids) == 0:
continue
X_train = []
y_train = []
for image_id in target_image_ids:
im = np.array(f_im.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
mask = np.array(f_mask.get_node('/' + image_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
if enable_tqdm:
pbar.update(y_train.shape[0])
yield (X_train, y_train)
if enable_tqdm:
pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
prefix = area_id_to_prefix(area_id)
df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
image_id_list = df_train.ImageId.tolist()
np.random.shuffle(image_id_list)
    while True:
total_sz = len(image_id_list)
n_batch = int(math.floor(total_sz / batch_size) + 1)
with tb.open_file(fn_im, 'r') as f_im,\
tb.open_file(fn_mask, 'r') as f_mask:
for i_batch in range(n_batch):
target_image_ids = image_id_list[
i_batch*batch_size:(i_batch+1)*batch_size
]
if len(target_image_ids) == 0:
continue
X_train = []
y_train = []
for image_id in target_image_ids:
im = np.array(f_im.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_train.append(im)
mask = np.array(f_mask.get_node('/' + image_id))
mask = (mask > 0).astype(np.uint8)
y_train.append(mask)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
if immean is not None:
X_train = X_train - immean
yield (X_train, y_train)
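# Illustrative sketch only (not invoked anywhere in this script): both batch
# generators above yield (X, y) pairs forever, so they fit the Keras 1.x
# generator API used elsewhere in this file. `samples_per_epoch` below is an
# assumed value, not taken from the original pipeline.
#
#   model = get_unet()
#   model.fit_generator(
#       generate_valtrain_batch(area_id, batch_size=8, immean=X_mean),
#       samples_per_epoch=1024,
#       nb_epoch=10,
#       validation_data=generate_valtest_batch(area_id, immean=X_mean),
#       nb_val_samples=256)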
def _get_test_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_test = []
fn_im = FMT_TEST_IM_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_test.append(im)
X_test = np.array(X_test)
return X_test
def _get_valtest_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
X_val = []
fn_im = FMT_VALTEST_IM_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def _get_valtrain_data(area_id):
prefix = area_id_to_prefix(area_id)
fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
df_train = pd.read_csv(fn_train)
X_val = []
fn_im = FMT_VALTRAIN_IM_STORE.format(prefix)
with tb.open_file(fn_im, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_train.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def predict(area_id):
prefix = area_id_to_prefix(area_id)
X_test = _get_test_mul_data(area_id)
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
model = get_unet()
model.load_weights(FMT_VALMODEL_PATH.format(prefix))
y_pred = model.predict(X_test - X_mean, batch_size=8, verbose=1)
del model
# Save prediction result
fn = FMT_TESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
filters=filters)
ds[:] = y_pred
def _internal_validate_predict_best_param(area_id,
enable_tqdm=False):
param = _get_model_parameter(area_id)
epoch = param['fn_epoch']
y_pred = _internal_validate_predict(
area_id,
epoch=epoch,
save_pred=False,
enable_tqdm=enable_tqdm)
return y_pred
def _internal_validate_predict(area_id,
epoch=3,
save_pred=True,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet()
model.load_weights(fn_model)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_valtest_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=enable_tqdm,
),
val_samples=len(df_test),
)
del model
# Save prediction result
if save_pred:
fn = FMT_VALTESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'w') as f:
atom = tb.Atom.from_dtype(y_pred.dtype)
filters = tb.Filters(complib='blosc', complevel=9)
ds = f.create_carray(f.root,
'pred',
atom,
y_pred.shape,
filters=filters)
ds[:] = y_pred
return y_pred
def _internal_validate_fscore_wo_pred_file(area_id,
epoch=3,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# Prediction phase
logger.info("Prediction phase")
y_pred = _internal_validate_predict(
area_id,
epoch=epoch,
save_pred=False,
enable_tqdm=enable_tqdm)
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_VALTESTPRED_PATH.format(prefix)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_"
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
def _internal_validate_fscore(area_id,
epoch=3,
predict=True,
min_th=MIN_POLYGON_AREA,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
# Prediction phase
logger.info("Prediction phase")
if predict:
_internal_validate_predict(
area_id,
epoch=epoch,
enable_tqdm=enable_tqdm)
# Postprocessing phase
logger.info("Postprocessing phase")
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_VALTESTPRED_PATH.format(prefix)
fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f,\
tb.open_file(fn, 'r') as fr:
y_pred = np.array(fr.get_node('/pred'))
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
test_list = df_test.index.tolist()
iterator = enumerate(test_list)
for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
line = "{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio)
line = _remove_interiors(line)
f.write(line)
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
# ------------------------
# Validation solution file
logger.info("Validation solution file")
# if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
if True:
fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
df_true = pd.read_csv(fn_true)
# # Remove prefix "PAN_"
# df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test)
df_test_image_ids = df_test.ImageId.unique()
fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
for idx, r in df_true.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
r.ImageId,
r.BuildingId,
r.PolygonWKT_Pix,
1.0))
@click.group()
def cli():
pass
@cli.command()
@click.argument('datapath', type=str)
def validate(datapath):
area_id = directory_name_to_area_id(datapath)
prefix = area_id_to_prefix(area_id)
logger.info(">> validate sub-command: {}".format(prefix))
X_mean = get_mul_mean_image(area_id)
X_val, y_val = _get_valtest_mul_data(area_id)
X_val = X_val - X_mean
if not Path(MODEL_DIR).exists():
Path(MODEL_DIR).mkdir(parents=True)
logger.info("load valtrain")
X_trn, y_trn = _get_valtrain_mul_data(area_id)
X_trn = X_trn - X_mean
model = get_unet()
model_checkpoint = ModelCheckpoint(
FMT_VALMODEL_PATH.format(prefix + "_{epoch:02d}"),
monitor='val_jaccard_coef_int',
save_best_only=False)
model_earlystop = EarlyStopping(
monitor='val_jaccard_coef_int',
patience=10,
verbose=0,
mode='max')
model_history = History()
df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
logger.info("Fit")
model.fit(
X_trn, y_trn,
nb_epoch=200,
shuffle=True,
verbose=1,
validation_data=(X_val, y_val),
callbacks=[model_checkpoint, model_earlystop, model_history])
model.save_weights(FMT_VALMODEL_LAST_PATH.format(prefix))
# Save evaluation history
pd.DataFrame(model_history.history).to_csv(
FMT_VALMODEL_HIST.format(prefix), index=False)
logger.info(">> validate sub-command: {} ... Done".format(prefix))
@cli.command()
@click.argument('datapath', type=str)
def testproc(datapath):
area_id = directory_name_to_area_id(datapath)
prefix = area_id_to_prefix(area_id)
logger.info(">>>> Test proc for {}".format(prefix))
_internal_test(area_id)
logger.info(">>>> Test proc for {} ... done".format(prefix))
@cli.command()
@click.argument('datapath', type=str)
def evalfscore(datapath):
area_id = directory_name_to_area_id(datapath)
prefix = area_id_to_prefix(area_id)
logger.info("Evaluate fscore on validation set: {}".format(prefix))
# for each epoch
# if not Path(FMT_VALMODEL_EVALHIST.format(prefix)).exists():
if True:
df_hist = pd.read_csv(FMT_VALMODEL_HIST.format(prefix))
df_hist.loc[:, 'epoch'] = list(range(1, len(df_hist) + 1))
rows = []
for zero_base_epoch in range(0, len(df_hist)):
logger.info(">>> Epoch: {}".format(zero_base_epoch))
_internal_validate_fscore_wo_pred_file(
area_id,
epoch=zero_base_epoch,
enable_tqdm=True,
min_th=MIN_POLYGON_AREA)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['zero_base_epoch'] = zero_base_epoch
evaluate_record['min_area_th'] = MIN_POLYGON_AREA
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
rows.append(evaluate_record)
pd.DataFrame(rows).to_csv(
FMT_VALMODEL_EVALHIST.format(prefix),
index=False)
# find best min-poly-threshold
df_evalhist = pd.read_csv(FMT_VALMODEL_EVALHIST.format(prefix))
best_row = df_evalhist.sort_values(by='fscore', ascending=False).iloc[0]
best_epoch = int(best_row.zero_base_epoch)
best_fscore = best_row.fscore
# optimize min area th
rows = []
for th in [30, 60, 90, 120, 150, 180, 210, 240]:
logger.info(">>> TH: {}".format(th))
predict_flag = False
if th == 30:
predict_flag = True
_internal_validate_fscore(
area_id,
epoch=best_epoch,
enable_tqdm=True,
min_th=th,
predict=predict_flag)
evaluate_record = _calc_fscore_per_aoi(area_id)
evaluate_record['zero_base_epoch'] = best_epoch
evaluate_record['min_area_th'] = th
evaluate_record['area_id'] = area_id
logger.info("\n" + json.dumps(evaluate_record, indent=4))
rows.append(evaluate_record)
pd.DataFrame(rows).to_csv(
FMT_VALMODEL_EVALTHHIST.format(prefix),
index=False)
logger.info("Evaluate fscore on validation set: {} .. done".format(prefix))
def mask_to_poly(mask, min_polygon_area_th=MIN_POLYGON_AREA):
"""
Convert from 256x256 mask to polygons on 650x650 image
"""
mask = (skimage.transform.resize(mask, (650, 650)) > 0.5).astype(np.uint8)
shapes = rasterio.features.shapes(mask.astype(np.int16), mask > 0)
poly_list = []
mp = shapely.ops.cascaded_union(
shapely.geometry.MultiPolygon([
shapely.geometry.shape(shape)
for shape, value in shapes
]))
if isinstance(mp, shapely.geometry.Polygon):
df = pd.DataFrame({
'area_size': [mp.area],
'poly': [mp],
})
else:
df = pd.DataFrame({
'area_size': [p.area for p in mp],
'poly': [p for p in mp],
})
df = df[df.area_size > min_polygon_area_th].sort_values(
by='area_size', ascending=False)
df.loc[:, 'wkt'] = df.poly.apply(lambda x: shapely.wkt.dumps(
x, rounding_precision=0))
df.loc[:, 'bid'] = list(range(1, len(df) + 1))
df.loc[:, 'area_ratio'] = df.area_size / df.area_size.max()
return df
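# Minimal usage sketch for mask_to_poly (inputs here are synthetic, for
# illustration): feed a 256x256 soft mask, get back building polygons as WKT
# strings on the 650x650 pixel grid.
#
#   pred = np.zeros((256, 256), dtype=np.float32)
#   pred[40:80, 40:80] = 1.0   # one square "building"
#   df = mask_to_poly(pred)    # columns: area_size, poly, wkt, bid, area_ratio
#   print(df.wkt.iloc[0])      # e.g. 'POLYGON ((102 102, ...))'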
def postproc(area_id):
# Mask to poly
print(area_id)
prefix = area_id_to_prefix(area_id)
fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
fn = FMT_TESTPRED_PATH.format(prefix)
with tb.open_file(fn, 'r') as f:
y_pred = np.array(f.get_node('/pred'))
print(y_pred.shape)
fn_out = FMT_TESTPOLY_PATH.format(prefix)
with open(fn_out, 'w') as f:
f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
for idx, image_id in enumerate(df_test.index.tolist()):
df_poly = mask_to_poly(y_pred[idx][0])
if len(df_poly) > 0:
for i, row in df_poly.iterrows():
f.write("{},{},\"{}\",{:.6f}\n".format(
image_id,
row.bid,
row.wkt,
row.area_ratio))
else:
f.write("{},{},{},0\n".format(
image_id,
-1,
"POLYGON EMPTY"))
def merge():
df_list = []
for area_id in range(2, 6):
prefix = area_id_to_prefix(area_id)
df_part = pd.read_csv(
FMT_TESTPOLY_PATH.format(prefix))
df_list.append(df_part)
df = pd.concat(df_list)
df.to_csv(FN_SOLUTION_CSV, index=False)
with open(FN_SOLUTION_CSV, 'r') as f:
lines = f.readlines()
with open(FN_SOLUTION_CSV, 'w') as f:
f.write(lines[0])
for line in lines[1:]:
line = _remove_interiors(line)
f.write(line)
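# Typical invocation of this script (script name and paths are hypothetical;
# adjust to your SpaceNet data layout):
#   python <this_script>.py validate /data/train/AOI_2_Vegas_Train
#   python <this_script>.py evalfscore /data/train/AOI_2_Vegas_Train
#   python <this_script>.py testproc /data/test/AOI_2_Vegas_Test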
if __name__ == '__main__':
cli()
|
sfaira/models/made.py
|
johnmous/sfaira
| 110 |
82034
|
from random import randint
import numpy as np
try:
import tensorflow as tf
except ImportError:
tf = None
# ToDo: we are using a lot of tf.keras.backend modules below, can we use tf core instead?
class MaskingDense(tf.keras.layers.Layer):
    """Code adapted from the Keras Dense layer, with masking and a few other tricks added:
    - Direct auto-regressive connections to the output
    - Allows a second (non-autoregressive) input that is fully connected to the first hidden layer
    - Either 1 output or 2 outputs (concatenated) that are separately
      auto-regressive w.r.t. the input
    """
def __init__(self, units, out_units,
hidden_layers=1,
dropout_rate=0.0,
random_input_order=False,
activation='elu',
out_activation='linear',
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
out_kernel_initializer='glorot_uniform',
out_bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name=None,
batchnorm=False,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(MaskingDense, self).__init__(name=name, **kwargs)
self.input_sel = None
self.random_input_order = random_input_order
self.rate = min(1., max(0., dropout_rate))
self.kernel_sels = []
self.units = units
self.out_units = out_units
self.hidden_layers = hidden_layers
self.activation = tf.keras.activations.get(activation)
self.out_activation = tf.keras.activations.get(out_activation) # None gives linear activation
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.out_kernel_initializer = tf.keras.initializers.get(out_kernel_initializer)
self.out_bias_initializer = tf.keras.initializers.get(out_bias_initializer)
self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self.activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self.bias_constraint = tf.keras.constraints.get(bias_constraint)
self.batchnorm = batchnorm
def dropout_wrapper(self, inputs, training):
if 0. < self.rate < 1.:
def dropped_inputs():
return tf.keras.backend.dropout(inputs, self.rate, noise_shape=None, seed=None)
return tf.keras.backend.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
def build_layer_weights(
self,
input_dim,
units,
use_bias=True,
is_output=False,
id=''
):
kernel_initializer = (self.kernel_initializer if not is_output
else self.out_kernel_initializer)
bias_initializer = (self.bias_initializer if not is_output
else self.out_bias_initializer)
kernel = self.add_weight(shape=(input_dim, units),
initializer=kernel_initializer,
name='kernel' + id,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if use_bias:
bias = self.add_weight(shape=(units,),
initializer=bias_initializer,
name='bias' + id,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
bias = None
return kernel, bias
def build_mask(self, shape, prev_sel, is_output):
if is_output:
if shape[-1] == len(self.input_sel):
input_sel = self.input_sel
else:
                # Tile (not scale) the ordering for the doubled output width;
                # self.input_sel is a numpy array, so `* 2` would double the
                # values and break the auto-regressive masks.
                input_sel = list(self.input_sel) * 2
else:
# Disallow D-1 because it would violate auto-regressive property
# Disallow unconnected units by sampling min from previous layer
input_sel = [randint(np.min(prev_sel), shape[-1] - 2) for i in range(shape[-1])]
def vals():
in_len = len(self.input_sel)
for x in range(shape[-2]):
for y in range(shape[-1]):
if is_output:
yield 1 if prev_sel[x] < input_sel[y % in_len] else 0
else:
yield 1 if prev_sel[x] <= input_sel[y] else 0
return tf.keras.backend.constant(list(vals()), dtype='float32', shape=shape), input_sel
def build(self, input_shape):
if isinstance(input_shape, list):
if len(input_shape) != 2:
                raise ValueError('A list input is only supported for exactly two inputs')
input_shape, other_input_shape = input_shape
# Build weights for other (non-autoregressive) vector
other_shape = (other_input_shape[-1], self.units)
self.other_kernel, self.other_bias = self.build_layer_weights(*other_shape, id='_h')
assert len(input_shape) >= 2
assert self.out_units == input_shape[-1] or self.out_units == 2 * input_shape[-1]
self.kernels, self.biases = [], []
self.kernel_masks, self.kernel_sels = [], []
self.batch_norms = []
shape = (input_shape[-1], self.units)
self.input_sel = np.arange(input_shape[-1])
if self.random_input_order:
np.random.shuffle(self.input_sel)
prev_sel = self.input_sel
for i in range(self.hidden_layers):
# Hidden layer
kernel, bias = self.build_layer_weights(*shape, id=str(i))
self.kernels.append(kernel)
self.biases.append(bias)
# Hidden layer mask
kernel_mask, kernel_sel = self.build_mask(shape, prev_sel, is_output=False)
self.kernel_masks.append(kernel_mask)
self.kernel_sels.append(kernel_sel)
prev_sel = kernel_sel
shape = (self.units, self.units)
self.batch_norms.append(tf.keras.layers.BatchNormalization(center=True, scale=True))
# Direct connection between input/output
if self.hidden_layers > 0:
direct_shape = (input_shape[-1], self.out_units)
self.direct_kernel, _ = self.build_layer_weights(
*direct_shape,
use_bias=False,
is_output=True,
id='_direct')
self.direct_kernel_mask, self.direct_sel = self.build_mask(direct_shape, self.input_sel,
is_output=True)
# Output layer
out_shape = (self.units, self.out_units)
self.out_kernel, self.out_bias = self.build_layer_weights(
*out_shape,
is_output=True,
id='_out')
self.out_kernel_mask, self.out_sel = self.build_mask(out_shape, prev_sel, is_output=True)
self.built = True
def call(self, inputs, training=None):
other_input = None
if isinstance(inputs, list):
assert len(inputs) == 2
assert self.hidden_layers > 0, "other input not supported if no hidden layers"
assert hasattr(self, 'other_kernel')
inputs, other_input = inputs
output = inputs
if other_input is not None:
other = tf.keras.backend.dot(other_input, self.other_kernel)
other = tf.keras.backend.bias_add(other, self.other_bias)
other = self.activation(other)
# Hidden layer + mask
for i in range(self.hidden_layers):
# i=0: input_dim -> masking_dim
# i>0: masking_dim -> masking_dim
weight = self.kernels[i] * self.kernel_masks[i]
output = tf.keras.backend.dot(output, weight)
# "other" input
if i == 0 and other_input is not None:
output = output + other
output = tf.keras.backend.bias_add(output, self.biases[i])
output = self.activation(output)
if self.batchnorm:
output = self.batch_norms[i](output)
output = self.dropout_wrapper(output, training)
# out_act(bias + (V dot M_v)h(x) + (A dot M_a)x + (other dot M_other)other)
# masking_dim -> input_dim
output = tf.keras.backend.dot(output, self.out_kernel * self.out_kernel_mask)
# Direct connection
if self.hidden_layers > 0:
# input_dim -> input_dim
direct = tf.keras.backend.dot(inputs, self.direct_kernel * self.direct_kernel_mask)
output = output + direct
output = tf.keras.backend.bias_add(output, self.out_bias)
output = self.out_activation(output)
output = self.dropout_wrapper(output, training)
return output
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
return (input_shape[0], self.out_units)
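# Usage sketch (illustrative only, not part of the original module): a single
# MADE-style block mapping 10 autoregressive inputs to 10 outputs through two
# masked hidden layers of width 64.
#
#   x = tf.keras.layers.Input(shape=(10,))
#   out = MaskingDense(units=64, out_units=10, hidden_layers=2)(x)
#   made = tf.keras.Model(x, out)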
|
Clients/PythonCatalyst/Testing/package_test/__init__.py
|
xj361685640/ParaView
| 815 |
82042
|
<gh_stars>100-1000
# filename: __init__.py
# used to test that Catalyst can load Packages
# correctly.
from paraview.simple import *
from paraview import print_info
# print start marker
print_info("begin '%s'", __name__)
tracker = {}
def count(f):
def wrapper(*args, **kwargs):
global tracker
c = tracker.get(f.__name__, 0)
tracker[f.__name__] = c+1
return f(*args, **kwargs)
return wrapper
@count
def catalyst_initialize():
print_info("in '%s::catalyst_initialize'", __name__)
@count
def catalyst_execute(info):
print_info("in '%s::catalyst_execute'", __name__)
@count
def catalyst_finalize():
print_info("in '%s::catalyst_finalize'", __name__)
global tracker
assert tracker["catalyst_initialize"] == 1
assert tracker["catalyst_finalize"] == 1
assert tracker["catalyst_execute"] >= 1
print_info("All's ok")
# print end marker
print_info("end '%s'", __name__)
|
bindings/kepler.gl-jupyter/setup.py
|
Rikuoja/kepler.gl
| 4,297 |
82097
|
from __future__ import print_function
from distutils import log
from setuptools import setup, find_packages
import os
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
get_version,
skip_if_exists
)
# Name of the project
name = 'keplergl'
here = os.path.dirname(os.path.abspath(__file__))
long_description = 'Keplergl Jupyter Extension'
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
# Get version
version = get_version(os.path.join(name, '_version.py'))
js_dir = os.path.join(here, 'js')
# Representative files that should exist after a successful build
jstargets = [
os.path.join('keplergl', 'static', 'index.js'),
os.path.join('keplergl-jupyter', 'labextension', 'package.json'),
]
data_files_spec = [
('share/jupyter/nbextensions/keplergl-jupyter',
'keplergl/static', '**'),
('share/jupyter/labextensions/keplergl-jupyter',
'keplergl-jupyter/labextension', "**"),
('etc/jupyter/nbconfig/notebook.d', '.', 'keplergl-jupyter.json'),
]
cmdclass = create_cmdclass('jsdeps', data_files_spec=data_files_spec)
js_command = combine_commands(
install_npm(js_dir, npm=["yarn"], build_cmd='build'), ensure_targets(jstargets),
)
is_repo = os.path.exists(os.path.join(here, '.git'))
if is_repo:
cmdclass['jsdeps'] = js_command
else:
cmdclass['jsdeps'] = skip_if_exists(jstargets, js_command)
LONG_DESCRIPTION = 'A jupyter widget for kepler.gl, an advanced geospatial visualization tool, to render large-scale interactive maps.'
setup_args = {
'name': 'keplergl',
'version': version,
'description': 'This is a simple jupyter widget for kepler.gl, an advanced geospatial visualization tool, to render large-scale interactive maps.',
'long_description': LONG_DESCRIPTION,
'include_package_data': True,
'install_requires': [
'ipywidgets>=7.0.0,<8',
'traittypes>=0.2.1',
'geopandas>=0.5.0',
'pandas>=0.23.0',
'Shapely>=1.6.4.post2'
],
'packages': find_packages(),
'zip_safe': False,
'cmdclass': cmdclass,
'author': '<NAME>',
'author_email': '<EMAIL>',
'url': 'https://github.com/keplergl/kepler.gl/tree/master/bindings/kepler.gl-jupyter',
'keywords': [
'ipython',
'jupyter',
'widgets',
'geospatial',
'visualization',
'webGL'
],
'classifiers': [
'Development Status :: 4 - Beta',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Multimedia :: Graphics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
}
setup(**setup_args)
|
var/spack/repos/builtin/packages/py-dm-tree/package.py
|
LiamBindle/spack
| 2,360 |
82115
|
<gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import tempfile
class PyDmTree(PythonPackage):
"""tree is a library for working with nested data structures. In a
    way, tree generalizes the builtin map() function, which only
    supports flat sequences, and allows applying a function to each
    leaf while preserving the overall structure."""
homepage = "https://github.com/deepmind/tree"
pypi = "dm-tree/dm-tree-0.1.5.tar.gz"
maintainers = ['aweits']
version('0.1.5', sha256='a951d2239111dfcc468071bc8ff792c7b1e3192cab5a3c94d33a8b2bda3127fa')
depends_on('py-setuptools', type='build')
depends_on('bazel', type='build')
depends_on('[email protected]:', type=('build', 'run'))
@run_after('install')
def clean(self):
remove_linked_tree(self.tmp_path)
def patch(self):
self.tmp_path = tempfile.mkdtemp(dir='/tmp', prefix='spack')
env['TEST_TMPDIR'] = self.tmp_path
env['HOME'] = self.tmp_path
args = [
# Don't allow user or system .bazelrc to override build settings
"'--nohome_rc',\n",
"'--nosystem_rc',\n",
# Bazel does not work properly on NFS, switch to /tmp
"'--output_user_root={0}',\n".format(self.tmp_path),
"'build',\n",
# Spack logs don't handle colored output well
"'--color=no',\n",
"'--jobs={0}',\n".format(make_jobs),
# Enable verbose output for failures
"'--verbose_failures',\n",
# Show (formatted) subcommands being executed
"'--subcommands=pretty_print',\n",
"'--spawn_strategy=local',\n",
# Ask bazel to explain what it's up to
# Needs a filename as argument
"'--explain=explainlogfile.txt',\n",
# Increase verbosity of explanation,
"'--verbose_explanations',\n",
# bazel uses system PYTHONPATH instead of spack paths
"'--action_env', 'PYTHONPATH={0}',\n".format(env['PYTHONPATH']),
]
filter_file("'build',",
' '.join(args),
'setup.py')
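# With this package file in place, the library is built and installed the
# usual Spack way, e.g.:
#   spack install py-dm-tree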
|
tests/test_reweighting.py
|
JannikWirtz/importance-sampling-diagnostics
| 289 |
82118
|
<filename>tests/test_reweighting.py
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
import unittest
from keras.layers import Input
from keras.models import Model
import numpy as np
from importance_sampling.reweighting import BiasedReweightingPolicy, \
NoReweightingPolicy
class TestReweighting(unittest.TestCase):
def _test_external_reweighting_layer(self, rw):
s1, s2 = Input(shape=(1,)), Input(shape=(1,))
w = rw.weight_layer()([s1, s2])
m = Model(inputs=[s1, s2], outputs=[w])
m.compile("sgd", "mse")
r = np.random.rand(100, 1).astype(np.float32)
r_hat = m.predict([np.zeros((100, 1)), r])
self.assertTrue(np.all(r == r_hat))
def test_biased_reweighting(self):
rw = BiasedReweightingPolicy(k=1.)
s = np.random.rand(100)
i = np.arange(100)
w = rw.sample_weights(i, s).ravel()
self.assertEqual(rw.weight_size, 1)
self.assertAlmostEqual(w.dot(s), s.sum())
self._test_external_reweighting_layer(rw)
# Make sure that it is just a normalized version of the same weights
# raised to k
rw = BiasedReweightingPolicy(k=0.5)
w_hat = rw.sample_weights(i, s).ravel()
scales = w**0.5 / w_hat
scales_hat = np.ones(100)*scales[0]
self.assertTrue(np.allclose(scales, scales_hat))
def test_no_reweighting(self):
rw = NoReweightingPolicy()
self.assertTrue(
np.all(
rw.sample_weights(np.arange(100), np.random.rand(100)) == 1.0
)
)
self._test_external_reweighting_layer(rw)
def test_adjusted_biased_reweighting(self):
self.skipTest("Not implemented yet")
def test_correcting_reweighting_policy(self):
self.skipTest("Not implemented yet")
if __name__ == "__main__":
unittest.main()
|
src/nginx/config/headers/uwsgi_param.py
|
sixninetynine/nginx-config-builder
| 149 |
82122
|
<filename>src/nginx/config/headers/uwsgi_param.py
# Generic uwsgi_param headers
CONTENT_LENGTH = 'CONTENT_LENGTH'
CONTENT_TYPE = 'CONTENT_TYPE'
DOCUMENT_ROOT = 'DOCUMENT_ROOT'
QUERY_STRING = 'QUERY_STRING'
PATH_INFO = 'PATH_INFO'
REMOTE_ADDR = 'REMOTE_ADDR'
REMOTE_PORT = 'REMOTE_PORT'
REQUEST_METHOD = 'REQUEST_METHOD'
REQUEST_URI = 'REQUEST_URI'
SERVER_ADDR = 'SERVER_ADDR'
SERVER_NAME = 'SERVER_NAME'
SERVER_PORT = 'SERVER_PORT'
SERVER_PROTOCOL = 'SERVER_PROTOCOL'
# SSL uwsgi_param headers
CLIENT_SSL_CERT = 'CLIENT_SSL_CERT'
|
DataProcessor/Feature/em_dependency_feature.py
|
Milozms/feedforward-RE
| 156 |
82126
|
<reponame>Milozms/feedforward-RE
__author__ = 'wenqihe'
from abstract_feature import AbstractFeature
from em_token_feature import EMHeadFeature, get_lemma
class EMDependencyFeature(AbstractFeature):
    """
    Universal Dependencies
    """
    accepted_deps = ["nn", "agent", "dobj", "nsubj", "amod", "nsubjpass", "poss", "appos"]
def apply(self, sentence, mention, features):
# head_index = HeadFeature.get_head(sentence, mention)
# for dep_type, gov, dep in sentence.dep:
# if head_index == gov:
# token = 'root'
# if dep >= 0:
# token = get_lemma(sentence.tokens[dep], sentence.pos[dep])
# features.append('ROLE_gov:%s' % dep_type)
# features.append('PARENT_%s' % token)
# if head_index == dep:
# token = 'root'
# if gov >= 0:
# token = get_lemma(sentence.tokens[dep], sentence.pos[gov])
# features.append('ROLE_dep:%s' % dep_type)
# features.append('PARENT_%s' % token)
start = mention.start
end = mention.end
for dep_type, gov, dep in sentence.dep:
if start <= gov < end:
                if 0 <= dep < sentence.size():
token = get_lemma(sentence.tokens[dep], sentence.pos[dep])
pos = sentence.pos[dep]
if self.accept_pos(pos) and self.accept_dep(dep_type):
key = "gov:" + dep_type + ":" + token + "=" + pos[0]
features.append(("DEP_" + key))
if start <= dep < end:
if 0 <= gov < sentence.size():
token = get_lemma(sentence.tokens[gov], sentence.pos[gov])
pos = sentence.pos[gov]
if self.accept_pos(pos) and self.accept_dep(dep_type):
key = "dep:" + dep_type + ":" + token + "=" + pos[0]
features.append(("DEP_" + key))
def accept_pos(self, pos):
return pos[0] == 'N' or pos[0] == 'V'
def accept_dep(self, dep):
return dep.startswith('prep') or dep in self.accepted_deps
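# Illustrative example (the sentence object here is hypothetical): for a
# mention token governing an "nsubj" arc whose dependent lemmatizes to
# "acquire" with POS tag "VBD", apply() appends the feature string:
#   DEP_gov:nsubj:acquire=V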
|
examples/finished/kmedian.py
|
gasse/PySCIPOpt
| 311 |
82129
|
##@file kmedian.py
#@brief model for solving the k-median problem.
"""
minimize the total (weighted) travel cost for servicing
a set of customers from k facilities.
Copyright (c) by <NAME> and <NAME>, 2012
"""
import math
import random
from pyscipopt import Model, quicksum, multidict
def kmedian(I,J,c,k):
"""kmedian -- minimize total cost of servicing customers from k facilities
Parameters:
- I: set of customers
- J: set of potential facilities
- c[i,j]: cost of servicing customer i from facility j
- k: number of facilities to be used
Returns a model, ready to be solved.
"""
model = Model("k-median")
x,y = {},{}
for j in J:
y[j] = model.addVar(vtype="B", name="y(%s)"%j)
for i in I:
x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == 1, "Assign(%s)"%i)
for j in J:
model.addCons(x[i,j] <= y[j], "Strong(%s,%s)"%(i,j))
model.addCons(quicksum(y[j] for j in J) == k, "Facilities")
model.setObjective(quicksum(c[i,j]*x[i,j] for i in I for j in J), "minimize")
model.data = x,y
return model
def distance(x1,y1,x2,y2):
"""return distance of two points"""
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
def make_data(n,m,same=True):
"""creates example data set"""
    if same:
I = range(n)
J = range(m)
x = [random.random() for i in range(max(m,n))] # positions of the points in the plane
y = [random.random() for i in range(max(m,n))]
else:
I = range(n)
J = range(n,n+m)
x = [random.random() for i in range(n+m)] # positions of the points in the plane
y = [random.random() for i in range(n+m)]
c = {}
for i in I:
for j in J:
c[i,j] = distance(x[i],y[i],x[j],y[j])
return I,J,c,x,y
if __name__ == "__main__":
import sys
random.seed(67)
n = 200
m = n
I,J,c,x_pos,y_pos = make_data(n,m,same=True)
k = 20
model = kmedian(I,J,c,k)
# model.Params.Threads = 1
model.optimize()
EPS = 1.e-6
x,y = model.data
edges = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > EPS]
facilities = [j for j in y if model.getVal(y[j]) > EPS]
print("Optimal value:",model.getObjVal())
print("Selected facilities:", facilities)
print("Edges:", edges)
print("max c:", max([c[i,j] for (i,j) in edges]))
try: # plot the result using networkx and matplotlib
import networkx as NX
import matplotlib.pyplot as P
P.clf()
G = NX.Graph()
facilities = set(j for j in J if model.getVal(y[j]) > EPS)
other = set(j for j in J if j not in facilities)
client = set(i for i in I if i not in facilities and i not in other)
G.add_nodes_from(facilities)
G.add_nodes_from(client)
G.add_nodes_from(other)
for (i,j) in edges:
G.add_edge(i,j)
position = {}
for i in range(len(x_pos)):
position[i] = (x_pos[i],y_pos[i])
NX.draw(G,position,with_labels=False,node_color="w",nodelist=facilities)
NX.draw(G,position,with_labels=False,node_color="c",nodelist=other,node_size=50)
NX.draw(G,position,with_labels=False,node_color="g",nodelist=client,node_size=50)
P.show()
except ImportError:
print("install 'networkx' and 'matplotlib' for plotting")
|
pyeda/parsing/dimacs.py
|
ivotimev/pyeda
| 196 |
82148
|
<filename>pyeda/parsing/dimacs.py<gh_stars>100-1000
"""
DIMACS
For more information on the input formats,
see "Satisfiability Suggested Format",
published May 1993 by the Rutgers Center for Discrete Mathematics (DIMACS).
Also, see the proceedings of the International SAT Competition
(http://www.satcompetition.org) for information and CNF examples.
Exceptions:
Error
Interface Functions:
parse_cnf
parse_sat
"""
# Disable 'invalid-name', because this module uses an unconventional naming
# scheme for the parsing tokens.
# pylint: disable=C0103
from pyeda.parsing import lex
from pyeda.parsing.token import (
EndToken,
KeywordToken, IntegerToken, OperatorToken, PunctuationToken,
)
class Error(Exception):
"""An error happened during parsing a DIMACS file"""
# Keywords
class KW_p(KeywordToken):
"""DIMACS 'p' preamble token"""
class KW_cnf(KeywordToken):
"""DIMACS 'cnf' token"""
class KW_sat(KeywordToken):
"""DIMACS 'sat' token"""
class KW_satx(KeywordToken):
"""DIMACS 'satx' token"""
class KW_sate(KeywordToken):
"""DIMACS 'sate' token"""
class KW_satex(KeywordToken):
"""DIMACS 'satex' token"""
# Operators
class OP_not(OperatorToken):
"""DIMACS '-' operator"""
ASTOP = 'not'
class OP_or(OperatorToken):
"""DIMACS '+' operator"""
ASTOP = 'or'
class OP_and(OperatorToken):
"""DIMACS '*' operator"""
ASTOP = 'and'
class OP_xor(OperatorToken):
"""DIMACS 'xor' operator"""
ASTOP = 'xor'
class OP_equal(OperatorToken):
"""DIMACS '=' operator"""
ASTOP = 'equal'
# Punctuation
class LPAREN(PunctuationToken):
"""DIMACS '(' token"""
class RPAREN(PunctuationToken):
"""DIMACS ')' token"""
class CNFLexer(lex.RegexLexer):
"""Lexical analysis of CNF strings"""
def ignore(self, text):
"""Ignore this text."""
def keyword(self, text):
"""Push a keyword onto the token queue."""
cls = self.KEYWORDS[text]
self.push_token(cls(text, self.lineno, self.offset))
def operator(self, text):
"""Push an operator onto the token queue."""
cls = self.OPERATORS[text]
self.push_token(cls(text, self.lineno, self.offset))
@lex.action(IntegerToken)
def integer(self, text):
"""Push an integer onto the token queue."""
return int(text)
RULES = {
'root': [
(r"c.*\n", ignore),
(r"\bp\b", keyword, 'preamble'),
],
'preamble': [
(r"[ \t]+", ignore),
(r"\bcnf\b", keyword),
(r"\d+", integer),
(r"\n", ignore, 'formula'),
],
'formula': [
(r"\s+", ignore),
(r"-", operator),
(r"\d+", integer),
],
}
KEYWORDS = {
'p': KW_p,
'cnf': KW_cnf,
}
OPERATORS = {
'-': OP_not,
}
def _expect_token(lexer, types):
"""Return the next token, or raise an exception."""
tok = next(lexer)
if any(isinstance(tok, t) for t in types):
return tok
else:
raise Error("unexpected token: " + str(tok))
def parse_cnf(s, varname='x'):
"""
Parse an input string in DIMACS CNF format,
and return an expression abstract syntax tree.
Parameters
----------
s : str
String containing a DIMACS CNF.
varname : str, optional
The variable name used for creating literals.
Defaults to 'x'.
Returns
-------
An ast tuple, defined recursively:
ast := ('var', names, indices)
| ('not', ast)
| ('or', ast, ...)
| ('and', ast, ...)
names := (name, ...)
indices := (index, ...)
"""
lexer = iter(CNFLexer(s))
try:
ast = _cnf(lexer, varname)
except lex.RunError as exc:
fstr = ("{0.args[0]}: "
"(line: {0.lineno}, offset: {0.offset}, text: {0.text})")
raise Error(fstr.format(exc))
# Check for end of buffer
_expect_token(lexer, {EndToken})
return ast
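# Worked example (illustration only): the CNF (x1 | ~x2) & (x2 | x3) in
# DIMACS form parses into nested and/or/not/var tuples:
#
#   parse_cnf("p cnf 3 2\n1 -2 0\n2 3 0\n")
#   # ('and',
#   #  ('or', ('var', ('x',), (1,)), ('not', ('var', ('x',), (2,)))),
#   #  ('or', ('var', ('x',), (2,)), ('var', ('x',), (3,))))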
def _cnf(lexer, varname):
"""Return a DIMACS CNF."""
_expect_token(lexer, {KW_p})
_expect_token(lexer, {KW_cnf})
nvars = _expect_token(lexer, {IntegerToken}).value
nclauses = _expect_token(lexer, {IntegerToken}).value
return _cnf_formula(lexer, varname, nvars, nclauses)
def _cnf_formula(lexer, varname, nvars, nclauses):
"""Return a DIMACS CNF formula."""
clauses = _clauses(lexer, varname, nvars)
if len(clauses) < nclauses:
fstr = "formula has fewer than {} clauses"
raise Error(fstr.format(nclauses))
if len(clauses) > nclauses:
fstr = "formula has more than {} clauses"
raise Error(fstr.format(nclauses))
return ('and', ) + clauses
def _clauses(lexer, varname, nvars):
"""Return a tuple of DIMACS CNF clauses."""
tok = next(lexer)
toktype = type(tok)
if toktype is OP_not or toktype is IntegerToken:
lexer.unpop_token(tok)
first = _clause(lexer, varname, nvars)
rest = _clauses(lexer, varname, nvars)
return (first, ) + rest
# null
else:
lexer.unpop_token(tok)
return tuple()
def _clause(lexer, varname, nvars):
"""Return a DIMACS CNF clause."""
return ('or', ) + _lits(lexer, varname, nvars)
def _lits(lexer, varname, nvars):
"""Return a tuple of DIMACS CNF clause literals."""
tok = _expect_token(lexer, {OP_not, IntegerToken})
if isinstance(tok, IntegerToken) and tok.value == 0:
return tuple()
else:
if isinstance(tok, OP_not):
neg = True
tok = _expect_token(lexer, {IntegerToken})
else:
neg = False
index = tok.value
if index > nvars:
fstr = "formula literal {} is greater than {}"
raise Error(fstr.format(index, nvars))
lit = ('var', (varname, ), (index, ))
if neg:
lit = ('not', lit)
return (lit, ) + _lits(lexer, varname, nvars)
class SATLexer(lex.RegexLexer):
"""Lexical analysis of SAT strings"""
def ignore(self, text):
"""Ignore this text."""
def keyword(self, text):
"""Push a keyword onto the token queue."""
cls = self.KEYWORDS[text]
self.push_token(cls(text, self.lineno, self.offset))
def operator(self, text):
"""Push an operator onto the token queue."""
cls = self.OPERATORS[text]
self.push_token(cls(text, self.lineno, self.offset))
def punct(self, text):
"""Push punctuation onto the token queue."""
cls = self.PUNCTUATION[text]
self.push_token(cls(text, self.lineno, self.offset))
@lex.action(IntegerToken)
def integer(self, text):
"""Push an integer onto the token queue."""
return int(text)
RULES = {
'root': [
(r"c.*\n", ignore),
(r"\bp\b", keyword, 'preamble'),
],
'preamble': [
(r"[ \t]+", ignore),
(r"\bsat\b", keyword),
(r"\bsatx\b", keyword),
(r"\bsate\b", keyword),
(r"\bsatex\b", keyword),
(r"\d+", integer),
(r"\n", ignore, 'formula'),
],
'formula': [
(r"\s+", ignore),
(r"\+", operator),
(r"\-", operator),
(r"\*", operator),
(r"\bxor\b", operator),
(r"=", operator),
(r"\(", punct),
(r"\)", punct),
(r"\d+", integer),
],
}
KEYWORDS = {
'p': KW_p,
'sat': KW_sat,
'satx': KW_satx,
'sate': KW_sate,
'satex': KW_satex,
}
OPERATORS = {
'-': OP_not,
'+': OP_or,
'*': OP_and,
'xor': OP_xor,
'=': OP_equal,
}
PUNCTUATION = {
'(': LPAREN,
')': RPAREN,
}
SAT_GRAMMAR = r"""
SAT := COMMENT* PREAMBLE FORMULA
COMMENT := 'c' .* '\n'
PREAMBLE := 'p' FORMAT VARIABLES '\n'
FORMAT := 'sat' | 'satx' | 'sate' | 'satex'
VARIABLES := INT
FORMULA := INT
| '-' INT
| '(' FORMULA ')'
| '-' '(' FORMULA ')'
| OP '(' FORMULAS ')'
OP := '+' | '*' | 'xor' | '='
FORMULAS := FORMULAS FORMULA
| null
"""
_SAT_TOKS = {
'sat': {OP_not, OP_or, OP_and},
'satx': {OP_not, OP_or, OP_and, OP_xor},
'sate': {OP_not, OP_or, OP_and, OP_equal},
'satex': {OP_not, OP_or, OP_and, OP_xor, OP_equal},
}
def parse_sat(s, varname='x'):
"""
Parse an input string in DIMACS SAT format,
and return an expression.
"""
lexer = iter(SATLexer(s))
try:
ast = _sat(lexer, varname)
except lex.RunError as exc:
fstr = ("{0.args[0]}: "
"(line: {0.lineno}, offset: {0.offset}, text: {0.text})")
raise Error(fstr.format(exc))
# Check for end of buffer
_expect_token(lexer, {EndToken})
return ast
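# Worked example (illustration only): in the richer SAT format the same
# formula can be written with explicit operators:
#
#   parse_sat("p sat 3\n*(+(1 -2) +(2 3))\n")
#   # ('and',
#   #  ('or', ('var', ('x',), (1,)), ('not', ('var', ('x',), (2,)))),
#   #  ('or', ('var', ('x',), (2,)), ('var', ('x',), (3,))))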
def _sat(lexer, varname):
"""Return a DIMACS SAT."""
_expect_token(lexer, {KW_p})
fmt = _expect_token(lexer, {KW_sat, KW_satx, KW_sate, KW_satex}).value
nvars = _expect_token(lexer, {IntegerToken}).value
return _sat_formula(lexer, varname, fmt, nvars)
def _sat_formula(lexer, varname, fmt, nvars):
"""Return a DIMACS SAT formula."""
types = {IntegerToken, LPAREN} | _SAT_TOKS[fmt]
tok = _expect_token(lexer, types)
# INT
if isinstance(tok, IntegerToken):
index = tok.value
if not 0 < index <= nvars:
fstr = "formula literal {} outside valid range: (0, {}]"
raise Error(fstr.format(index, nvars))
return ('var', (varname, ), (index, ))
# '-'
elif isinstance(tok, OP_not):
tok = _expect_token(lexer, {IntegerToken, LPAREN})
# '-' INT
if isinstance(tok, IntegerToken):
index = tok.value
if not 0 < index <= nvars:
fstr = "formula literal {} outside valid range: (0, {}]"
raise Error(fstr.format(index, nvars))
return ('not', ('var', (varname, ), (index, )))
# '-' '(' FORMULA ')'
else:
formula = _sat_formula(lexer, varname, fmt, nvars)
_expect_token(lexer, {RPAREN})
return ('not', formula)
# '(' FORMULA ')'
elif isinstance(tok, LPAREN):
formula = _sat_formula(lexer, varname, fmt, nvars)
_expect_token(lexer, {RPAREN})
return formula
# OP '(' FORMULAS ')'
else:
_expect_token(lexer, {LPAREN})
formulas = _formulas(lexer, varname, fmt, nvars)
_expect_token(lexer, {RPAREN})
return (tok.ASTOP, ) + formulas
def _formulas(lexer, varname, fmt, nvars):
"""Return a tuple of DIMACS SAT formulas."""
types = {IntegerToken, LPAREN} | _SAT_TOKS[fmt]
tok = lexer.peek_token()
if any(isinstance(tok, t) for t in types):
first = _sat_formula(lexer, varname, fmt, nvars)
rest = _formulas(lexer, varname, fmt, nvars)
return (first, ) + rest
# null
else:
return tuple()
|
modules/exploitation/kingphisher.py
|
decidedlygray/ptf
| 4,391 |
82160
|
<gh_stars>1000+
#!/usr/bin/env python
######################################
# Installation module for King Phisher
######################################
# AUTHOR OF MODULE NAME
AUTHOR="<NAME> (@zeroSteiner)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update the King Phisher phishing campaign toolkit"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/securestate/king-phisher/"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="king-phisher"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS="cd {INSTALL_LOCATION},yes | tools/install.sh"
|
sktime/transformations/series/compose.py
|
marcio55afr/sktime
| 5,349 |
82164
|
<reponame>marcio55afr/sktime<filename>sktime/transformations/series/compose.py
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Meta-transformers for building composite transformers."""
import pandas as pd
from sktime.transformations.base import _SeriesToSeriesTransformer
from sktime.utils.validation.series import check_series
from sklearn.base import clone
from sklearn.utils.metaestimators import if_delegate_has_method
__author__ = ["aiwalter", "SveaMeyer13"]
__all__ = ["OptionalPassthrough", "ColumnwiseTransformer"]
class OptionalPassthrough(_SeriesToSeriesTransformer):
"""Wrap an existing transformer to tune whether to include it in a pipeline.
    Allows tuning the implicit hyperparameter of whether or not to use a
    particular transformer inside a pipeline (e.g. TransformedTargetForecaster).
    This is achieved by the hyperparameter `passthrough`,
    which can then be added to a tuning grid (see example).
Parameters
----------
transformer : Estimator
scikit-learn-like or sktime-like transformer to fit and apply to series.
passthrough : bool, default=False
        Whether to apply the given transformer or to just
        pass through the data (identity transformation). If True, the
        transformer is not applied and the OptionalPassthrough uses the
        identity transformation.
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.naive import NaiveForecaster
>>> from sktime.transformations.series.compose import OptionalPassthrough
>>> from sktime.transformations.series.detrend import Deseasonalizer
>>> from sktime.transformations.series.adapt import TabularToSeriesAdaptor
>>> from sktime.forecasting.compose import TransformedTargetForecaster
>>> from sktime.forecasting.model_selection import (
... ForecastingGridSearchCV,
... SlidingWindowSplitter)
>>> from sklearn.preprocessing import StandardScaler
>>> # create pipeline
>>> pipe = TransformedTargetForecaster(steps=[
... ("deseasonalizer", OptionalPassthrough(Deseasonalizer())),
... ("scaler", OptionalPassthrough(TabularToSeriesAdaptor(StandardScaler()))),
... ("forecaster", NaiveForecaster())])
>>> # putting it all together in a grid search
>>> cv = SlidingWindowSplitter(
... initial_window=60,
... window_length=24,
... start_with_window=True,
... step_length=48)
>>> param_grid = {
... "deseasonalizer__passthrough" : [True, False],
... "scaler__transformer__transformer__with_mean": [True, False],
... "scaler__passthrough" : [True, False],
... "forecaster__strategy": ["drift", "mean", "last"]}
>>> gscv = ForecastingGridSearchCV(
... forecaster=pipe,
... param_grid=param_grid,
... cv=cv,
... n_jobs=-1)
>>> gscv_fitted = gscv.fit(load_airline())
"""
_required_parameters = ["transformer"]
_tags = {
"univariate-only": False,
"fit-in-transform": True,
}
def __init__(self, transformer, passthrough=False):
self.transformer = transformer
self.transformer_ = None
self.passthrough = passthrough
self._is_fitted = False
super(OptionalPassthrough, self).__init__()
self.clone_tags(transformer)
def fit(self, Z, X=None):
"""Fit the model.
Parameters
----------
Z : pd.Series
Series to fit.
X : pd.DataFrame, optional (default=None)
Exogenous data used in transformation.
Returns
-------
self
"""
if not self.passthrough:
self.transformer_ = clone(self.transformer)
self.transformer_.fit(Z, X)
self._is_fitted = True
return self
def transform(self, Z, X=None):
"""Apply transformation.
Parameters
----------
Z : pd.Series
Series to transform.
X : pd.DataFrame, optional (default=None)
Exogenous data used in transformation.
Returns
-------
z : pd.Series
Transformed series.
"""
self.check_is_fitted()
z = check_series(Z, enforce_univariate=False)
if not self.passthrough:
z = self.transformer_.transform(z, X)
return z
@if_delegate_has_method(delegate="transformer")
def inverse_transform(self, Z, X=None):
"""Inverse transform data.
Parameters
----------
Z : pd.Series
Series to transform.
X : pd.DataFrame, optional (default=None)
Exogenous data used in transformation.
Returns
-------
z : pd.Series
Inverse transformed data.
"""
self.check_is_fitted()
z = check_series(Z, enforce_univariate=False)
if not self.passthrough:
z = self.transformer_.inverse_transform(z, X=X)
return z
class ColumnwiseTransformer(_SeriesToSeriesTransformer):
"""Apply a transformer columnwise to multivariate series.
Overview: input multivariate time series and the transformer passed
in `transformer` parameter is applied to specified `columns`, each
column is handled as a univariate series. The resulting transformed
data has the same shape as input data.
Parameters
----------
transformer : Estimator
scikit-learn-like or sktime-like transformer to fit and apply to series.
columns : list of str or None
Names of columns that are supposed to be transformed.
If None, all columns are transformed.
Attributes
----------
transformers_ : dict of {str : transformer}
Maps columns to transformers.
columns_ : list of str
Names of columns that are supposed to be transformed.
See Also
--------
OptionalPassthrough
Examples
--------
>>> from sktime.datasets import load_longley
>>> from sktime.transformations.series.detrend import Detrender
>>> from sktime.transformations.series.compose import ColumnwiseTransformer
>>> y, X = load_longley()
>>> transformer = ColumnwiseTransformer(Detrender())
>>> yt = transformer.fit_transform(X)
"""
_required_parameters = ["transformer"]
def __init__(self, transformer, columns=None):
self.transformer = transformer
self.columns = columns
super(ColumnwiseTransformer, self).__init__()
def fit(self, Z, X=None):
"""Fit data.
Iterates over columns (series) and applies
the fit function of the transformer.
Parameters
----------
Z : pd.Series, pd.DataFrame
Returns
-------
self : an instance of self
"""
self._is_fitted = False
z = check_series(Z, allow_numpy=False)
# cast to pd.DataFrame in univariate case
if isinstance(z, pd.Series):
z = z.to_frame()
# check that columns are None or list of strings
if self.columns is not None:
            if not (
                isinstance(self.columns, list)
                and all(isinstance(s, str) for s in self.columns)
            ):
                raise ValueError("Columns need to be a list of strings or None.")
# set self.columns_ to columns that are going to be transformed
# (all if self.columns is None)
self.columns_ = self.columns
if self.columns_ is None:
self.columns_ = z.columns
# make sure z contains all columns that the user wants to transform
_check_columns(z, selected_columns=self.columns_)
# fit by iterating over columns
self.transformers_ = {}
for colname in self.columns_:
transformer = clone(self.transformer)
self.transformers_[colname] = transformer
self.transformers_[colname].fit(z[colname], X)
self._is_fitted = True
return self
def transform(self, Z, X=None):
"""Transform data.
Returns a transformed version of Z by iterating over specified
columns and applying the univariate series transformer to them.
Parameters
----------
Z : pd.Series, pd.DataFrame
Returns
-------
Z : pd.Series, pd.DataFrame
Transformed time series(es).
"""
self.check_is_fitted()
z = check_series(Z)
# handle univariate case
z, is_series = _check_is_pdseries(z)
# make copy of z
z = z.copy()
# make sure z contains all columns that the user wants to transform
_check_columns(z, selected_columns=self.columns_)
for colname in self.columns_:
z[colname] = self.transformers_[colname].transform(z[colname], X)
# make z a series again in univariate case
if is_series:
z = z.squeeze("columns")
return z
@if_delegate_has_method(delegate="transformer")
def inverse_transform(self, Z, X=None):
"""Inverse transform data.
Returns an inverse-transformed version of Z by iterating over specified
columns and applying the univariate series transformer to them.
Only works if `self.transformer` has an `inverse_transform` method.
Parameters
----------
Z : pd.Series, pd.DataFrame
Returns
-------
Z : pd.Series, pd.DataFrame
Inverse-transformed time series(es).
"""
self.check_is_fitted()
z = check_series(Z)
# handle univariate case
z, is_series = _check_is_pdseries(z)
# make copy of z
z = z.copy()
# make sure z contains all columns that the user wants to transform
_check_columns(z, selected_columns=self.columns_)
# iterate over columns that are supposed to be inverse_transformed
for colname in self.columns_:
z[colname] = self.transformers_[colname].inverse_transform(z[colname], X)
# make z a series again in univariate case
if is_series:
z = z.squeeze("columns")
return z
@if_delegate_has_method(delegate="transformer")
def update(self, Z, X=None, update_params=True):
"""Update parameters.
Update the parameters of the estimator with new data
by iterating over specified columns.
Only works if `self.transformer` has an `update` method.
Parameters
----------
Z : pd.Series
New time series.
update_params : bool, optional, default=True
Returns
-------
self : an instance of self
"""
z = check_series(Z)
# make z a pd.DataFrame in univariate case
if isinstance(z, pd.Series):
z = z.to_frame()
# make sure z contains all columns that the user wants to transform
_check_columns(z, selected_columns=self.columns_)
for colname in self.columns_:
self.transformers_[colname].update(z[colname], X)
return self
def _check_columns(z, selected_columns):
# make sure z contains all columns that the user wants to transform
z_wanted_keys = set(selected_columns)
z_new_keys = set(z.columns)
difference = z_wanted_keys.difference(z_new_keys)
if len(difference) != 0:
        raise ValueError("Missing columns " + str(difference) + " in Z.")
def _check_is_pdseries(z):
    # make z a pd.DataFrame in univariate case
is_series = False
if isinstance(z, pd.Series):
z = z.to_frame()
is_series = True
return z, is_series
|
lvsr/ops.py
|
Fatman003/Actor
| 178 |
82169
|
from __future__ import print_function
import math
import numpy
import theano
import itertools
from theano import tensor, Op
from theano.gradient import disconnected_type
from fuel.utils import do_not_pickle_attributes
from picklable_itertools.extras import equizip
from collections import defaultdict, deque
from toposort import toposort_flatten
from lvsr.error_rate import (
reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu)
class RewardOp(Op):
__props__ = ()
def __init__(self, eos_label, alphabet_size):
"""Computes matrices of rewards and gains."""
self.eos_label = eos_label
self.alphabet_size = alphabet_size
def perform(self, node, inputs, output_storage):
groundtruth, recognized = inputs
if (groundtruth.ndim != 2 or recognized.ndim != 2
or groundtruth.shape[1] != recognized.shape[1]):
raise ValueError
batch_size = groundtruth.shape[1]
all_rewards = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
all_gains = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
alphabet = list(range(self.alphabet_size))
for index in range(batch_size):
y = list(groundtruth[:, index])
y_hat = list(recognized[:, index])
try:
eos_pos = y.index(self.eos_label)
y = y[:eos_pos + 1]
            except ValueError:
# Sometimes groundtruth is in fact also a prediction
# and in this case it might not have EOS label
pass
if self.eos_label in y_hat:
y_hat_eos_pos = y_hat.index(self.eos_label)
y_hat_trunc = y_hat[:y_hat_eos_pos + 1]
else:
y_hat_trunc = y_hat
rewards_trunc = reward_matrix(
y, y_hat_trunc, alphabet, self.eos_label)
# pass freshly computed rewards to gain_matrix to speed things up
# a bit
gains_trunc = gain_matrix(y, y_hat_trunc, alphabet,
given_reward_matrix=rewards_trunc)
gains = numpy.ones((len(y_hat), len(alphabet))) * -1000
gains[:(gains_trunc.shape[0] - 1), :] = gains_trunc[:-1, :]
rewards = numpy.ones((len(y_hat), len(alphabet))) * -1
rewards[:(rewards_trunc.shape[0] - 1), :] = rewards_trunc[:-1, :]
all_rewards[:, index, :] = rewards
all_gains[:, index, :] = gains
output_storage[0][0] = all_rewards
output_storage[1][0] = all_gains
def grad(self, *args, **kwargs):
return disconnected_type(), disconnected_type()
def make_node(self, groundtruth, recognized):
recognized = tensor.as_tensor_variable(recognized)
groundtruth = tensor.as_tensor_variable(groundtruth)
return theano.Apply(
self, [groundtruth, recognized], [tensor.ltensor3(), tensor.ltensor3()])
def trim(y, mask):
try:
return y[:mask.index(0.)]
except ValueError:
return y
class EditDistanceOp(Op):
__props__ = ()
def __init__(self, bos_label, eos_label, deltas=False):
self.bos_label = bos_label
self.eos_label = eos_label
self.deltas = deltas
def perform(self, node, inputs, output_storage):
prediction, prediction_mask, groundtruth, groundtruth_mask = inputs
if (groundtruth.ndim != 2 or prediction.ndim != 2
or groundtruth.shape[1] != prediction.shape[1]):
            raise ValueError("groundtruth and prediction must be 2D with matching batch sizes")
batch_size = groundtruth.shape[1]
results = numpy.zeros_like(prediction[:, :, None])
for index in range(batch_size):
y = trim(list(groundtruth[:, index]),
list(groundtruth_mask[:, index]))
y_hat = trim(list(prediction[:, index]),
list(prediction_mask[:, index]))
if self.deltas:
matrix = _edit_distance_matrix(
y, y_hat, special_tokens={self.bos_label, self.eos_label})
row = matrix[-1, :].copy()
results[:len(y_hat), index, 0] = row[1:] - matrix[-1, :-1]
else:
results[len(y_hat) - 1, index, 0] = edit_distance(y, y_hat)
output_storage[0][0] = results
def grad(self, *args, **kwargs):
        # one disconnected gradient per input, matching BleuOp.grad below
        return [theano.gradient.disconnected_type()] * 4
def make_node(self, prediction, prediction_mask,
groundtruth, groundtruth_mask):
prediction = tensor.as_tensor_variable(prediction)
prediction_mask = tensor.as_tensor_variable(prediction_mask)
groundtruth = tensor.as_tensor_variable(groundtruth)
groundtruth_mask = tensor.as_tensor_variable(groundtruth_mask)
return theano.Apply(
self, [prediction, prediction_mask,
groundtruth, groundtruth_mask], [tensor.ltensor3()])
class BleuOp(Op):
__props__ = ()
def __init__(self, bos_label, eos_label, deltas=False):
self.n = 4
self.deltas = deltas
self.special_tokens = set([bos_label, eos_label])
def grad(self, *args, **kwargs):
return [theano.gradient.disconnected_type()] * 4
def perform(self, node, inputs, output_storage):
prediction, prediction_mask, groundtruth, groundtruth_mask = inputs
if (groundtruth.ndim != 2 or prediction.ndim != 2
or groundtruth.shape[1] != prediction.shape[1]):
            raise ValueError("groundtruth and prediction must be 2D with matching batch sizes")
batch_size = groundtruth.shape[1]
results = numpy.zeros_like(prediction[:, :, None]).astype('float32')
for index in range(batch_size):
y = trim(list(groundtruth[:, index]),
list(groundtruth_mask[:, index]))
y_no_special = [token for token in y
if token not in self.special_tokens]
y_hat = trim(list(prediction[:, index]),
list(prediction_mask[:, index]))
y_hat_no_special = [token for token in y_hat
if token not in self.special_tokens]
blues, _, _, _ = _bleu(y_no_special, y_hat_no_special, self.n)
reward = blues[:, self.n - 1].copy()
if self.deltas:
reward[1:] = reward[1:] - reward[:-1]
pos = -1
for i in range(len(y_hat)):
if y_hat[i] not in self.special_tokens:
pos = pos + 1
results[i, index, 0] = reward[pos]
else:
results[i, index, 0] = 0.
elif len(reward):
results[len(y_hat) - 1, index, 0] = reward[-1]
output_storage[0][0] = results
def make_node(self, prediction, prediction_mask,
groundtruth, groundtruth_mask):
prediction = tensor.as_tensor_variable(prediction)
prediction_mask = tensor.as_tensor_variable(prediction_mask)
groundtruth = tensor.as_tensor_variable(groundtruth)
groundtruth_mask = tensor.as_tensor_variable(groundtruth_mask)
return theano.Apply(
self,
[prediction, prediction_mask,
groundtruth, groundtruth_mask],
[tensor.tensor3()])
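
# --- Illustrative sketch (not part of the original file) ---
# trim() cuts a sequence at the first masked-out (0.) position and leaves
# fully-masked-in sequences untouched.
if __name__ == "__main__":
    assert trim([5, 6, 7, 8], [1., 1., 0., 0.]) == [5, 6]
    assert trim([5, 6], [1., 1.]) == [5, 6]  # no zero in the mask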
|
tools/autograd/nested_dict.py
|
wenhaopeter/read_pytorch_code
| 206 |
82178
|
# TODO: refactor nested_dict into common library with ATen
class nested_dict(object):
"""
A nested dict is a dictionary with a parent. If key lookup fails,
it recursively continues into the parent. Writes always happen to
the top level dict.
"""
def __init__(self, base, parent):
self.base, self.parent = base, parent
def __contains__(self, item):
return item in self.base or item in self.parent
def __getitem__(self, x):
r = self.base.get(x)
if r is not None:
return r
return self.parent[x]
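
# --- Illustrative sketch (not part of the original file) ---
# Key lookup falls through to the parent when the base dict misses.
if __name__ == "__main__":
    parent = {"a": 1, "b": 2}
    child = nested_dict({"b": 20}, parent)
    assert child["b"] == 20  # found in the base dict
    assert child["a"] == 1   # falls back to the parent
    assert "a" in child and "c" not in child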
|
lightautoml/automl/base.py
|
kobylkinks/LightAutoML
| 766 |
82183
|
"""Base AutoML class."""
import logging
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from ..dataset.base import LAMLDataset
from ..dataset.utils import concatenate
from ..pipelines.ml.base import MLPipeline
from ..reader.base import Reader
from ..utils.logging import set_stdout_level
from ..utils.logging import verbosity_to_loglevel
from ..utils.timer import PipelineTimer
from ..validation.utils import create_validation_iterator
from .blend import BestModelSelector
from .blend import Blender
logger = logging.getLogger(__name__)
class AutoML:
"""Class for compile full pipeline of AutoML task.
AutoML steps:
- Read, analyze data and get inner
:class:`~lightautoml.dataset.base.LAMLDataset` from input
dataset: performed by reader.
- Create validation scheme.
- Compute passed ml pipelines from levels.
Each element of levels is list
of :class:`~lightautoml.pipelines.ml.base.MLPipelines`
prediction from current level are passed to next level
pipelines as features.
- Time monitoring - check if we have enough time to calc new pipeline.
- Blend last level models and prune useless pipelines
to speedup inference: performed by blender.
- Returns prediction on validation data.
    If a cross-validation scheme is used,
    the out-of-fold prediction will be returned.
    If validation data is passed,
    the prediction on the validation dataset will be returned.
    In case of a cv scheme where some points of the train data
    were never used for validation (e.g. timeout exceeded
    or a custom cv iterator like
    :class:`~lightautoml.validation.np_iterators.TimeSeriesIterator`
    was used), NaN will be returned for those points.
Example:
Common usecase - create custom pipelines or presets.
>>> reader = SomeReader()
>>> pipe = MLPipeline([SomeAlgo()])
>>> levels = [[pipe]]
>>> automl = AutoML(reader, levels, )
>>> automl.fit_predict(data, roles={'target': 'TARGET'})
"""
def __init__(
self,
reader: Reader,
levels: Sequence[Sequence[MLPipeline]],
timer: Optional[PipelineTimer] = None,
blender: Optional[Blender] = None,
skip_conn: bool = False,
return_all_predictions: bool = False,
):
"""
Args:
reader: Instance of Reader class object that
creates :class:`~lightautoml.dataset.base.LAMLDataset`
from input data.
levels: List of list
                of :class:`~lightautoml.pipelines.ml.base.MLPipeline`.
timer: Timer instance of
:class:`~lightautoml.utils.timer.PipelineTimer`.
Default - unlimited timer.
blender: Instance of Blender.
Default - :class:`~lightautoml.automl.blend.BestModelSelector`.
skip_conn: True if we should pass first level
input features to next levels.
Note:
There are several verbosity levels:
- `0`: No messages.
- `1`: Warnings.
- `2`: Info.
- `3`: Debug.
"""
self._initialize(reader, levels, timer, blender, skip_conn, return_all_predictions)
def _initialize(
self,
reader: Reader,
levels: Sequence[Sequence[MLPipeline]],
timer: Optional[PipelineTimer] = None,
blender: Optional[Blender] = None,
skip_conn: bool = False,
return_all_predictions: bool = False,
):
"""Same as __init__. Exists for delayed initialization in presets.
Args:
reader: Instance of Reader class object that
creates :class:`~lightautoml.dataset.base.LAMLDataset`
from input data.
levels: List of list
                of :class:`~lightautoml.pipelines.ml.base.MLPipeline`.
timer: Timer instance of
:class:`~lightautoml.utils.timer.PipelineTimer`.
Default - unlimited timer.
blender: Instance of Blender.
Default - :class:`~lightautoml.automl.blend.BestModelSelector`.
skip_conn: True if we should pass first level
input features to next levels.
return_all_predictions: True if we should return all predictions from last
level models.
verbose: Controls the verbosity: the higher, the more messages.
<1 : messages are not displayed;
>=1 : the computation process for layers is displayed;
>=2 : the information about folds processing is also displayed;
>=3 : the hyperparameters optimization process is also displayed;
>=4 : the training process for every algorithm is displayed;
"""
assert len(levels) > 0, "At least 1 level should be defined"
self.timer = timer
if timer is None:
self.timer = PipelineTimer()
self.reader = reader
self._levels = levels
# default blender is - select best model and prune other pipes
self.blender = blender
if blender is None:
self.blender = BestModelSelector()
# update model names
for i, lvl in enumerate(self._levels):
for j, pipe in enumerate(lvl):
pipe.upd_model_names("Lvl_{0}_Pipe_{1}".format(i, j))
self.skip_conn = skip_conn
self.return_all_predictions = return_all_predictions
def fit_predict(
self,
train_data: Any,
roles: dict,
train_features: Optional[Sequence[str]] = None,
cv_iter: Optional[Iterable] = None,
valid_data: Optional[Any] = None,
valid_features: Optional[Sequence[str]] = None,
verbose: int = 0,
) -> LAMLDataset:
"""Fit on input data and make prediction on validation part.
Args:
train_data: Dataset to train.
roles: Roles dict.
train_features: Optional features names,
if cannot be inferred from train_data.
cv_iter: Custom cv iterator. For example,
:class:`~lightautoml.validation.np_iterators.TimeSeriesIterator`.
valid_data: Optional validation dataset.
valid_features: Optional validation dataset
features if can't be inferred from `valid_data`.
Returns:
Predicted values.
"""
set_stdout_level(verbosity_to_loglevel(verbose))
self.timer.start()
train_dataset = self.reader.fit_read(train_data, train_features, roles)
assert (
len(self._levels) <= 1 or train_dataset.folds is not None
), "Not possible to fit more than 1 level without cv folds"
assert (
len(self._levels) <= 1 or valid_data is None
), "Not possible to fit more than 1 level with holdout validation"
valid_dataset = None
if valid_data is not None:
valid_dataset = self.reader.read(valid_data, valid_features, add_array_attrs=True)
train_valid = create_validation_iterator(train_dataset, valid_dataset, n_folds=None, cv_iter=cv_iter)
        # predeclared so that static analysis tools (e.g. PyCharm) see them defined
level_predictions = None
pipes = None
self.levels = []
        for level_number, level in enumerate(self._levels, 1):
pipes = []
level_predictions = []
            flg_last_level = level_number == len(self._levels)
logger.info(
f"Layer \x1b[1m{leven_number}\x1b[0m train process start. Time left {self.timer.time_left:.2f} secs"
)
for k, ml_pipe in enumerate(level):
pipe_pred = ml_pipe.fit_predict(train_valid)
level_predictions.append(pipe_pred)
pipes.append(ml_pipe)
logger.info("Time left {:.2f} secs\n".format(self.timer.time_left))
if self.timer.time_limit_exceeded():
logger.info(
"Time limit exceeded. Last level models will be blended and unused pipelines will be pruned.\n"
)
flg_last_level = True
break
else:
if self.timer.child_out_of_time:
logger.info(
"Time limit exceeded in one of the tasks. AutoML will blend level {0} models.\n".format(
                            level_number
)
)
flg_last_level = True
logger.info("\x1b[1mLayer {} training completed.\x1b[0m\n".format(leven_number))
# here is split on exit condition
if not flg_last_level:
self.levels.append(pipes)
level_predictions = concatenate(level_predictions)
if self.skip_conn:
valid_part = train_valid.get_validation_data()
try:
                        # convert to initial dataset type
level_predictions = valid_part.from_dataset(level_predictions)
except TypeError:
raise TypeError(
"Can not convert prediction dataset type to input features. Set skip_conn=False"
)
level_predictions = concatenate([level_predictions, valid_part])
train_valid = create_validation_iterator(level_predictions, None, n_folds=None, cv_iter=None)
else:
break
blended_prediction, last_pipes = self.blender.fit_predict(level_predictions, pipes)
self.levels.append(last_pipes)
self.reader.upd_used_features(remove=list(set(self.reader.used_features) - set(self.collect_used_feats())))
del self._levels
if self.return_all_predictions:
return concatenate(level_predictions)
return blended_prediction
def predict(
self,
data: Any,
features_names: Optional[Sequence[str]] = None,
return_all_predictions: Optional[bool] = None,
) -> LAMLDataset:
"""Predict with automl on new dataset.
Args:
data: Dataset to perform inference.
features_names: Optional features names,
if cannot be inferred from `train_data`.
return_all_predictions: if True,
returns all model predictions from last level
Returns:
Dataset with predictions.
"""
dataset = self.reader.read(data, features_names=features_names, add_array_attrs=False)
for n, level in enumerate(self.levels, 1):
# check if last level
level_predictions = []
for _n, ml_pipe in enumerate(level):
level_predictions.append(ml_pipe.predict(dataset))
if n != len(self.levels):
level_predictions = concatenate(level_predictions)
if self.skip_conn:
try:
                        # convert to initial dataset type
level_predictions = dataset.from_dataset(level_predictions)
except TypeError:
raise TypeError(
"Can not convert prediction dataset type to input features. Set skip_conn=False"
)
dataset = concatenate([level_predictions, dataset])
else:
dataset = level_predictions
else:
if (return_all_predictions is None and self.return_all_predictions) or return_all_predictions:
return concatenate(level_predictions)
return self.blender.predict(level_predictions)
def collect_used_feats(self) -> List[str]:
"""Get feats that automl uses on inference.
Returns:
Features names list.
"""
used_feats = set()
for lvl in self.levels:
for pipe in lvl:
used_feats.update(pipe.used_features)
used_feats = list(used_feats)
return used_feats
def collect_model_stats(self) -> Dict[str, int]:
"""Collect info about models in automl.
Returns:
Dict with models and its runtime numbers.
"""
model_stats = {}
for lvl in self.levels:
for pipe in lvl:
for ml_algo in pipe.ml_algos:
model_stats[ml_algo.name] = len(ml_algo.models)
return model_stats
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBersekerTranslations.py
|
fake-name/ReadableWebProxy
| 193 |
82278
|
def extractBersekerTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if 'Because the world has changed into a death game is funny' in item['tags'] and (chp or vol or 'Prologue' in postfix):
return buildReleaseMessageWithType(item, 'Sekai ga death game ni natta no de tanoshii desu', vol, chp, frag=frag, postfix=postfix)
return False
|
tests/unit/test_utils.py
|
Avi-Labs/taurus
| 1,743 |
82282
|
<filename>tests/unit/test_utils.py
# coding=utf-8
""" unit test """
import os
import sys
import logging
from psutil import Popen
from os.path import join
from bzt import TaurusNetworkError
from bzt.utils import log_std_streams, get_uniq_name, JavaVM, ToolError, is_windows, HTTPClient, BetterDict
from bzt.utils import ensure_is_dict, Environment, temp_file, communicate
from tests.unit import BZTestCase, RESOURCES_DIR
from tests.unit.mocks import MockFileReader
class MockPopen(object):
def __init__(self, out, err):
self.out = out
self.err = err
def communicate(self):
return self.out, self.err
class TestEnvironment(BZTestCase):
def test_nesting(self):
v1 = 'val_param_name'
v2 = 'path_param_name'
v3 = 'const_val'
os.environ[v1] = 'v1.1'
os.environ[v2] = 'v1.2'
os.environ[v3] = 'v1.3'
e1 = Environment()
e1.set({v1: 'local_val1.1'})
e1.add_path({v2: 'param_val1.1'}, finish=True)
e2 = Environment(parent=e1)
e1.add_path({v2: 'param_val1.3'}, finish=True)
os.environ[v1] = 'v2.1'
os.environ[v2] = 'v2.2'
os.environ[v3] = 'v2.3'
e1.set({v1: 'local_val1.2'})
e2.add_path({v2: 'param_val1.2'}, finish=True)
self.assertEqual(e1.get(v1), 'local_val1.2')
self.assertEqual(e2.get(v1), 'local_val1.1')
self.assertEqual(e1.get(v2), os.pathsep.join(('v2.2', 'param_val1.1', 'param_val1.3')))
self.assertEqual(e2.get(v2), os.pathsep.join(('v2.2', 'param_val1.1', 'param_val1.2')))
self.assertEqual(e1.get(v3), 'v2.3')
self.assertEqual(e2.get(v3), 'v2.3')
class TestBetterDict(BZTestCase):
def _merge_and_compare(self, first, second, result):
sample = BetterDict().merge(first)
sample.merge(second)
result = BetterDict().merge(result)
self.assertEqual(sample, result)
def _filter_and_compare(self, first, second, result, black_list=False):
sample = BetterDict().merge(first)
sample.filter(second, black_list=black_list)
result = BetterDict().merge(result)
self.assertEqual(sample, result)
def test_merge_configs(self):
a = {"modules": {"local": "class_name"}}
b = {"modules": {"local": {"class": "another_class"}}}
res = BetterDict()
res.merge(a)
res.merge(b)
self.assertEqual(BetterDict.__name__, type(res["modules"]["local"]).__name__)
modules = res["modules"]
ensure_is_dict(modules, "local", "class")
self.assertEqual("another_class", res["modules"]["local"]["class"])
def test_merge_del(self):
a = {
"A": ["B", "C"],
"B": {"A": "vA"}}
b = {
"^A": {"^D": "E"},
"^X": "Y"}
res = {"B": {"A": "vA"}}
self._merge_and_compare(a, b, res)
def test_merge_overwrite(self):
a = {
"A": ["B", "C"],
"B": {"A": "vA"}}
b = {"~B": {"~C": "vC"}}
res = {
"A": ["B", "C"],
"B": {"C": "vC"}}
self._merge_and_compare(a, b, res)
def test_merge_list_elements(self):
a = {
"A": ["B", "C"],
"B": {"A": "vA"},
"D": ["E", "F"]}
b = {
"$A": ["nB"],
"$B": {"nC": "vC"},
"$C": ["D"]}
res = {
"A": ["nB", "C"],
"B": {"A": "vA", "nC": "vC"},
"D": ["E", "F"],
"C": ["D"]}
self._merge_and_compare(a, b, res)
def test_filter_wl0(self):
a = {
"A": False,
"C": {"D": "E", "G": "GG"},
"F": ["FF"]}
b = {
"A": True,
"!C": {"G": "H"}}
res = {
"A": False,
"C": {"D": "E"}}
self._filter_and_compare(a, b, res)
def test_filter_wl1(self):
a = {
"A": ["B", "BB"],
"C": {"D": "E", "G": "GG"},
"F": ["FF"]}
b = {
"A": True,
"!C": {"G": "H"}}
res = {
"A": ["B", "BB"],
"C": {"D": "E"}}
self._filter_and_compare(a, b, res)
def test_filter_wl2(self):
a = {
"A": "B",
"C": {"D": "E"}}
b = {
"A": {"B": "C"},
"C": True}
res = {
"C": {"D": "E"}}
self._filter_and_compare(a, b, res)
def test_filter_bl0(self):
a = {
"A": ["B", "BB"],
"C": {"D": "E", "G": "GG"},
"F": ["FF"]}
b = {
"A": True,
"!C": {"G": "H"}}
res = {
"F": ["FF"],
"C": {"G": "GG"}}
self._filter_and_compare(a, b, res, black_list=True)
def test_filter_bl1(self):
a = {
"A": "B",
"C": {"D": "E"}}
b = {
"A": {"B": "C"},
"C": True}
res = {
"A": "B"}
self._filter_and_compare(a, b, res, black_list=True)
class TestMisc(BZTestCase):
def test_communicate(self):
self.sniff_log()
out = b'\xf1\xe5\xedoutput' # on py2 bytes is just str synonym
err = b'\xf1\xe5\xederror'
obj = MockPopen(out, err)
output = communicate(obj)
self.assertEqual(output, ("output", "error"))
class TestJavaVM(BZTestCase):
def test_missed_tool(self):
self.obj = JavaVM()
self.obj.tool_path = 'java-not-found'
self.assertEqual(False, self.obj.check_if_installed())
self.obj.install()
def test_missed_req_tool(self):
self.obj = JavaVM()
self.obj.tool_path = 'java-not-found'
self.obj.mandatory = True
self.assertEqual(False, self.obj.check_if_installed())
self.assertRaises(ToolError, self.obj.install)
def test_get_version(self):
self.obj = JavaVM()
out1 = "openjdk version \"10.0.1\" 2018-04-17\nOpenJDK Runtime Environment (build " \
"10.0.1+10-Ubuntu-3ubuntu1)\nOpenJDK 64-Bit Server VM (build 10.0.1+10-Ubuntu-3ubuntu1, mixed mode)"
out2 = "java version \"1.8.0_151\"\nJava(TM) SE Runtime Environment (build 1.8.0_151-b12)\n" \
"Java HotSpot(TM) 64-Bit Server VM (build 25.151-b12, mixed mode)"
self.assertEqual("10", self.obj._get_version(out1))
self.assertEqual("8", self.obj._get_version(out2))
class TestLogStreams(BZTestCase):
def test_streams(self):
self.sniff_log()
print('test1')
with log_std_streams(logger=self.captured_logger, stdout_level=logging.DEBUG):
print('test2')
with log_std_streams(stdout_level=logging.DEBUG):
print('test3')
with log_std_streams(stdout_level=logging.DEBUG):
sys.stdout.write('test3')
with log_std_streams(logger=self.captured_logger, stdout_level=logging.DEBUG):
cmd = ['echo', '"test5"']
if is_windows():
cmd = ['cmd', '/c'] + cmd
process = Popen(cmd)
process.wait()
missed_file = get_uniq_name('.', 'test6', '')
with log_std_streams(logger=self.captured_logger, stderr_level=logging.WARNING):
if is_windows():
cmd = ['cmd', '/c', 'dir']
else:
cmd = ['ls']
process = Popen(cmd + [missed_file])
process.wait()
debug_buf = self.log_recorder.debug_buff.getvalue()
warn_buf = self.log_recorder.warn_buff.getvalue()
self.assertNotIn('test1', debug_buf)
self.assertIn('test2', debug_buf)
self.assertNotIn('test3', debug_buf)
self.assertIn('test5', debug_buf)
self.assertTrue(len(warn_buf) > 0)
class TestFileReader(BZTestCase):
def setUp(self):
super(TestFileReader, self).setUp()
self.obj = MockFileReader()
def configure(self, file_name):
self.obj.name = file_name
def tearDown(self):
if self.obj and self.obj.fds:
self.obj.fds.close()
super(TestFileReader, self).tearDown()
def test_file_len(self):
self.configure(join(RESOURCES_DIR, 'jmeter', 'jtl', 'file.notfound'))
self.sniff_log(self.obj.log)
list(self.obj.get_lines(size=1))
self.assertIn('File not appeared yet', self.log_recorder.debug_buff.getvalue())
self.obj.name = join(RESOURCES_DIR, 'jmeter', 'jtl', 'unicode.jtl')
lines = list(self.obj.get_lines(size=1))
self.assertEqual(1, len(lines))
lines = list(self.obj.get_lines(last_pass=True))
self.assertEqual(13, len(lines))
self.assertTrue(all(l.endswith('\n') for l in lines))
def test_decode(self):
old_string = "Тест.Эхо"
gen_file_name = temp_file()
mod_str = old_string + '\n'
with open(gen_file_name, 'wb') as fd: # use target system encoding for writing
fd.write(mod_str.encode(self.obj.SYS_ENCODING)) # important on win where it's not 'utf-8'
try:
self.configure(gen_file_name)
self.assertEqual('utf-8', self.obj.cp)
lines = list(self.obj.get_lines(True))
self.assertEqual(self.obj.SYS_ENCODING, self.obj.cp) # on win self.obj.cp must be changed during of
self.assertEqual(1, len(lines)) # reading (see MockFileReader)
new_string = lines[0].rstrip()
self.assertEqual(old_string, new_string)
finally:
if self.obj.fds:
self.obj.fds.close()
os.remove(gen_file_name)
def test_decode_crash(self):
self.configure(join(RESOURCES_DIR, 'jmeter', 'jtl', 'unicode.jtl'))
self.obj.get_bytes(size=180) # shouldn't crash with UnicodeDecodeError
class TestHTTPClient(BZTestCase):
def test_proxy_setup(self):
obj = HTTPClient()
obj.add_proxy_settings({"address": "http://localhost:3128",
"username": "me",
"password": "<PASSWORD>"})
self.assertIn('http', obj.session.proxies)
self.assertIn('https', obj.session.proxies)
self.assertEqual(obj.session.proxies['http'], 'http://me:too@localhost:3128')
self.assertEqual(obj.session.proxies['https'], 'http://me:too@localhost:3128')
def test_proxy_ssl_cert(self):
obj = HTTPClient()
obj.add_proxy_settings({"ssl-cert": "i am server side cert",
"ssl-client-cert": "i am client side cert"})
self.assertEqual(obj.session.verify, 'i am server side cert')
self.assertEqual(obj.session.cert, 'i am client side cert')
def test_jvm_args(self):
obj = HTTPClient()
obj.add_proxy_settings({"address": "http://localhost:3128",
"username": "me",
"password": "<PASSWORD>"})
jvm_args = obj.get_proxy_props()
for protocol in ['http', 'https']:
for key in ['proxyHost', 'proxyPort', 'proxyUser', 'proxyPass']:
combo_key = protocol + '.' + key
self.assertIn(combo_key, jvm_args)
def test_download_file(self):
obj = HTTPClient()
tmpfile = temp_file()
obj.download_file('http://localhost:8000/', tmpfile)
self.assertTrue(os.path.exists(tmpfile))
with open(tmpfile) as fds:
contents = fds.read()
self.assertGreaterEqual(len(contents), 0)
def test_download_404(self):
obj = HTTPClient()
tmpfile = temp_file()
self.assertRaises(TaurusNetworkError, lambda: obj.download_file('http://localhost:8000/404', tmpfile))
def test_download_fail(self):
obj = HTTPClient()
tmpfile = temp_file()
self.assertRaises(TaurusNetworkError, lambda: obj.download_file('http://non.existent.com/', tmpfile))
def test_request(self):
obj = HTTPClient()
resp = obj.request('GET', 'http://localhost:8000/')
self.assertTrue(resp.ok)
def test_request_fail(self):
obj = HTTPClient()
self.assertRaises(TaurusNetworkError, lambda: obj.request('GET', 'http://non.existent.com/'))
|
workloads/examples/k8s/kubeflow-pipeline-deploy/triton.py
|
mkunin-nvidia/deepops
| 748 |
82320
|
#!/usr/bin/env python3
'''
Kubeflow documentation: https://kubeflow-pipelines.readthedocs.io/en/latest/_modules/kfp/dsl/_container_op.html
K8S documentation: https://github.com/kubernetes-client/python/blob/02ef5be4ecead787961037b236ae498944040b43/kubernetes/docs/V1Container.md
Example Triton Inference Server Models: https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-master-branch-guide/docs/run.html#example-model-repository
Example Triton Inference Server Client: https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-master-branch-guide/docs/client_example.html#section-getting-the-client-examples
Bugs:
Cannot dynamically assign GPU counts: https://github.com/kubeflow/pipelines/issues/1956
# Manual run example:
nvidia-docker run --rm --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -p8000:8000 -p8001:8001 -p8002:8002 -v/raid/shared/results/model_repository/:/model_repository nvcr.io/nvidia/tensorrtserver:20.02-py3 trtserver --model-repository=/model_repository
docker run -it --rm --net=host tensorrtserver_client /workspace/install/bin/image_client -m resnet50_netdef images/mug.jpg
'''
import triton_ops
import kfp.dsl as dsl
from kubernetes import client as k8s_client
@dsl.pipeline(
name='tritonPipeline',
description='Deploy a Triton server'
)
def triton_pipeline(skip_examples):
op_dict = {}
# Hardcoded paths mounted in the Triton container
results_dir = "/results/"
data_dir = "/data/"
checkpoints_dir = "/checkpoints/"
models = "/results/model_repository"
# Set default volume names
pv_data = "triton-data"
pv_results = "triton-results"
pv_checkpoints = "triton-checkpoints"
# Create K8s PVs
op_dict['triton_volume_results'] = triton_ops.TritonVolume('triton_volume_results', pv_results)
op_dict['triton_volume_data'] = triton_ops.TritonVolume('triton_volume_data', pv_data)
op_dict['triton_volume_checkpoints'] = triton_ops.TritonVolume('triton_volume_checkpoints', pv_checkpoints)
# Download example models
with dsl.Condition(skip_examples == '', name='skip-examples-download'):
op_dict['triton_download'] = triton_ops.TritonDownload('triton_download', models)
# Common Operations
op_dict['triton_service'] = triton_ops.TritonService('triton_service')
op_dict['triton_deploy'] = triton_ops.TritonDeploy('triton_deploy', models)
# Use GPUs
op_dict['triton_deploy'].set_gpu_limit(1, vendor = "nvidia")
# Add Triton Ports
op_dict['triton_deploy'].add_port(k8s_client.V1ContainerPort(container_port=8000, host_port=8000)) # HTTP
op_dict['triton_deploy'].add_port(k8s_client.V1ContainerPort(8001, host_port=8001)) # gRPC
op_dict['triton_deploy'].add_port(k8s_client.V1ContainerPort(8002, host_port=8002)) # Metrics
    # Set order so that volumes are created, then examples downloaded, then the service started
op_dict['triton_download'].after(op_dict['triton_volume_results'])
op_dict['triton_download'].after(op_dict['triton_volume_data'])
op_dict['triton_download'].after(op_dict['triton_volume_checkpoints'])
op_dict['triton_deploy'].after(op_dict['triton_download'])
# Mount Volumes
for name, container_op in op_dict.items():
if name == 'triton_service' or type(container_op) == triton_ops.TritonVolume:
continue
container_op.add_volume(k8s_client.V1Volume(persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
claim_name=pv_results, read_only=False), name=pv_results))
container_op.add_volume_mount(k8s_client.V1VolumeMount(
mount_path=results_dir, name=pv_results, read_only=False))
container_op.add_volume(k8s_client.V1Volume(persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
claim_name=pv_data, read_only=False), name=pv_data))
container_op.add_volume_mount(k8s_client.V1VolumeMount(
mount_path=data_dir, name=pv_data, read_only=True))
container_op.add_volume(k8s_client.V1Volume(persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
claim_name=pv_checkpoints, read_only=False), name=pv_checkpoints))
container_op.add_volume_mount(k8s_client.V1VolumeMount(
mount_path=checkpoints_dir, name=pv_checkpoints, read_only=True))
'''
TODO Implement https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Probe.md:
livenessProbe:
httpGet:
path: /api/health/live
port: http
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 5
httpGet:
path: /api/health/ready
port: http
'''
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(triton_pipeline, __file__ + '.tar.gz')
|
tencentcloud/asr/v20190614/errorcodes.py
|
PlasticMem/tencentcloud-sdk-python
| 465 |
82323
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# The user does not have permission to perform this query operation.
AUTHFAILURE_CHECKRESOURCERESPONSECODEERROR = 'AuthFailure.CheckResourceResponseCodeError'
# Unauthorized operation.
AUTHFAILURE_UNAUTHORIZEDOPERATION = 'AuthFailure.UnauthorizedOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Failed to download the audio file.
FAILEDOPERATION_ERRORDOWNFILE = 'FailedOperation.ErrorDownFile'
# Recognition failed.
FAILEDOPERATION_ERRORRECOGNIZE = 'FailedOperation.ErrorRecognize'
# Invalid TaskId.
FAILEDOPERATION_NOSUCHTASK = 'FailedOperation.NoSuchTask'
# Service suspended because the account is in arrears; please top up your Tencent Cloud account.
FAILEDOPERATION_SERVICEISOLATE = 'FailedOperation.ServiceIsolate'
# The account's free quota for this month has been used up.
FAILEDOPERATION_USERHASNOFREEAMOUNT = 'FailedOperation.UserHasNoFreeAmount'
# Service not activated; please activate it in the speech recognition console on the Tencent Cloud website.
FAILEDOPERATION_USERNOTREGISTERED = 'FailedOperation.UserNotRegistered'
# Internal error.
INTERNALERROR = 'InternalError'
# Failed to initialize the configuration.
INTERNALERROR_ERRORCONFIGURE = 'InternalError.ErrorConfigure'
# Failed to create the log.
INTERNALERROR_ERRORCREATELOG = 'InternalError.ErrorCreateLog'
# Failed to download the audio file.
INTERNALERROR_ERRORDOWNFILE = 'InternalError.ErrorDownFile'
# Failed to create a new array.
INTERNALERROR_ERRORFAILNEWPREQUEST = 'InternalError.ErrorFailNewprequest'
# Failed to write to the database.
INTERNALERROR_ERRORFAILWRITETODB = 'InternalError.ErrorFailWritetodb'
# Unable to open the file.
INTERNALERROR_ERRORFILECANNOTOPEN = 'InternalError.ErrorFileCannotopen'
# Failed to get the route.
INTERNALERROR_ERRORGETROUTE = 'InternalError.ErrorGetRoute'
# Failed to create the log path.
INTERNALERROR_ERRORMAKELOGPATH = 'InternalError.ErrorMakeLogpath'
# Recognition failed.
INTERNALERROR_ERRORRECOGNIZE = 'InternalError.ErrorRecognize'
# Failed to access the database.
INTERNALERROR_FAILACCESSDATABASE = 'InternalError.FailAccessDatabase'
# Failed to access Redis.
INTERNALERROR_FAILACCESSREDIS = 'InternalError.FailAccessRedis'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid request data length.
INVALIDPARAMETER_ERRORCONTENTLENGTH = 'InvalidParameter.ErrorContentlength'
# Missing parameters.
INVALIDPARAMETER_ERRORPARAMSMISSING = 'InvalidParameter.ErrorParamsMissing'
# Failed to parse the request data.
INVALIDPARAMETER_ERRORPARSEQUEST = 'InvalidParameter.ErrorParsequest'
# File encoding error.
INVALIDPARAMETER_FILEENCODE = 'InvalidParameter.FileEncode'
# Invalid vocabulary state.
INVALIDPARAMETER_INVALIDVOCABSTATE = 'InvalidParameter.InvalidVocabState'
# The model's state does not allow deletion.
INVALIDPARAMETER_MODELSTATE = 'InvalidParameter.ModelState'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Invalid AppId.
INVALIDPARAMETERVALUE_ERRORINVALIDAPPID = 'InvalidParameterValue.ErrorInvalidAppid'
# Invalid ClientIp.
INVALIDPARAMETERVALUE_ERRORINVALIDCLIENTIP = 'InvalidParameterValue.ErrorInvalidClientip'
# Invalid EngSerViceType.
INVALIDPARAMETERVALUE_ERRORINVALIDENGSERVICE = 'InvalidParameterValue.ErrorInvalidEngservice'
# Invalid ProjectId.
INVALIDPARAMETERVALUE_ERRORINVALIDPROJECTID = 'InvalidParameterValue.ErrorInvalidProjectid'
# Invalid RequestId.
INVALIDPARAMETERVALUE_ERRORINVALIDREQUESTID = 'InvalidParameterValue.ErrorInvalidRequestid'
# Invalid SourceType.
INVALIDPARAMETERVALUE_ERRORINVALIDSOURCETYPE = 'InvalidParameterValue.ErrorInvalidSourcetype'
# Invalid SubserviceType.
INVALIDPARAMETERVALUE_ERRORINVALIDSUBSERVICETYPE = 'InvalidParameterValue.ErrorInvalidSubservicetype'
# Invalid Url.
INVALIDPARAMETERVALUE_ERRORINVALIDURL = 'InvalidParameterValue.ErrorInvalidUrl'
# Invalid UsrAudioKey.
INVALIDPARAMETERVALUE_ERRORINVALIDUSERAUDIOKEY = 'InvalidParameterValue.ErrorInvalidUseraudiokey'
# Unsupported audio encoding format.
INVALIDPARAMETERVALUE_ERRORINVALIDVOICEFORMAT = 'InvalidParameterValue.ErrorInvalidVoiceFormat'
# Invalid audio data.
INVALIDPARAMETERVALUE_ERRORINVALIDVOICEDATA = 'InvalidParameterValue.ErrorInvalidVoicedata'
# The audio duration exceeds the limit.
INVALIDPARAMETERVALUE_ERRORVOICEDATATOOLONG = 'InvalidParameterValue.ErrorVoicedataTooLong'
# Invalid parameter length.
INVALIDPARAMETERVALUE_INVALIDPARAMETERLENGTH = 'InvalidParameterValue.InvalidParameterLength'
# Invalid VocabId.
INVALIDPARAMETERVALUE_INVALIDVOCABID = 'InvalidParameterValue.InvalidVocabId'
# Invalid vocabulary state.
INVALIDPARAMETERVALUE_INVALIDVOCABSTATE = 'InvalidParameterValue.InvalidVocabState'
# Invalid word weight.
INVALIDPARAMETERVALUE_INVALIDWORDWEIGHT = 'InvalidParameterValue.InvalidWordWeight'
# Invalid WordWeightStr.
INVALIDPARAMETERVALUE_INVALIDWORDWEIGHTSTR = 'InvalidParameterValue.InvalidWordWeightStr'
# The model does not exist.
INVALIDPARAMETERVALUE_MODELID = 'InvalidParameterValue.ModelId'
# Invalid model state.
INVALIDPARAMETERVALUE_TOSTATE = 'InvalidParameterValue.ToState'
# The quota limit has been exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# The number of created self-learning models has reached the limit.
LIMITEXCEEDED_CUSTOMIZATIONFULL = 'LimitExceeded.CustomizationFull'
# The number of online models has reached the limit.
LIMITEXCEEDED_ONLINEFULL = 'LimitExceeded.OnlineFull'
# The number of hotword vocabularies has reached the account limit.
LIMITEXCEEDED_VOCABFULL = 'LimitExceeded.VocabFull'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
|
lstm_classifier/combined/utils.py
|
undeadyequ/ser_model
| 217 |
82358
|
<reponame>undeadyequ/ser_model
import torch
import pickle
import numpy as np
import pandas as pd
from config import model_config as config
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
import itertools
import matplotlib.pyplot as plt
def load_data(batched=True, test=False, file_dir='../../data/combined/combined_features.pkl'):
bs = config['batch_size']
ftype = 'test' if test else 'train'
with open('{}'.format(file_dir), 'rb') as f:
features = pickle.load(f)
x = features['x_{}'.format(ftype)]
y = features['y_{}'.format(ftype)]
data = (x, y)
if test or not batched:
return [torch.FloatTensor(data[0]), torch.LongTensor(data[1])]
data = list(zip(data[0], data[1]))
n_iters = len(data) // bs
batches = []
for i in range(1, n_iters + 1):
input_batch = []
output_batch = []
for e in data[bs * (i-1):bs * i]:
input_batch.append(e[0])
output_batch.append(e[1])
batches.append([torch.FloatTensor(input_batch),
torch.LongTensor(output_batch)])
return batches
def evaluate(targets, predictions):
performance = {
'acc': accuracy_score(targets, predictions),
'f1': f1_score(targets, predictions, average='macro'),
'precision': precision_score(targets, predictions, average='macro'),
'recall': recall_score(targets, predictions, average='macro')}
return performance
def plot_confusion_matrix(targets, predictions, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
# plt.figure(figsize=(8,8))
    cm = confusion_matrix(targets, predictions)
    # normalize before plotting so the image and the cell annotations agree
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
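
# --- Illustrative sketch (not part of the original file) ---
# evaluate() expects aligned label sequences and returns macro-averaged scores.
if __name__ == "__main__":
    scores = evaluate(targets=[0, 1, 1, 2], predictions=[0, 1, 2, 2])
    print(scores)  # e.g. {'acc': 0.75, 'f1': ..., 'precision': ..., 'recall': ...}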
|
promgen/migrations/0011_notifier_counts.py
|
kackey0-1/promgen
| 913 |
82384
|
<reponame>kackey0-1/promgen
# Generated by Django 2.2.4 on 2019-11-28 02:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('promgen', '0010_app_label_migration'),
]
operations = [
migrations.AddField(
model_name='alert',
name='error_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='alert',
name='sent_count',
field=models.PositiveIntegerField(default=0),
),
migrations.CreateModel(
name='AlertError',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('alert', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='promgen.Alert')),
],
),
]
|
examples/initialization_schemes/gaussian.py
|
uaca/deepy
| 260 |
82385
|
<reponame>uaca/deepy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from util import run
from deepy.utils import GaussianInitializer
model_path = os.path.join(os.path.dirname(__file__), "models", "gaussian1.gz")
if __name__ == '__main__':
    # I have to set std to 0.1 in this case, or it will not converge
run(GaussianInitializer(deviation=0.1), model_path)
|
tests/unit/test_parse.py
|
joye1503/cocrawler
| 166 |
82395
|
<reponame>joye1503/cocrawler
from bs4 import BeautifulSoup
import cocrawler.parse as parse
from cocrawler.urls import URL
test_html = '''
<html>
<head><title>Foo</title><link href='link.html'></link></head>
<body>
<a href = "foo1.html">Anchor 1</a>
<a
href = foo2.htm>Anchor 2</a>
<a
href='foo3.html '>Anchor 3</a>
<img src=foo.gif />
<a href='torture"
<url>'>torture
anchor</a>
</body>
'''
test_html_harder = '''
<html>
<head></head>
<body>
<iframe src="iframe.html"></iframe>
<iframe src=""></iframe>
<link href="stylesheet.blah" rel="stylesheet">
<link href="" rel="stylesheet">
<link href="http://example.com" rel="prefetch">
<link href="do-not-crash-1">
<link href="do-not-crash-2" rel="one" rel="two">
<link href="">
</body>
'''
test_html_no_body = '''
<html>
<head><title>Foo</title><link href='link.html'></link></head>
<a href="foo1.html">Anchor 4</a>
<a
href=foo2.htm>Anchor 5</a>
<a
href="foo3.html ">Anchor 6</a>
<img src=foo.gif />
'''
test_html_no_head = '''
<html>
<body>
<a href="foo1.html">Anchor 7</a>
<a
href=foo2.htm>Anchor 8</a>
<a
href="foo3.html ">Anchor 9</a>
<img src=foo.gif />
</body>
'''
test_html_no_nothing = '''
<a href="foo1.html">Anchor 10</a>
<a
href=foo2.htm>Anchor 11</a>
<a
href="foo3.html ">Anchor 12</a>
<img src=foo.gif />
'''
def test_do_burner_work_html():
urlj = URL('http://example.com')
test_html_bytes = test_html.encode(encoding='utf-8', errors='replace')
headers = {}
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html, test_html_bytes, headers, url=urlj)
assert len(links) == 4
assert len(embeds) == 2
linkset = set(u.url for u in links)
embedset = set(e.url for e in embeds)
assert 'http://example.com/foo3.html' in linkset
assert 'http://example.com/foo.gif' in embedset
assert sha1 == 'sha1:cdcb087d39afd827d5d523e165a6566d65a2e9b3'
assert base is None
# as a handwave, let's expect these defective pages to also work.
test_html_bytes = test_html_no_body.encode(encoding='utf-8', errors='replace')
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html_no_body, test_html_bytes, headers, url=urlj)
assert len(links) == 3
assert len(embeds) == 2
test_html_bytes = test_html_no_head.encode(encoding='utf-8', errors='replace')
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html_no_head, test_html_bytes, headers, url=urlj)
assert len(links) == 3
assert len(embeds) == 1
test_html_bytes = test_html_no_nothing.encode(encoding='utf-8', errors='replace')
links, embeds, sha1, facets, base = parse.do_burner_work_html(test_html_no_nothing, test_html_bytes, headers, url=urlj)
assert len(links) == 3
assert len(embeds) == 1
def test_clean_link_objects():
test = [{'href': 'http://example.com'}, {'href': 'data:46532656'}, {'href': 'https://example.com'}]
ret = [{'href': 'http://example.com'}, {'href': 'https://example.com'}]
assert parse.clean_link_objects(test, ('data:', 'javascript:')) == ret
def test_individual_parsers():
links, embeds = parse.find_html_links_re(test_html)
assert len(links) == 6
assert len(embeds) == 0
linkset = set(parse.collapse_links(links))
assert 'foo2.htm' in linkset
assert 'foo3.html ' in linkset
assert 'foo.gif' in linkset
assert 'torture"\n<url>' in linkset
head, body = parse.split_head_body(test_html)
links, embeds = parse.find_body_links_re(body)
assert len(links) == 4
assert len(embeds) == 1
linkset = set(parse.collapse_links(links))
embedset = set(parse.collapse_links(embeds))
assert 'foo2.htm' in linkset
assert 'foo3.html ' in linkset
assert 'torture"\n<url>' in linkset
assert 'foo.gif' in embedset
links, embeds = parse.find_body_links_anchors_re(body)
assert len(links) == 4
assert len(embeds) == 1
linkdict = dict([(l['href'], l['anchor']) for l in links])
# {('foo1.html', 'Anchor 1'), ('foo3.html ', 'Anchor 3'), ('foo2.htm', 'Anchor 2'), ('torture"\n<url>', 'torture\nanchor')}
assert linkdict['foo2.htm'] == 'Anchor 2'
assert linkdict['foo3.html '] == 'Anchor 3'
assert linkdict['torture"\n<url>'] == 'torture\nanchor'
assert 'foo.gif' in embeds[0]['src']
head_soup = BeautifulSoup(head, 'lxml')
links, embeds = parse.find_head_links_soup(head_soup)
embedset = set(parse.collapse_links(embeds))
assert len(links) == 0
assert len(embeds) == 1
assert 'link.html' in embedset
head_soup = BeautifulSoup(head, 'lxml')
body_soup = BeautifulSoup(body, 'lxml')
links, embeds = parse.find_head_links_soup(head_soup)
lbody, ebody = parse.find_body_links_soup(body_soup)
links += lbody
embeds += ebody
linkset = set(parse.collapse_links(links))
embedset = set(parse.collapse_links(embeds))
assert len(links) == 4
assert len(embeds) == 2
assert 'foo2.htm' in linkset
assert 'foo3.html ' in linkset
assert 'torture"\n<url>' in linkset
assert 'link.html' in embedset
assert 'foo.gif' in embedset
head, body = parse.split_head_body(test_html_harder)
body_soup = BeautifulSoup(body, 'lxml')
lbody, ebody = parse.find_body_links_soup(body_soup)
assert len(lbody) == 1
assert len(ebody) == 1
assert 'iframe.html' == lbody[0]['src']
assert 'stylesheet.blah' == ebody[0]['href']
test_css = '''
@import url('foo1.css')
url(images/foo2.png)
url( images/foo3.png )
'''
def test_css_parser():
links, embeds = parse.find_css_links_re(test_css)
assert len(links) == 0
assert len(embeds) == 3
assert 'images/foo3.png' in embeds
def test_split_head_body():
'''
Whitebox test of the heuristics in this function
'''
head, body = parse.split_head_body('x'*100000)
assert head == ''
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<HeAd>' + 'x'*100000)
assert head == ''
assert len(body) == 100007
head, body = parse.split_head_body('x' + '</HeAd>' + 'x'*100000)
assert head == 'x'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<BoDy>' + 'x'*100000)
assert head == 'x'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<heAd><boDy>' + 'x'*100000)
assert head == 'x<heAd>'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<hEad></heAd>' + 'x'*100000)
assert head == 'x<hEad>'
assert len(body) == 100000
head, body = parse.split_head_body('x' + '<heaD></Head><bOdy>' + 'x'*100000)
assert head == 'x<heaD>'
assert len(body) == 100006
def test_parse_refresh():
test = ((('0;foo'), (0, 'foo')),
((';'), (None, None)),
(('1.1.1.1; bar'), (1, 'bar')),
(('2.2, urbaz'), (2, 'urbaz')),
(('3; url=barf'), (3, 'barf')),
(('3; url="barf"asdf'), (3, 'barf')),
(('3; UrL='), (3, '')))
for t in test:
assert parse.parse_refresh(t[0]) == t[1]
def test_regex_out_comments():
t = 'Hello <!-- foo --> world!'
assert parse.regex_out_comments(t) == 'Hello world!'
def test_regex_out_some_scripts():
t = '<script>foo</script> bar'
assert parse.regex_out_some_scripts(t) == ' bar'
def test_regex_out_all_script():
t = '<script>foo</script> bar <script type="baz">barf</script> '
assert parse.regex_out_all_scripts(t) == ' bar '
|
pyez/load_temp_conf.py
|
rsmekala/junosautomation
| 117 |
82459
|
<filename>pyez/load_temp_conf.py<gh_stars>100-1000
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
import yaml
dev = Device(host='xxxx', user='demo', password='<PASSWORD>', gather_facts=False)
dev.open()
with open('protocol_data.yml') as f:
    data = yaml.safe_load(f)
cu = Config(dev)
cu.load(template_path='protocol_temp.j2', template_vars=data, format='text')
cu.pdiff()
if cu.commit_check():
cu.commit()
else:
cu.rollback()
dev.close()
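# --- Illustrative sketch (not part of the original script) ---
# The YAML data and Jinja2 template are assumed to pair up roughly as below;
# the contents are made up for illustration (format='text' expects
# curly-brace-style configuration output).
#
# protocol_data.yml:
#     bgp_groups:
#       - name: EBGP-PEERS
#         peer_as: 65001
#
# protocol_temp.j2:
#     protocols {
#         bgp {
#     {% for group in bgp_groups %}
#             group {{ group.name }} {
#                 peer-as {{ group.peer_as }};
#             }
#     {% endfor %}
#         }
#     }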
|
alphamind/data/winsorize.py
|
rongliang-tech/alpha-mind
| 186 |
82466
|
<reponame>rongliang-tech/alpha-mind
# -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import numba as nb
import numpy as np
from alphamind.utilities import aggregate
from alphamind.utilities import array_index
from alphamind.utilities import group_mapping
from alphamind.utilities import simple_mean
from alphamind.utilities import simple_std
from alphamind.utilities import transform
@nb.njit(nogil=True, cache=True)
def mask_values_2d(x: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3) -> np.ndarray:
res = x.copy()
length, width = x.shape
for i in range(length):
for j in range(width):
ubound = mean_values[i, j] + num_stds * std_values[i, j]
lbound = mean_values[i, j] - num_stds * std_values[i, j]
if x[i, j] > ubound:
res[i, j] = ubound
elif x[i, j] < lbound:
res[i, j] = lbound
return res
@nb.njit(nogil=True, cache=True)
def interp_values_2d(x: np.ndarray,
groups: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3,
interval: float = 0.5) -> np.ndarray:
res = x.copy()
length, width = x.shape
max_cat = np.max(groups)
for k in range(max_cat + 1):
target_idx = np.where(groups == k)[0].flatten()
for j in range(width):
target_x = x[target_idx, j]
target_res = target_x.copy()
mean = mean_values[target_idx[0], j]
std = std_values[target_idx[0], j]
ubound = mean + num_stds * std
lbound = mean - num_stds * std
# upper bound abnormal values
idx = target_x > ubound
n = np.sum(idx)
if n > 0:
u_values = target_res[idx]
q_values = u_values.argsort().argsort()
target_res[idx] = ubound + q_values / n * interval * std
# lower bound abnormal values
idx = target_x < lbound
n = np.sum(idx)
if n > 0:
l_values = target_res[idx]
q_values = (-l_values).argsort().argsort()
target_res[idx] = lbound - q_values / n * interval * std
res[target_idx, j] = target_res
return res
@nb.njit(nogil=True, cache=True)
def mask_values_1d(x: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3) -> np.ndarray:
res = x.copy()
length, width = x.shape
for j in range(width):
ubound = mean_values[j] + num_stds * std_values[j]
lbound = mean_values[j] - num_stds * std_values[j]
res[x[:, j] > ubound, j] = ubound
res[x[:, j] < lbound, j] = lbound
return res
@nb.njit(nogil=True, cache=True)
def interp_values_1d(x: np.ndarray,
mean_values: np.ndarray,
std_values: np.ndarray,
num_stds: int = 3,
interval: float = 0.5) -> np.ndarray:
res = x.copy()
length, width = x.shape
for j in range(width):
ubound = mean_values[j] + num_stds * std_values[j]
lbound = mean_values[j] - num_stds * std_values[j]
# upper bound abnormal values
idx = x[:, j] > ubound
n = np.sum(idx)
if n > 0:
u_values = res[idx, j]
q_values = u_values.argsort().argsort()
res[idx, j] = ubound + q_values / n * interval * std_values[j]
# lower bound abnormal values
idx = x[:, j] < lbound
n = np.sum(idx)
if n > 0:
l_values = res[idx, j]
q_values = (-l_values).argsort().argsort()
res[idx, j] = lbound - q_values / n * interval * std_values[j]
return res
def winsorize_normal(x: np.ndarray, num_stds: int = 3, ddof=1,
groups: np.ndarray = None,
method: str = 'flat',
interval: float = 0.5) -> np.ndarray:
if groups is not None:
groups = group_mapping(groups)
mean_values = transform(groups, x, 'mean')
std_values = transform(groups, x, 'std', ddof)
if method == 'flat':
res = mask_values_2d(x, mean_values, std_values, num_stds)
else:
res = interp_values_2d(x, groups, mean_values, std_values, num_stds, interval)
else:
std_values = simple_std(x, axis=0, ddof=ddof)
mean_values = simple_mean(x, axis=0)
if method == 'flat':
res = mask_values_1d(x, mean_values, std_values, num_stds)
else:
res = interp_values_1d(x, mean_values, std_values, num_stds, interval)
return res
class NormalWinsorizer(object):
def __init__(self, num_stds: int = 3,
                 ddof: int = 1,
method: str = 'flat',
interval: float = 0.5):
self.num_stds = num_stds
self.ddof = ddof
self.mean = None
self.std = None
self.labels = None
self.method = method
self.interval = interval
def fit(self, x: np.ndarray, groups: np.ndarray = None):
if groups is not None:
group_index = group_mapping(groups)
self.mean = aggregate(group_index, x, 'mean')
self.std = aggregate(group_index, x, 'std', self.ddof)
self.labels = np.unique(groups)
else:
self.mean = simple_mean(x, axis=0)
self.std = simple_std(x, axis=0, ddof=self.ddof)
def transform(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
if groups is not None:
index = array_index(self.labels, groups)
if self.method == 'flat':
res = mask_values_2d(x, self.mean[index], self.std[index], self.num_stds)
else:
res = interp_values_2d(x, groups,
self.mean[index],
self.std[index],
self.num_stds,
self.interval)
else:
if self.method == 'flat':
res = mask_values_1d(x, self.mean, self.std, self.num_stds)
else:
res = interp_values_1d(x, self.mean, self.std, self.num_stds, self.interval)
return res
def __call__(self, x: np.ndarray, groups: np.ndarray = None) -> np.ndarray:
return winsorize_normal(x, self.num_stds, self.ddof, groups, self.method, self.interval)
if __name__ == '__main__':
x = np.random.randn(10000, 1)
groups = np.random.randint(0, 3, 10000)
import datetime as dt
start = dt.datetime.now()
for i in range(1000):
winsorize_normal(x, method='flat')
print(dt.datetime.now() - start)
start = dt.datetime.now()
for i in range(1000):
winsorize_normal(x, method='interp')
print(dt.datetime.now() - start)
|
tracardi/process_engine/action/v1/metrics/key_counter/model/configuration.py
|
bytepl/tracardi
| 153 |
82476
|
<gh_stars>100-1000
from typing import Union, List
from pydantic import BaseModel
class Configuration(BaseModel):
key: Union[str, List[str]]
save_in: str
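
# --- Illustrative sketch (not part of the original file) ---
# "key" accepts a single key or a list of keys; the values below are made up.
if __name__ == "__main__":
    print(Configuration(key="visits", save_in="profile@stats.counters.visits"))
    print(Configuration(key=["page", "referrer"], save_in="profile@stats.counters.views"))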
|
pygears/typing/array.py
|
bogdanvuk/pygears
| 120 |
82478
|
<filename>pygears/typing/array.py
from .base import EnumerableGenericMeta, typeof, is_type, TemplateArgumentsError
from .base import class_and_instance_method
# TODO: Check why array is specified when no length is specified
class ArrayType(EnumerableGenericMeta):
def __new__(cls, name, bases, namespace, args=None):
cls = super().__new__(cls, name, bases, namespace, args)
if not cls.specified:
return cls
if not isinstance(cls.args[1], int):
err = None
try:
cls.__args__[1] = int(cls.args[1])
except TypeError as e:
err = e
if err:
                raise TemplateArgumentsError(
                    f'Second argument to the "Array" type must be an integer, not "{repr(cls.args[1])}"'
                )
return cls
def keys(self):
"""Returns a list of keys that can be used for indexing :class:`Array` [T, N] type. Number of keys equals to the number of elements N.
>>> Array[Uint[2], 5].keys()
[0, 1, 2, 3, 4]
"""
return list(range(self.args[1]))
@property
def width(self):
return sum(f.width for f in self)
# TODO: Remove this
@property
def dtype(self):
return self.args[0]
@property
def data(self):
return self.args[0]
def __getitem__(self, index):
"""If a single element is supplied for index, returns type T. If a slice is suplied for index, an :class:`Array` type is returned with a number of elements equal to the slice size.
>>> Array[Uint[2], 5][3]
Uint[2]
>>> Array[Uint[2], 5][2:4]
Array[Uint[2], 2]
"""
if not self.specified:
return super().__getitem__(index)
index = self.index_norm(index)
if len(index) == 1 and not isinstance(index[0], slice):
if index[0] >= len(self):
raise IndexError
return self.args[0]
else:
width = 0
for i in index:
if isinstance(i, slice):
if (i.stop == 0) or (i.stop - i.start > len(self)):
raise IndexError
width += i.stop - i.start
else:
if i >= len(self):
raise IndexError
width += 1
return Array[self.args[0], width]
def __str__(self):
if self.args:
return f'Array[{str(self.args[0])}, {len(self)}]'
else:
return super().__str__()
class Array(list, metaclass=ArrayType):
"""Generic container datatype that holds N instances of type T
Generic parameters:
T: Type of the :class:`Array` [T, N] elements
N: Number of elements in the :class:`Array` [T, N]
Concrete data type is obtained by indexing::
u16_4 = Array[Uint[16], 4]
"""
__parameters__ = ['T', 'N']
def __init__(self, val: tuple = None):
t = type(self).data
if val is None:
array_tpl = (None, ) * len(type(self))
else:
array_tpl = (v if typeof(type(v), t) or v is None else t(v) for v in val)
return super().__init__(array_tpl)
def __eq__(self, other):
t_other = type(other)
if not is_type(t_other):
return super().__eq__(other)
return type(self) == t_other and super().__eq__(other)
def __ne__(self, other):
if not is_type(type(other)):
return self._array != other
return not self.__eq__(other)
@class_and_instance_method
def __getitem__(self, key):
if isinstance(key, int):
return super().__getitem__(key)
elif isinstance(key, str):
try:
return super().__getitem__(type(self).fields.index(key))
except ValueError:
                raise TypeError(f'Array "{repr(self)}" has no field "{key}"')
key_norm = type(self).index_norm(key)
if len(key_norm) == 1:
if isinstance(key_norm[0], slice):
tout = type(self)[key_norm]
return tout(super().__getitem__(key_norm[0]))
else:
return super(Array, self).__getitem__(key_norm[0])
else:
tout = type(self)[key_norm]
elems = []
for i in key_norm:
elems.extend(super().__getitem__(i))
return tout(elems)
@class_and_instance_method
def subs(self, path, val):
if isinstance(path, tuple):
if len(path) > 1:
val = self[path[0]].subs(path[1:], val)
path = path[0]
return type(self)([self[i] if i != path else val for i in range(len(self))])
def __hash__(self):
return super().__hash__()
def code(self):
w_dtype = type(self).data.width
ret = 0
for d in reversed(self):
ret <<= w_dtype
if d is not None:
ret |= d.code()
return ret
@property
def unknown(self):
return any(v is None or getattr(v, 'unknown', False) for v in self)
@classmethod
def decode(cls, val):
ret = []
val = int(val)
mask = int(cls.data.width * '1', 2)
for t in cls:
ret.append(t.decode(val & mask))
val >>= t.width
return cls(ret)
@class_and_instance_method
def copy(self):
        return type(self)(self)
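
# --- Illustrative sketch (not part of the original file) ---
# code()/decode() round-trip, assuming Uint is importable from pygears.typing:
#
# >>> from pygears.typing import Uint
# >>> a = Array[Uint[2], 3]((1, 2, 3))
# >>> Array[Uint[2], 3].decode(a.code()) == a
# True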
|
models/convfc.py
|
bhairavmehta95/dnn-mode-connectivity
| 182 |
82482
|
<gh_stars>100-1000
import math
import torch.nn as nn
import curves
__all__ = [
'ConvFC',
]
class ConvFCBase(nn.Module):
def __init__(self, num_classes):
super(ConvFCBase, self).__init__()
self.conv_part = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=5, padding=2),
nn.ReLU(True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(32, 64, kernel_size=5, padding=2),
nn.ReLU(True),
nn.MaxPool2d(3, 2),
nn.Conv2d(64, 128, kernel_size=5, padding=2),
nn.ReLU(True),
nn.MaxPool2d(3, 2),
)
self.fc_part = nn.Sequential(
nn.Linear(1152, 1000),
nn.ReLU(True),
nn.Linear(1000, 1000),
nn.ReLU(True),
nn.Linear(1000, num_classes)
)
# Initialize weights
for m in self.conv_part.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.conv_part(x)
x = x.view(x.size(0), -1)
x = self.fc_part(x)
return x
class ConvFCCurve(nn.Module):
def __init__(self, num_classes, fix_points):
super(ConvFCCurve, self).__init__()
self.conv1 = curves.Conv2d(3, 32, kernel_size=5, padding=2, fix_points=fix_points)
self.relu1 = nn.ReLU(True)
self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = curves.Conv2d(32, 64, kernel_size=5, padding=2, fix_points=fix_points)
self.relu2 = nn.ReLU(True)
self.max_pool2 = nn.MaxPool2d(3, 2)
self.conv3 = curves.Conv2d(64, 128, kernel_size=5, padding=2, fix_points=fix_points)
self.relu3 = nn.ReLU(True)
self.max_pool3 = nn.MaxPool2d(3, 2)
self.fc4 = curves.Linear(1152, 1000, fix_points=fix_points)
self.relu4 = nn.ReLU(True)
self.fc5 = curves.Linear(1000, 1000, fix_points=fix_points)
self.relu5 = nn.ReLU(True)
self.fc6 = curves.Linear(1000, num_classes, fix_points=fix_points)
# Initialize weights
for m in self.modules():
if isinstance(m, curves.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
for i in range(m.num_bends):
getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
getattr(m, 'bias_%d' % i).data.zero_()
def forward(self, x, coeffs_t):
x = self.conv1(x, coeffs_t)
x = self.relu1(x)
x = self.max_pool1(x)
x = self.conv2(x, coeffs_t)
x = self.relu2(x)
x = self.max_pool2(x)
x = self.conv3(x, coeffs_t)
x = self.relu3(x)
x = self.max_pool3(x)
x = x.view(x.size(0), -1)
x = self.fc4(x, coeffs_t)
x = self.relu4(x)
x = self.fc5(x, coeffs_t)
x = self.relu5(x)
x = self.fc6(x, coeffs_t)
return x
class ConvFC:
base = ConvFCBase
curve = ConvFCCurve
kwargs = {}
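# --- Editorial sketch (not part of the original file): a minimal smoke test.
# ConvFC.base builds a regular endpoint network; ConvFC.curve additionally
# takes fix_points and per-forward interpolation coefficients, so it is only
# indicated in a comment (its use depends on the curves module). The
# CIFAR-sized input is an assumption for illustration.
if __name__ == '__main__':
    import torch
    model = ConvFC.base(num_classes=10, **ConvFC.kwargs)
    x = torch.randn(2, 3, 32, 32)  # 32x32 RGB -> 128*3*3 = 1152 features
    print(model(x).shape)  # -> torch.Size([2, 10])
    # curve_model = ConvFC.curve(num_classes=10, fix_points=[True, False, True])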
|
zilencer/migrations/0014_cleanup_pushdevicetoken.py
|
TylerPham2000/zulip
| 17,004 |
82485
|
# Generated by Django 1.11.14 on 2018-10-10 22:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zilencer", "0013_remove_customer_billing_user"),
]
operations = [
migrations.AlterUniqueTogether(
name="remotepushdevicetoken",
unique_together={("server", "user_id", "kind", "token")},
),
]
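# Editorial note (not part of the original migration): AlterUniqueTogether
# materializes as a composite unique constraint, roughly:
#   ALTER TABLE zilencer_remotepushdevicetoken
#       ADD CONSTRAINT <django-generated name>
#       UNIQUE (server_id, user_id, kind, token);
# The table and column names here are assumptions based on Django's default
# naming conventions.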
|
examples/showcase/src/demos_widgets/fileUpload.py
|
takipsizad/pyjs
| 739 |
82547
|
<gh_stars>100-1000
"""
The ``ui.FileUpload`` class implements a file uploader widget.
The FileUpload widget must be inside a ``ui.FormPanel`` which is used to submit
the HTML form to the server. Note that you must set the form encoding and
method like this:
self.form.setEncoding(FormPanel.ENCODING_MULTIPART)
self.form.setMethod(FormPanel.METHOD_POST)
This will ensure that the form is submitted in a way that allows files to be
uploaded.
The example below doesn't really work, as there is no suitable server at
``nonexistent.com``. However, it does show how a file upload widget could be
used within a FormPanel.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.FormPanel import FormPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.FileUpload import FileUpload
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.NamedFrame import NamedFrame
class FileUploadDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
self.form = FormPanel()
self.form.setEncoding(FormPanel.ENCODING_MULTIPART)
self.form.setMethod(FormPanel.METHOD_POST)
self.form.setAction("http://nonexistent.com")
self.form.setTarget("results")
vPanel = VerticalPanel()
hPanel = HorizontalPanel()
hPanel.setSpacing(5)
hPanel.add(Label("Upload file:"))
self.field = FileUpload()
self.field.setName("file")
hPanel.add(self.field)
hPanel.add(Button("Submit", getattr(self, "onBtnClick")))
vPanel.add(hPanel)
results = NamedFrame("results")
vPanel.add(results)
self.form.add(vPanel)
self.add(self.form)
def onBtnClick(self, event):
self.form.submit()
|
tools/pydev/movifier.py
|
xdr1000/https-github.com-intercept-intercept
| 166 |
82549
|
<filename>tools/pydev/movifier.py
# ******************************************************************************************************************
# **************************************************** movifier ****************************************************
# ******************************************************************************************************************
import os
import re
def moveifier_init():
print "This tool will move function definitions and the"
print "corresponding implementation to another file."
print ""
folderloc = raw_input("Please input the folder containing the hpp and cpp files: ").strip()
if not os.path.exists(folderloc):
print folderloc
print "Path is invalid: directory does not exist."
return
filenameer = raw_input("Please type the filename (excluding extension): ").strip()
header_file = os.path.join(folderloc, filenameer + ".hpp")
if not os.path.isfile(header_file):
print header_file
print "Header file is invalid: file not found."
return
else:
print "Header: " + header_file + " OK!"
implementation_file = os.path.join(folderloc, filenameer + ".cpp")
if not os.path.isfile(implementation_file):
print implementation_file
print "Implementation file is invalid: file not found."
return
else:
print "Implementation: " + implementation_file + " OK!"
print "OK! Header and implementation found."
dest = raw_input("Please input the script output location: ").strip()
if not os.path.exists(dest):
print dest
print "Script output folder is invalid: folder does not exist."
return
else:
print "Output: " + dest + " OK!"
moveifier_scan_files(header_file, implementation_file, dest)
def moveifier_scan_files(headerf, implf, dest):
funpat = re.compile('[\w_ \:]+\(.*\)\;')
function_definitions = []
flag_verbose = True
# Find all function signatures.
with open(headerf) as f:
for line in f:
cleanline = line.strip()
if funpat.match(cleanline):
if (cleanline.startswith("//") or cleanline.startswith("/*") or cleanline.startswith("*")):
moveifier_print_to_unsorted(cleanline, os.path.join(dest, "_unsorted.hpp"))
continue
function_definitions.append(cleanline)
if flag_verbose:
print "Found definition: " + cleanline
else:
moveifier_print_to_unsorted(cleanline, os.path.join(dest, "_unsorted.hpp"))
print "Function definitions found: " + str(len(function_definitions))
# Find the function implementation
implementationcounter = 0
f_arr = []
with open(implf) as f:
fcache = f.readlines()
f.seek(0)
clineno = 0
for definit in function_definitions:
while True:
line = f.readline()
clineno = clineno + 1
if not line: break
if definit.strip(';') in line.strip():
open_braces = line.strip().count("{")
if open_braces < 1:
print "NO open braces on line" + line
if (peek_line(f).strip().count("{") < 1):
print "NO open braces on peeked line."
continue
open_braces = open_braces - line.strip().count("}")
function_imp = line
fcache[clineno] = ""
while (open_braces > 0):
thisline = f.readline()
fcache[clineno] = ""
clineno = clineno + 1
function_imp = function_imp + "\n" + thisline
open_braces = open_braces + thisline.strip().count("{")
open_braces = open_braces - thisline.strip().count("}")
print "Found implementation for " + definit.strip(';')
f_pair = [definit, function_imp]
f_arr.append(f_pair)
implementationcounter = implementationcounter + 1
continue
clineno = 0
f.seek(0)
print "Function implemenations found: " + str(implementationcounter)
print "Writing unsorted backup... please wait."
for line in fcache:
moveifier_print_to_unsorted(line, os.path.join(dest, "_unsorted.cpp"))
print ""
print ""
print " Source file parsing completed successfully. "
print ""
print " Header: " + headerf
print " Implementation: " + headerf
print " Outputting to: "
print ""
print " Function definitions found: " + str(len(function_definitions))
print " Function implemenations found: " + str(implementationcounter)
print ""
print " Functions to be moveed: " + str(len(f_arr))
print ""
print ""
print ""
print " The next stage will move the detected functions into files."
print " To view the manual before moving, type 'man', otherwise type 'yes' to continue or 'no' to cancel."
print ""
while (True):
ask_result = raw_input(" Do you wish to begin moving the functions? [yes/no/man]: ").strip()
if ask_result == "yes":
moveifier_do_move(f_arr, dest)
clearscr()
print ""
print " moving finished!"
print ""
print " Summary: "
print " Functions moveed: "
print " Functions skipped: "
print ""
raw_input("Press enter to return to the main menu.")
return
elif ask_result == "no":
return
elif ask_result == "man":
moveifier_show_manual(f_arr, dest)
break
def moveifier_show_manual(f_arr, dest):
print ""
print "==========================================================================="
print ""
print " Intercept pydev Move tool manual "
print ""
print " This tool is designed to assist in the fast movement of functions and their"
print " implementation between files. The tool discovers defined functions and"
print " attempts to match these definitions to implementations. Once this has been"
print " done the user will then be presented with the series of functions which"
print " were discovered and asked to provide a filename to move these functions to."
print " "
print " To move a function simply type the name (with no ext.) of the file you wish to move it into."
print " "
print " To skip a function just press enter with no filename entered. This will move the"
print " function into a file named _skipped.xpp"
print " "
print " To stop moving functions press ctrl+c or close the command prompt."
print " "
print " "
print " Note: Functions are moved immediatley, with no undo function, however, the original"
print " source file from which functions come from are not modified in any way."
print " "
print " The next stage will move the detected functions into files."
print " Type 'yes' to start moving functions or 'no' to cancel."
print ""
ask_result = raw_input(" Do you wish to begin moving the functions? [yes/no]: ").strip()
if ask_result == "yes":
moveifier_do_move(f_arr, dest)
clearscr()
print ""
print " moving finished!"
print ""
print " Summary: "
print " Functions moveed: "
print " Functions skipped: "
print ""
raw_input("Press enter to return to the main menu.")
return
elif ask_result == "no":
return
def moveifier_print_to_unsorted(line, dest):
with open(dest,'a') as f: f.write(line + "\n")
def moveifier_do_move(f_arr, dest):
i = 0
for func in f_arr:
i = i + 1
clearscr()
print ""
print ""
print " --== moving Function #" + str(i) + "/" + str(len(f_arr)) + " ==--"
print ""
moveifier_print_func_info(func, False)
dest_loc = raw_input(" Destination [blank to skip]: ").strip()
if dest_loc == "":
dest_loc = "_unsorted"
header_op_to = os.path.join(dest, dest_loc + ".hpp")
implementation_op_to = os.path.join(dest, dest_loc + ".cpp")
with open(header_op_to,'a') as f: f.write(func[0] + "\n")
with open(implementation_op_to,'a') as f: f.write(func[1] + "\n")
def moveifier_print_func_info(func, expand_impl):
print ""
print ""
print " Function Signature: " + func[0]
print ""
print " Function Implementation:\n" + func[1]
print ""
print ""
|
python-scipy-cluster-optimize/cluster_sms_spam.py
|
syberflea/materials
| 3,682 |
82553
|
<reponame>syberflea/materials
"""
Clustering example using an SMS spam dataset with SciPy.
Associated with the Real Python article
Scientific Python: Using SciPy for Optimization
Available at: https://realpython.com/python-scipy-cluster-optimize/
"""
from pathlib import Path
import numpy as np
from scipy.cluster.vq import whiten, kmeans, vq
HERE = Path(__file__).parent
data = HERE.joinpath("SMSSpamCollection").read_text().strip().split("\n")
digit_counts = np.empty((len(data), 2), dtype=int)
for i, line in enumerate(data):
case, message = line.split("\t")
num_digits = sum(c.isdigit() for c in message)
digit_counts[i, 0] = 0 if case == "ham" else 1
digit_counts[i, 1] = num_digits
unique_counts = np.unique(digit_counts[:, 1], return_counts=True)
unique_counts = np.transpose(np.vstack(unique_counts))
whitened_counts = whiten(unique_counts)
codebook, _ = kmeans(whitened_counts, 3)
codes, _ = vq(whitened_counts, codebook)
ham_code = codes[0]
spam_code = codes[-1]
unknown_code = list(set(range(3)) ^ set((ham_code, spam_code)))[0]
print("definitely ham:", unique_counts[codes == ham_code][-1])
print("definitely spam:", unique_counts[codes == spam_code][-1])
print("unknown:", unique_counts[codes == unknown_code][-1])
digits = digit_counts[:, 1]
predicted_hams = digits == 0
predicted_spams = digits > 20
predicted_unknowns = np.logical_and(digits > 0, digits <= 20)
ham_cluster = digit_counts[predicted_hams]
spam_cluster = digit_counts[predicted_spams]
unknown_cluster = digit_counts[predicted_unknowns]
print("hams:", np.unique(ham_cluster[:, 0], return_counts=True))
print("spams:", np.unique(spam_cluster[:, 0], return_counts=True))
print("unknowns:", np.unique(unknown_cluster[:, 0], return_counts=True))
|
challenge_1/python/sysek/src/reverse.py
|
rchicoli/2017-challenges
| 271 |
82556
|
def reverse(string):
return string[::-1]
print('Gimmie some word')
s = input()
print(reverse(s))
|
nova/keymgr/conf_key_mgr.py
|
zjzh/nova
| 1,874 |
82577
|
<reponame>zjzh/nova<filename>nova/keymgr/conf_key_mgr.py
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An implementation of a key manager that reads its key from the project's
configuration options.
This key manager implementation provides limited security, assuming that the
key remains secret. Using the volume encryption feature as an example,
encryption provides protection against a lost or stolen disk, assuming that
the configuration file that contains the key is not stored on the disk.
Encryption also protects the confidentiality of data as it is transmitted via
iSCSI from the compute host to the storage host (again assuming that an
attacker who intercepts the data does not know the secret key).
Because this implementation uses a single, fixed key, it proffers no
protection once that key is compromised. In particular, different volumes
encrypted with a key provided by this key manager actually share the same
encryption key so *any* volume can be decrypted once the fixed key is known.
"""
import binascii
from castellan.common.objects import symmetric_key as key
from castellan.key_manager import key_manager
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class ConfKeyManager(key_manager.KeyManager):
"""This key manager implementation supports all the methods specified by
the key manager interface. This implementation creates a single key in
response to all invocations of create_key. Side effects
(e.g., raising exceptions) for each method are handled
as specified by the key manager interface.
"""
def __init__(self, configuration):
LOG.warning('This key manager is insecure and is not recommended '
'for production deployments')
super(ConfKeyManager, self).__init__(configuration)
self.key_id = '00000000-0000-0000-0000-000000000000'
self.conf = CONF if configuration is None else configuration
if CONF.key_manager.fixed_key is None:
raise ValueError(_('keymgr.fixed_key not defined'))
self._hex_key = CONF.key_manager.fixed_key
def _get_key(self):
key_bytes = bytes(binascii.unhexlify(self._hex_key))
return key.SymmetricKey('AES', len(key_bytes) * 8, key_bytes)
def create_key(self, context, algorithm, length, **kwargs):
"""Creates a symmetric key.
This implementation returns a UUID for the key read from the
configuration file. A Forbidden exception is raised if the
specified context is None.
"""
if context is None:
raise exception.Forbidden()
return self.key_id
def create_key_pair(self, context, **kwargs):
raise NotImplementedError(
"ConfKeyManager does not support asymmetric keys")
def store(self, context, managed_object, **kwargs):
"""Stores (i.e., registers) a key with the key manager."""
if context is None:
raise exception.Forbidden()
if managed_object != self._get_key():
raise exception.KeyManagerError(
reason="cannot store arbitrary keys")
return self.key_id
def get(self, context, managed_object_id):
"""Retrieves the key identified by the specified id.
This implementation returns the key that is associated with the
specified UUID. A Forbidden exception is raised if the specified
context is None; a KeyError is raised if the UUID is invalid.
"""
if context is None:
raise exception.Forbidden()
if managed_object_id != self.key_id:
raise KeyError(str(managed_object_id) + " != " + str(self.key_id))
return self._get_key()
def delete(self, context, managed_object_id):
"""Represents deleting the key.
Because the ConfKeyManager has only one key, which is read from the
configuration file, the key is not actually deleted when this is
called.
"""
if context is None:
raise exception.Forbidden()
if managed_object_id != self.key_id:
raise exception.KeyManagerError(
reason="cannot delete non-existent key")
LOG.warning("Not deleting key %s", managed_object_id)
|
Giveme5W1H/extractor/tools/file/writer.py
|
bkrrr/Giveme5W
| 410 |
82600
|
<reponame>bkrrr/Giveme5W
import json
import pickle
import os
from Giveme5W1H.extractor.candidate import Candidate
from Giveme5W1H.extractor.configuration import Configuration as Config
class Writer:
"""
Helper to write pickle and JSON representations of documents.
There is no way to convert a JSON file back to a full document object; use pickles instead.
"""
def __init__(self):
"""
Paths are configured after construction via set_preprocessed_path() and setOutputPath().
"""
self._preprocessedPath = None
def _write_json(self, output_object):
outfile = open(self._outputPath + '/' + output_object['dId'] + '.json', 'w')
outfile.write(json.dumps(output_object, sort_keys=False, indent=2))
outfile.close()
def write_pickle(self, document):
#deprecated
with open(self.get_preprocessed_filepath(document.get_rawData()['dId']), 'wb') as f:
# Pickle the 'data' document using the highest protocol available.
pickle.dump(document, f, pickle.HIGHEST_PROTOCOL)
def write_pickle_file(self, path, file):
fullpath = self._preprocessedPath + '/' + path + '.pickle'
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
with open(fullpath , 'wb') as f:
pickle.dump(file, f, pickle.HIGHEST_PROTOCOL)
def get_preprocessed_filepath(self, id):
#deprecated
return self._preprocessedPath + '/' + id + '.pickle'
def get_preprocessed_path(self):
return self._preprocessedPath
def set_preprocessed_path(self, preprocessed_path):
self._preprocessedPath = preprocessed_path
def setOutputPath(self, output_path):
self._outputPath = output_path
def generate_json(self, document):
"""
:param document: The parsed Document
:type document: Document
:return: None
"""
# Reuse the input json as template for the output json
output = document.get_rawData()
if output is None:
output = {}
# Check if there isn't already a fiveWoneH literal
five_w_one_h_literal = output.setdefault('fiveWoneH', {})
# Save error flags (not under fiveWoneH; that would break downstream code, which expects only questions there)
output.setdefault('fiveWoneH_Metadata', {
'process_errors': document.get_error_flags()
})
if Config.get()['fiveWoneH_enhancer_full']:
output.setdefault('fiveWoneH_enhancer', document.get_enhancements() )
# Extract answers
answers = document.get_answers()
for question in answers:
# check if question literal is there
question_literal = five_w_one_h_literal.setdefault(question, {'extracted': []})
# add a label; it's only there for the UI
if Config.get()['label']:
question_literal['label'] = question
# check if extracted literal is there
extracted_literal = question_literal.setdefault('extracted', [])
for answer in answers[question]:
if isinstance(answer, Candidate):
# answer was already refactored
awJson = answer.get_json()
# clean up json by skipping NULL entries
if awJson:
extracted_literal.append(awJson)
else:
# fallback for none refactored extractors
candidate_json = {'score': answer[1], 'parts': []}
for candidateWord in answer[0]:
candidate_json['parts'].append({'text': candidateWord[0], 'nlpTag': candidateWord[1]})
extracted_literal.append(candidate_json)
if Config.get()['onlyTopCandidate']:
# stop after the first answer
break
return output
def write(self, document):
if self._outputPath:
a_json = self.generate_json(document)
self._write_json(a_json)
else:
print("set a outputPath before writing")
|
import/elftools/dwarf/aranges.py
|
seemoo-lab/polypyus_pdom
| 1,358 |
82606
|
<gh_stars>1000+
#-------------------------------------------------------------------------------
# elftools: dwarf/aranges.py
#
# DWARF aranges section decoding (.debug_aranges)
#
# <NAME> (<EMAIL>)
# This code is in the public domain
#-------------------------------------------------------------------------------
import os
from collections import namedtuple
from ..common.utils import struct_parse
from bisect import bisect_right
import math
# An entry in the aranges table;
# begin_addr: The beginning address in the CU
# length: The length of the address range in this entry
# info_offset: The CU's offset into .debug_info
# see 6.1.2 in DWARF4 docs for explanation of the remaining fields
ARangeEntry = namedtuple('ARangeEntry',
'begin_addr length info_offset unit_length version address_size segment_size')
class ARanges(object):
""" ARanges table in DWARF
stream, size:
A stream holding the .debug_aranges section, and its size
structs:
A DWARFStructs instance for parsing the data
"""
def __init__(self, stream, size, structs):
self.stream = stream
self.size = size
self.structs = structs
# Get entries of aranges table in the form of ARangeEntry tuples
self.entries = self._get_entries()
# Sort entries by the beginning address
self.entries.sort(key=lambda entry: entry.begin_addr)
# Create list of keys (first addresses) for better searching
self.keys = [entry.begin_addr for entry in self.entries]
def cu_offset_at_addr(self, addr):
""" Given an address, get the offset of the CU it belongs to, where
'offset' refers to the offset in the .debug_info section.
"""
tup = self.entries[bisect_right(self.keys, addr) - 1]
if tup.begin_addr <= addr < tup.begin_addr + tup.length:
return tup.info_offset
else:
return None
#------ PRIVATE ------#
def _get_entries(self):
""" Populate self.entries with ARangeEntry tuples for each range of addresses
"""
self.stream.seek(0)
entries = []
offset = 0
# one loop == one "set" == one CU
while offset < self.size:
aranges_header = struct_parse(self.structs.Dwarf_aranges_header,
self.stream, offset)
addr_size = self._get_addr_size_struct(aranges_header["address_size"])
# No segmentation
if aranges_header["segment_size"] == 0:
# pad to nearest multiple of tuple size
tuple_size = aranges_header["address_size"] * 2
fp = self.stream.tell()
seek_to = int(math.ceil(fp/float(tuple_size)) * tuple_size)
self.stream.seek(seek_to)
# entries in this set/CU
addr = struct_parse(addr_size('addr'), self.stream)
length = struct_parse(addr_size('length'), self.stream)
while addr != 0 or length != 0:
# 'begin_addr length info_offset unit_length version address_size segment_size'
entries.append(
ARangeEntry(begin_addr=addr,
length=length,
info_offset=aranges_header["debug_info_offset"],
unit_length=aranges_header["unit_length"],
version=aranges_header["version"],
address_size=aranges_header["address_size"],
segment_size=aranges_header["segment_size"]))
addr = struct_parse(addr_size('addr'), self.stream)
length = struct_parse(addr_size('length'), self.stream)
# Segmentation exists in executable
elif aranges_header["segment_size"] != 0:
raise NotImplementedError("Segmentation not implemented")
offset = (offset
+ aranges_header.unit_length
+ self.structs.initial_length_field_size())
return entries
def _get_addr_size_struct(self, addr_header_value):
""" Given this set's header value (int) for the address size,
get the Construct representation of that size
"""
if addr_header_value == 4:
return self.structs.Dwarf_uint32
else:
assert addr_header_value == 8
return self.structs.Dwarf_uint64
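# --- Editorial sketch (not part of the original module): the tuple-size
# alignment rule used in _get_entries(). With a 4-byte address size, each
# (addr, length) tuple occupies 8 bytes, so a stream position of 13 is
# rounded up to the next multiple of 8, i.e. 16, before tuples are read.
if __name__ == '__main__':
    tuple_size = 4 * 2
    fp = 13
    assert int(math.ceil(fp / float(tuple_size)) * tuple_size) == 16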
|
src/sage/interfaces/tachyon.py
|
UCD4IDS/sage
| 1,742 |
82619
|
<filename>src/sage/interfaces/tachyon.py
r"""
The Tachyon Ray Tracer
AUTHOR:
- <NAME>
"""
#*****************************************************************************
# Copyright (C) 2006 <NAME>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
import os
from sage.cpython.string import bytes_to_str
from sage.misc.pager import pager
from sage.misc.temporary_file import tmp_filename
from sage.structure.sage_object import SageObject
class TachyonRT(SageObject):
"""
The Tachyon Ray Tracer
tachyon_rt(model, outfile='sage.png', verbose=1, block=True, extra_opts='')
INPUT:
- ``model`` - a string that describes a 3d model in
the Tachyon modeling format. Type tachyon_rt.help() for a
description of this format.
- ``outfile`` - (default: 'sage.png') output filename;
the extension of the filename determines the type. Supported types
include:
- ``tga`` - 24-bit (uncompressed)
- ``bmp`` - 24-bit Windows BMP (uncompressed)
- ``ppm`` - 24-bit PPM (uncompressed)
- ``rgb`` - 24-bit SGI RGB (uncompressed)
- ``png`` - 24-bit PNG (compressed, lossless)
- ``verbose`` - integer; (default: 1)
- ``0`` - silent
- ``1`` - some output
- ``2`` - very verbose output
- ``block`` - bool (default: True); if False, run the
rendering command in the background.
- ``extra_opts`` - passed directly to tachyon command
line. Use tachyon_rt.usage() to see some of the possibilities.
OUTPUT:
- Some text may be displayed onscreen.
- The file outfile is created.
EXAMPLES:
.. automethod:: __call__
"""
def _repr_(self):
"""
Returns a brief description of this interface object (the Tachyon raytracer written by <NAME>).
TESTS::
sage: from sage.interfaces.tachyon import TachyonRT
sage: t = TachyonRT()
sage: print(t.__repr__())
<NAME>'s Tachyon Ray Tracer
"""
return "<NAME>'s Tachyon Ray Tracer"
def __call__(self, model, outfile='sage.png', verbose=1, extra_opts=''):
"""
This executes the tachyon program, given a scene file input.
INPUT:
- ``model`` -- string. The tachyon model.
- ``outfile`` -- string, default ``'sage.png'``. The filename
to save the model to.
- ``verbose`` -- 0, 1, (default) or 2. The verbosity level.
- ``extra_opts`` -- string (default: empty string). Extra
options that will be appended to the tachyon commandline.
EXAMPLES::
sage: from sage.interfaces.tachyon import TachyonRT
sage: tgen = Tachyon()
sage: tgen.texture('t1')
sage: tgen.sphere((0,0,0),1,'t1')
sage: tgen.str()[30:40]
'resolution'
sage: t = TachyonRT()
sage: import os
sage: t(tgen.str(), outfile=os.devnull)
tachyon ...
Tachyon Parallel/Multiprocessor Ray Tracer...
TESTS::
sage: from sage.env import SAGE_EXTCODE
sage: filename = os.path.join(SAGE_EXTCODE, 'doctest', 'invalid', 'syntax_error.tachyon')
sage: with open(filename, 'r') as f:
....: syntax_error = f.read()
sage: t(syntax_error, outfile=os.devnull)
Traceback (most recent call last):
...
RuntimeError: Tachyon Parallel/Multiprocessor Ray Tracer...
...
Parser failed due to an input file syntax error.
Aborting render.
"""
modelfile = tmp_filename(ext='.dat')
with open(modelfile, 'w') as file:
file.write(model)
cmd = ['tachyon', modelfile]
ext = outfile[-4:].lower()
if ext == '.png':
cmd += ['-format', 'PNG']
elif ext == '.tga':
cmd += ['-format', 'TARGA']
elif ext == '.bmp':
cmd += ['-format', 'BMP']
elif ext == '.ppm':
cmd += ['-format', 'PPM']
elif ext == '.rgb':
cmd += ['-format', 'RGB']
cmd += ['-o', outfile]
cmd += extra_opts.split()
if verbose >= 2:
cmd += ['+V']
if verbose:
print(' '.join(cmd))
import subprocess
out = bytes_to_str(subprocess.check_output(cmd))
if verbose >= 1:
print(out)
if out.rstrip().endswith('Aborting render.'):
raise RuntimeError(out)
if outfile != os.devnull and os.stat(outfile).st_size == 0:
raise RuntimeError('tachyon did not abort but output file is empty')
def usage(self, use_pager=True):
"""
Returns the basic description of using the Tachyon raytracer (simply what is returned by running tachyon with no input). The output is paged unless use_pager=False.
TESTS::
sage: from sage.interfaces.tachyon import TachyonRT
sage: t = TachyonRT()
sage: t.usage(use_pager=False)
...
tachyon modelfile [options]...
<BLANKLINE>
Model file formats supported:
filename.dat ...
"""
with os.popen('tachyon') as f:
r = f.read()
if use_pager:
pager()(r)
else:
print(r)
def help(self, use_pager=True):
"""
Prints (pages) the help file written by <NAME> describing scene files for Tachyon. The output is paged unless use_pager=False.
TESTS::
sage: from sage.interfaces.tachyon import TachyonRT
sage: t = TachyonRT()
sage: t.help(use_pager=False)
This help, which was written by <NAME>, describes ...
"""
s = r"""
This help, which was written by <NAME>, describes how to create
scene files.
At the present time, scene description files are very simple.
The parser can't handle multiple file scene descriptions, although they
may be added in the future. Most of the objects and their scene description
are closely related to the RAY API
\emph{(See the API docs for additional info.)}
\subsection{Basic Scene Requirements}
Unlike some other ray tracers out there, RAY requires that you
specify most of the scene parameters in the scene description file itself.
If users would rather specify some of these parameters at the command line,
then I may add that feature in the future.
A scene description file contains keywords, and values associated or grouped
with a keyword. All keywords can be in caps, lower case, or mixed case
for the convenience of the user. File names and texture names are
normally case-sensitive, although the behavior for file names is
operating system-dependent. All values are either character strings, or
floating point numbers. In some cases, the presence of one keyword will
require additional keyword / value pairs.
At the moment there are several keywords with values,
that must appear in every scene description file.
Every scene description file must begin with the
{\bf BEGIN\_SCENE} keyword, and end with the {\bf END\_SCENE} keyword.
All definitions and declarations of any kind must be inside the
{\bf BEGIN\_SCENE}, {\bf END\_SCENE} pair.
The {\bf RESOLUTION} keyword is followed by an x resolution
and a y resolution in terms of pixels on each axis. There are currently
no limits placed on the resolution of an output image other than the
computer's available memory and reasonable execution time.
An example of a simple scene description skeleton is shown below:
\begin{verbatim}
BEGIN_SCENE
RESOLUTION 1024 1024
...
... Camera definition..
...
... Other objects, etc..
...
END_SCENE
\end{verbatim}
\subsection{Camera and viewing parameters}
One of the most important parts of any scene, is the camera position and
orientation. Having a good angle on a scene can make the difference between
an average looking scene and a strikingly interesting one. There may be
multiple camera definitions in a scene file, but the last camera definition
overrides all previous definitions.
There are several parameters that control the camera in \RAY,
{\bf PROJECTION}, {\bf ZOOM}, {\bf ASPECTRATIO}, {\bf ANTIALIASING},
{\bf CENTER}, {\bf RAYDEPTH}, {\bf VIEWDIR}, and {\bf UPDIR}.
The first and last keywords required in the definition of a camera are the
{\bf CAMERA} and {\bf END\_CAMERA} keywords. The {\bf PROJECTION} keyword
is optional, the remaining camera keywords are required, and must be
written in the sequence they are listed in the examples in this section.
\subsubsection{Camera projection modes}
The {\bf PROJECTION} keyword must be followed by one of the supported
camera projection mode identifiers {\bf PERSPECTIVE}, {\bf PERSPECTIVE\_DOF},
{\bf ORTHOGRAPHIC}, or {\bf FISHEYE}. The {\bf PERSPECTIVE\_DOF} projection mode
requires two extra parameters {\bf FOCALLENGTH} and {\bf APERTURE}
which precede the regular camera options.
\begin{verbatim}
Camera
projection perspective_dof
focallength 0.75
aperture 0.02
Zoom 0.666667
Aspectratio 1.000000
Antialiasing 128
Raydepth 30
Center 0.000000 0.000000 -2.000000
Viewdir -0.000000 -0.000000 2.000000
Updir 0.000000 1.000000 -0.000000
End_Camera
\end{verbatim}
\subsubsection{Common camera parameters}
The {\bf ZOOM} parameter controls the camera in a way similar to a
telephoto lens on a 35mm camera. A zoom value of 1.0 is standard,
with a 90 degree field of view. By changing the zoom factor to 2.0,
the relative size of any feature in the frame is twice as big, while
the field of view is decreased slightly. The zoom effect is
implemented as a scaling factor on the height and width of the image
plane relative to the world.
The {\bf ASPECTRATIO} parameter controls the aspect ratio of the resulting
image. By using the aspect ratio parameter, one can produce images which
look correct on any screen. Aspect ratio alters the relative width of the
image plane, while keeping the height of the image plane constant. In
general, most workstation displays have an aspect ratio of 1.0. To see
what aspect ratio your display has, you can render a simple sphere, at
a resolution of 512x512 and measure the ratio of its width to its height.
The {\bf ANTIALIASING} parameter controls the maximum level of supersampling
used to obtain higher image quality. The parameter given sets the number of
additional rays to trace per-pixel to attain higher image quality.
The {\bf RAYDEPTH} parameter tells RAY what the maximum
level of reflections, refraction, or in general the maximum recursion
depth to trace rays to. A value between 4 and 12 is usually good. A
value of 1 will disable rendering of reflective or transmissive
objects (they'll be black).
The remaining three camera parameters are the most important, because
they define the coordinate system of the camera, and its position in the
scene. The {\bf CENTER} parameter is an X, Y, Z coordinate defining the
center of the camera \emph{(also known as the Center of Projection)}.
Once you have determined where the camera will be placed in the scene, you
need to tell RAY what the camera should be looking at. The
{\bf VIEWDIR} parameter is a vector indicating the direction the camera
is facing. It may be useful for me to add a "Look At" type keyword in
the future to make camera aiming easier. If people want or need the
"Look At" style camera, let me know. The last parameter needed to completely
define a camera is the "up" direction. The {\bf UPDIR} parameter is a vector
which points in the direction of the "sky". I wrote the camera so that
{\bf VIEWDIR} and {\bf UPDIR} don't have to be perpendicular, and there
shouldn't be a need for a "right" vector although some other ray tracers
require it. Here's a snippet of a camera definition:
\begin{verbatim}
CAMERA
ZOOM 1.0
ASPECTRATIO 1.0
ANTIALIASING 0
RAYDEPTH 12
CENTER 0.0 0.0 2.0
VIEWDIR 0 0 -1
UPDIR 0 1 0
END_CAMERA
\end{verbatim}
\subsubsection{Viewing frustum}
An optional {\bf FRUSTUM} parameter provides a means for rendering sub-images
in a larger frame, and correct stereoscopic images. The {\bf FRUSTUM}
keyword must be followed by four floating parameters, which indicate
the top, bottom, left and right coordinates of the image plane in
eye coordinates. When the projection mode is set to {\bf FISHEYE},
the frustum parameters correspond to spherical coordinates specified
in radians.
\begin{verbatim}
CAMERA
ZOOM 1.0
ASPECTRATIO 1.0
ANTIALIASING 0
RAYDEPTH 4
CENTER 0.0 0.0 -6.0
VIEWDIR 0.0 0.0 1.0
UPDIR 0.0 1.0 0.0
FRUSTUM -0.5 0.5 -0.5 0.5
END_CAMERA
\end{verbatim}
\subsection{Including Files}
The {\bf INCLUDE} keyword is used anywhere after the camera description,
and is immediately followed by a valid filename, for a file containing
additional scene description information. The included file is opened,
and processing continues as if it were part of the current file, until
the end of the included file is reached. Parsing of the current file
continues from where it left off prior to the included file.
\subsection{Scene File Comments}
The {\bf $\#$} keyword is used anywhere after the camera description, and
will cause RAY to ignore all characters from the {\bf $\#$} to the end
of the input line. The {\bf $\#$} character must be surrounded by whitespace
in order to be recognized. A sequence such as {\bf $\#\#\#$} will not be
recognized as a comment.
\subsection{Lights}
The most frequently used type of lights provided by RAY are positional
point light sources. The lights are actually small spheres, which are
visible. A point light is composed of three pieces of
information, a center, a radius (since it's a sphere), and a color.
To define a light, simply write the {\bf LIGHT} keyword, followed by
its {\bf CENTER} (a X, Y, Z coordinate), its {\bf RAD} (radius, a scalar),
and its {\bf COLOR} (a Red Green Blue triple). The radius parameter will
accept any value of 0.0 or greater. Lights of radius 0.0 will not be
directly visible in the rendered scene, but contribute light to the scene
normally.
For a light, the color values
range from 0.0 to 1.0, any values outside this range may yield unpredictable
results. A simple light definition looks like this:
\begin{verbatim}
LIGHT CENTER 4.0 3.0 2.0
RAD 0.2
COLOR 0.5 0.5 0.5
\end{verbatim}
This light would be gray colored if seen directly, and would be 50\%
intensity in each RGB color component.
RAY supports simple directional lighting, commonly used in
CAD and scientific visualization programs for its performance
advantages over positional lights. Directional lights cannot be
seen directly in scenes rendered by \RAY, only their illumination
contributes to the final image.
\begin{verbatim}
DIRECTIONAL_LIGHT
DIRECTION 0.0 -1.0 0.0
COLOR 1.0 0.0 0.0
\end{verbatim}
RAY supports spotlights, which are described very similarly to a
point light, but they are attenuated by angle from the direction vector,
based on a ``falloff start'' angle and a ``falloff end'' angle. Between
the starting and ending angles, the illumination is attenuated linearly.
The syntax for a spotlight description in a scene file is as follows.
\begin{verbatim}
SPOTLIGHT
CENTER 0.0 3.0 17.0
RAD 0.2
DIRECTION 0.0 -1.0 0.0
FALLOFF_START 20.0
FALLOFF_END 45.0
COLOR 1.0 0.0 0.0
\end{verbatim}
The lighting system implemented by RAY provides various levels of
distance-based lighting attenuation. By default, a light is not attenuated
by distance. If the \emph{attenuation} keywords is present immediately
prior to the light's color, RAY will accept coefficients which are used
to calculate distance-based attenuation, which is applied to the light by
multiplying with the resulting value. The attenuation factor is calculated
from the equation
$$
1/(K_c + K_l d + K_q d^2)
$$
This attenuation equation should be familiar to some as it
is the same lighting attenuation equation used by OpenGL.
The constant, linear, and quadratic terms are specified in a scene file
as shown in the following example.
\begin{verbatim}
LIGHT
CENTER -5.0 0.0 10.0
RAD 1.0
ATTENUATION CONSTANT 1.0 LINEAR 0.2 QUADRATIC 0.05
COLOR 1.0 0.0 0.0
\end{verbatim}
\subsection{Atmospheric effects}
RAY currently only implements one atmospheric effect,
simple distance-based fog.
\subsubsection{Fog}
RAY provides a simple distance-based fog effect intended to provide
functionality similar to that found in OpenGL, for compatibility with
software that requires an OpenGL-like fog implementation. Much like
OpenGL, RAY provides linear, exponential, and exponential-squared fog.
\begin{verbatim}
FOG
LINEAR START 0.0 END 50.0 DENSITY 1.0 COLOR 1.0 1.0 1.0
\end{verbatim}
\begin{verbatim}
FOG
EXP START 0.0 END 50.0 DENSITY 1.0 COLOR 1.0 1.0 1.0
\end{verbatim}
\begin{verbatim}
FOG
EXP2 START 0.0 END 50.0 DENSITY 1.0 COLOR 1.0 1.0 1.0
\end{verbatim}
\subsection{Objects}
\subsubsection{Spheres}
Spheres are the simplest object supported by RAY and they are
also the fastest object to render. Spheres are defined as one would expect,
with a {\bf CENTER}, {\bf RAD} (radius), and a texture. The texture may
be defined along with the object as discussed earlier, or it may be declared
and assigned a name.
Here's a sphere definition using a previously defined "NitrogenAtom" texture:
\begin{verbatim}
SPHERE CENTER 26.4 27.4 -2.4 RAD 1.0 NitrogenAtom
\end{verbatim}
A sphere with an inline texture definition is declared like this:
\begin{verbatim}
Sphere center 1.0 0.0 10.0
Rad 1.0
Texture Ambient 0.2 Diffuse 0.8 Specular 0.0 Opacity 1.0
Color 1.0 0.0 0.5
TexFunc 0
\end{verbatim}
Notice that in this example I used mixed case for the keywords, this is
allowable...
Review the section on textures if the texture definitions are confusing.
\subsubsection{Triangles}
Triangles are also fairly simple objects, constructed by listing the
three vertices of the triangle, and its texture. The order of the
vertices isn't important, the triangle object is "double sided", so the
surface normal is always pointing back in the direction of the incident ray.
The triangle vertices are listed as {\bf V0}, {\bf V1}, and {\bf V2}; each one
is an X, Y, Z coordinate. An example of a triangle is shown below:
\begin{verbatim}
TRI
V0 0.0 -4.0 12.0
V1 4.0 -4.0 8.0
V2 -4.0 -4.0 8.0
TEXTURE
AMBIENT 0.1 DIFFUSE 0.2 SPECULAR 0.7 OPACITY 1.0
COLOR 1.0 1.0 1.0
TEXFUNC 0
\end{verbatim}
\subsubsection{Smoothed Triangles}
Smoothed triangles are just like regular triangles, except that the
surface normal for each of the three vertices is used to determine the
surface normal across the triangle by linear interpolation.
Smoothed triangles yield curved looking objects and have nice
reflections.
\begin{verbatim}
STRI
V0 1.4 0.0 2.4
V1 1.35 -0.37 2.4
V2 1.36 -0.32 2.45
N0 -0.9 -0.0 -0.4
N1 -0.8 0.23 -0.4
N2 -0.9 0.27 -0.15
TEXTURE
AMBIENT 0.1 DIFFUSE 0.2 SPECULAR 0.7 OPACITY 1.0
COLOR 1.0 1.0 1.0
TEXFUNC 0
\end{verbatim}
\subsubsection{Infinite Planes}
Useful for things like desert floors, backgrounds, skies etc, the infinite
plane is pretty easy to use. An infinite plane only consists of two pieces
of information, the {\bf CENTER} of the plane, and a {\bf NORMAL} to the plane.
The center of the plane is just any point on the plane such that the point
combined with the surface normal define the equation for the plane.
As with triangles, planes are double sided. Here is an example of an
infinite plane:
\begin{verbatim}
PLANE
CENTER 0.0 -5.0 0.0
NORMAL 0.0 1.0 0.0
TEXTURE
AMBIENT 0.1 DIFFUSE 0.9 SPECULAR 0.0 OPACITY 1.0
COLOR 1.0 1.0 1.0
TEXFUNC 1
CENTER 0.0 -5.0 0.0
ROTATE 0. 0.0 0.0
SCALE 1.0 1.0 1.0
\end{verbatim}
\subsubsection{Rings}
Rings are a simple object, they are really a not-so-infinite plane.
Rings are simply an infinite plane cut into a washer shaped ring, infinitely
thin, just like a plane. A ring only requires two more pieces of information
than an infinite plane does, an inner and outer radius. Here's an example
of a ring:
\begin{verbatim}
Ring
Center 1.0 1.0 1.0
Normal 0.0 1.0 0.0
Inner 1.0
Outer 5.0
MyNewRedTexture
\end{verbatim}
\subsubsection{Infinite Cylinders}
Infinite cylinders are quite simple. They are defined by a center, an
axis, and a radius. An example of an infinite cylinder is:
\begin{verbatim}
Cylinder
Center 0.0 0.0 0.0
Axis 0.0 1.0 0.0
Rad 1.0
SomeRandomTexture
\end{verbatim}
\subsubsection{Finite Cylinders}
Finite cylinders are almost the same as infinite ones, but the
center and length of the axis determine the extents of the cylinder.
The finite cylinder is also really a shell, it doesn't have any
caps. If you need to close off the ends of the cylinder, use two
ring objects, with the inner radius set to 0.0 and the normal set
to be the axis of the cylinder. Finite cylinders are built this
way to enhance speed.
\begin{verbatim}
FCylinder
Center 0.0 0.0 0.0
Axis 0.0 9.0 0.0
Rad 1.0
SomeRandomTexture
\end{verbatim}
This defines a finite cylinder with radius 1.0, going from 0.0 0.0 0.0, to
0.0 9.0 0.0 along the Y axis. The main difference between an infinite cylinder
and a finite cylinder is in the interpretation of the {\bf AXIS} parameter.
In the case of the infinite cylinder, the length of the axis vector is
ignored. In the case of the finite cylinder, the axis parameter is used
to determine the length of the overall cylinder.
\subsubsection{Axis Aligned Boxes}
Axis aligned boxes are fast, but of limited usefulness. As such, I'm
not going to waste much time explaining 'em. An axis aligned box is
defined by a {\bf MIN} point, and a {\bf MAX} point. The volume between
the min and max points is the box. Here's a simple box:
\begin{verbatim}
BOX
MIN -1.0 -1.0 -1.0
MAX 1.0 1.0 1.0
Boxtexture1
\end{verbatim}
\subsubsection{Fractal Landscapes}
Currently fractal landscapes are a built-in function. In the near future
I'll allow the user to load an image map for use as a heightfield.
Fractal landscapes are currently forced to be axis aligned. Any suggestion
on how to make them more appealing to users is welcome. A fractal landscape
is defined by its "resolution" which is the number of grid points along
each axis, and by its scale and center. The "scale" is how large the
landscape is along the X, and Y axes in world coordinates. Here's a simple
landscape:
\begin{verbatim}
SCAPE
RES 30 30
SCALE 80.0 80.0
CENTER 0.0 -4.0 20.0
TEXTURE
AMBIENT 0.1 DIFFUSE 0.9 SPECULAR 0.0 OPACITY 1.0
COLOR 1.0 1.0 1.0
TEXFUNC 0
\end{verbatim}
The landscape shown above generates a square landscape made of 1,800 triangles.
When time permits, the heightfield code will be rewritten to be more
general and to increase rendering speed.
\subsubsection{Arbitrary Quadric Surfaces}
Docs soon. I need to add these into the parser, must have forgotten
before ;-)
\subsubsection{Volume Rendered Scalar Voxels}
These are a little trickier than the average object :-)
These are likely to change substantially in the very near future so I'm not
going to get too detailed yet.
A volume rendered data set is described by its axis aligned bounding box, and
its resolution along each axis. The final parameter is the voxel data
file. If you are seriously interested in messing with these, get hold of
me and I'll give you more info. Here's a quick example:
\begin{verbatim}
SCALARVOL
MIN -1.0 -1.0 -0.4
MAX 1.0 1.0 0.4
DIM 256 256 100
FILE /cfs/johns/vol/engine.256x256x110
TEXTURE
AMBIENT 1.0 DIFFUSE 0.0 SPECULAR 0.0 OPACITY 8.1
COLOR 1.0 1.0 1.0
TEXFUNC 0
\end{verbatim}
\subsection{Texture and Color}
\subsubsection{Simple Texture Characteristics}
The surface textures applied to an object drastically alter its overall
appearance, making textures and color one of the most important topics in
this manual. As with many other renderers, textures can be declared and
associated with a name so that they may be used over and over again in
a scene definition with less typing. If a texture is only need once, or it
is unique to a particular object in the scene, then it may be declared along
with the object it is applied to, and does not need a name.
The simplest texture definition is a solid color with no image mapping
or procedural texture mapping. A solid color texture is defined by the
{\bf AMBIENT}, {\bf DIFFUSE}, {\bf SPECULAR}, {\bf OPACITY} and {\bf COLOR}
parameters. The {\bf AMBIENT} parameter defines the ambient lighting
coefficient to be used when shading the object. Similarly, the {\bf DIFFUSE}
parameter is the relative contribution of the diffuse shading to the surface
appearance. The {\bf SPECULAR} parameter is the contribution from perfectly
reflected rays, as if on a mirrored surface. {\bf OPACITY} defines how
transparent a surface is. An {\bf OPACITY} value of 0.0 renders the object
completely invisible. An {\bf OPACITY} value of 1.0 makes the object
completely solid, and non-transmissive. In general, the values for the
ambient, diffuse, and specular parameters should add up to 1.0, if they don't
then pixels may be over or underexposed quite easily. These parameters
function in a manner similar to that of other ray tracers. The {\bf COLOR}
parameter is an RGB triple with each value ranging from 0.0 to 1.0 inclusive.
If the RGB values stray outside the 0.0 to 1.0 range, results are undefined.
In the case of solid textures, a final parameter, {\bf TEXFUNC} is set to
zero (integer).
\subsubsection{Texture Declaration and Aliasing}
To define a simple texture for use on several objects in a scene, the
{\bf TEXDEF} keyword is used. The {\bf TEXDEF} keyword is followed by
a case sensitive texture name, which will subsequently be used while
defining objects. If many objects in a scene use the same texture through
texture definition, a significant amount of memory may be saved since only
one copy of the texture is present in memory, and its shared by all
of the objects. Here is an example of a solid texture definition:
\begin{verbatim}
TEXDEF MyNewRedTexture
AMBIENT 0.1 DIFFUSE 0.9 SPECULAR 0.0 OPACITY 1.0
COLOR 1.0 0.0 0.0 TEXFUNC 0
\end{verbatim}
When this texture is used in an object definition, it is referenced only by
name. Be careful not to use one of the other keywords as a defined texture,
this will probably cause the parser to explode, as I don't check for use
of keywords as texture names.
When a texture is declared within an object definition, it appears in
an identical format to the {\bf TEXDEF} declaration, but the {\bf TEXTURE}
keyword is used instead of {\bf TEXDEF}. If it is useful to have several
names for the same texture (when you are too lazy to actually finish defining
different variations of a wood texture for example, and just want to be
approximately correct) aliases can be constructed using the
{\bf TEXALIAS} keyword, along with the alias name, and the original name.
An example of a texture alias is:
\begin{verbatim}
TEXALIAS MyNewestRedTexture MyNewRedTexture
\end{verbatim}
This line would alias MyNewestRedTexture to be the same thing as the
previously declared MyNewRedTexture. Note that the source texture must
be declared before any aliases that use it.
\subsubsection{Image Maps and Procedural Textures} Image maps and
procedural textures are very useful in making realistic-looking scenes. A
good image map can do as much for the realism of a wooden table as any
amount of sophisticated geometry or lighting. Image maps are made by
wrapping an image on to an object in one of three ways, a spherical
map, a cylindrical map, and a planar map. Procedural textures are
used in a way similar to the image maps, but they are on the fly and
do not use much memory compared to the image maps. The main
disadvantage of the procedural maps is that they must be hard-coded
into RAY when it is compiled.
The syntax used for all texture maps is fairly simple to learn. The biggest
problem with the way that the parser is written now is that the different
mappings are selected by an integer, which is not very user friendly. I
expect to rewrite this section of the parser sometime in the near future to
alleviate this problem. When I rewrite the parser, I may also end up altering
the parameters that are used to describe a texture map, and some of them may
become optional rather than required.
\begin{center}
\begin{tabular}{|c|c|}
\multicolumn{2}{c}{Texture Mapping Functions} \\
\hline
{Value for TEXFUNC} & {Mapping and Texture Description}\\
\hline
{0} & {No special texture, plain shading} \\
{1} & {3D checkerboard function, like a Rubik's cube} \\
{2} & {Grit Texture, randomized surface color} \\
{3} & {3D marble texture, uses object's base color} \\
{4} & {3D wood texture, light and dark brown, not very good yet} \\
{5} & {3D gradient noise function (can't remember what it looks like)} \\
{6} & {Don't remember} \\
{7} & {Cylindrical Image Map, requires ppm filename} \\
{8} & {Spherical Image Map, requires ppm filename} \\
{9} & {Planar Image Map, requires ppm filename} \\
\hline
\end{tabular}
\end{center}
Here's an example of a sphere, with a spherical image map applied to its
surface:
\begin{verbatim}
SPHERE
CENTER 2.0 0.0 5.0
RAD 2.0
TEXTURE
AMBIENT 0.4 DIFFUSE 0.8 SPECULAR 0.0 OPACITY 1.0
COLOR 1.0 1.0 1.0
TEXFUNC 7 /cfs/johns/imaps/fire644.ppm
CENTER 2.0 0.0 5.0
ROTATE 0.0 0.0 0.0
SCALE 2.0 -2.0 1.0
\end{verbatim}
Basically, the image maps require the center, rotate and scale
parameters so that you can position the image map on the object
properly.
"""
from sage.misc.sagedoc import format
f = format(s)
f = f.replace('{ ','').replace('}','').replace('{','')
if use_pager:
pager()(f)
else:
print(f)
tachyon_rt = TachyonRT()
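# --- Editorial sketch (not part of the original module): rendering a model.
# It needs a Sage session with the `tachyon` binary on the PATH, so it is
# left commented rather than executable.
#
#   from sage.plot.plot3d.tachyon import Tachyon
#   t = Tachyon(xres=300, yres=300)
#   t.texture('t0')
#   t.sphere((0, 0, 0), 1, 't0')
#   tachyon_rt(t.str(), outfile='sphere.png', verbose=0)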
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/methods/__init__.py
|
disrupted/Trakttv.bundle
| 1,346 |
82638
|
from plugin.scrobbler.methods.s_logging import Logging
from plugin.scrobbler.methods.s_websocket import WebSocket
__all__ = ['Logging', 'WebSocket']
|
pyjswidgets/pyjamas/ui/TextBoxBase.mshtml.py
|
takipsizad/pyjs
| 739 |
82651
|
<gh_stars>100-1000
# Editorial addition: imports assumed for standalone use; platform override
# files in pyjs are normally merged into the base module, which already
# imports these.
from pyjamas import DOM
from pyjamas.ui.FocusWidget import FocusWidget
class TextBoxBase(FocusWidget):
def getCursorPos(self):
try :
elem = self.getElement()
tr = elem.document.selection.createRange()
if tr.parentElement().uniqueID != elem.uniqueID:
return -1
return -tr.move("character", -65535)
except:
return 0
def getSelectionLength(self):
try :
elem = self.getElement()
tr = elem.document.selection.createRange()
if tr.parentElement().uniqueID != elem.uniqueID:
return 0
return tr.text and len(tr.text) or 0
except:
return 0
def setSelectionRange(self, pos, length):
try :
elem = self.getElement()
tr = elem.createTextRange()
tr.collapse(True)
tr.moveStart('character', pos)
tr.moveEnd('character', length)
tr.select()
except :
pass
def getText(self):
return DOM.getAttribute(self.getElement(), "value") or ""
def setText(self, text):
DOM.setAttribute(self.getElement(), "value", text)
|
simdeblur/dataset/__init__.py
|
ljzycmd/SimDeblur
| 190 |
82671
|
from .dvd import DVD
from .gopro import GOPRO
from .reds import REDS
#
from .build import build_dataset, list_datasets
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
downstream/Up-Down_VC/scripts/hdf5_2_bufile.py
|
alfred100p/VC-R-CNN
| 344 |
82682
|
import h5py
import numpy as np
# Use a context manager so the HDF5 file is closed when done.
with h5py.File('/data2/wt/openimages/vc_feature/1coco_train_all_bu_2.hdf5', 'r') as f:
    for key in f:
        feature = f[key]['feature'][:]
        np.save('/data2/wt/openimages/vc_feature/coco_vc_all_bu/' + key + '.npy', feature)
|
Kerning/New Tab with Overkerns.py
|
justanotherfoundry/Glyphs-Scripts
| 283 |
82686
|
<filename>Kerning/New Tab with Overkerns.py
#MenuTitle: New Tab with Overkerned Pairs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Asks a threshold percentage, and opens a new tab with all kern pairs going beyond the width threshold.
"""
import vanilla
class FindOverkerns( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 350
windowHeight = 135
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Find Negative Overkerns in This Master", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.FindOverkerns.mainwindow" # stores last window position and size
)
# UI elements:
self.w.text_1 = vanilla.TextBox( (15-1, 12+2, 220, 14), "Open tab with kerns beyond threshold:", sizeStyle='small' )
self.w.threshold = vanilla.EditText( (225, 12-1, -15, 20), "40", sizeStyle = 'small')
self.w.text_2 = vanilla.TextBox( (15-1, 12+25, -15, 14), "(Maximum percentage of letter widths that may be kerned.)", sizeStyle='small' )
self.w.limitToExportingGlyphs = vanilla.CheckBox( (15, 12+50, 150, 20), "Limit to exporting glyphs", value=True, callback=self.SavePreferences, sizeStyle='small' )
# Run Button:
self.w.runButton = vanilla.Button((-100-15, -20-15, -15, -15), "Open Tab", sizeStyle='regular', callback=self.FindOverkernsMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Find Overkerns' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.FindOverkerns.threshold"] = self.w.threshold.get()
Glyphs.defaults["com.mekkablue.FindOverkerns.limitToExportingGlyphs"] = self.w.limitToExportingGlyphs.get()
except:
return False
return True
def LoadPreferences( self ):
try:
Glyphs.registerDefault("com.mekkablue.FindOverkerns.threshold", "40")
Glyphs.registerDefault("com.mekkablue.FindOverkerns.limitToExportingGlyphs", True)
self.w.threshold.set( Glyphs.defaults["com.mekkablue.FindOverkerns.threshold"] )
self.w.limitToExportingGlyphs.set( Glyphs.defaults["com.mekkablue.FindOverkerns.limitToExportingGlyphs"] )
except:
return False
return True
def FindOverkernsMain( self, sender ):
try:
# brings macro window to front and clears its log:
Glyphs.clearLog()
# retrieve user entry:
thresholdFactor = None
try:
thresholdFactor = float( Glyphs.defaults["com.mekkablue.FindOverkerns.threshold"] )/100.0
except:
Message(title="Value Error", message="The threshold value you entered is invalid", OKButton="Oops")
limitToExportingGlyphs = bool( Glyphs.defaults["com.mekkablue.FindOverkerns.limitToExportingGlyphs"] )
# continue if user entry is valid:
if thresholdFactor is not None:
thisFont = Glyphs.font # frontmost font
thisMaster = thisFont.selectedFontMaster # current master
masterKerning = thisFont.kerning[thisMaster.id] # kerning dictionary
tabText = "" # the text appearing in the new tab
# collect minimum widths for every kerning group:
leftGroupMinimumWidths = {}
leftGroupNarrowestGlyphs = {}
rightGroupMinimumWidths = {}
rightGroupNarrowestGlyphs = {}
if limitToExportingGlyphs:
theseGlyphs = [g for g in thisFont.glyphs if g.export]
else:
theseGlyphs = thisFont.glyphs
for thisGlyph in theseGlyphs:
thisLayer = thisGlyph.layers[thisMaster.id]
# left side of the glyph (= right side of kern pair)
if thisGlyph.leftKerningGroup:
if thisGlyph.leftKerningGroup in leftGroupMinimumWidths:
if thisLayer.width < leftGroupMinimumWidths[thisGlyph.leftKerningGroup]:
leftGroupMinimumWidths[thisGlyph.leftKerningGroup] = thisLayer.width
leftGroupNarrowestGlyphs[thisGlyph.leftKerningGroup] = thisGlyph.name
else:
leftGroupMinimumWidths[thisGlyph.leftKerningGroup] = thisLayer.width
leftGroupNarrowestGlyphs[thisGlyph.leftKerningGroup] = thisGlyph.name
# right side of the glyph (= left side of kern pair)
if thisGlyph.rightKerningGroup:
if thisGlyph.rightKerningGroup in rightGroupMinimumWidths:
if thisLayer.width < rightGroupMinimumWidths[thisGlyph.rightKerningGroup]:
rightGroupMinimumWidths[thisGlyph.rightKerningGroup] = thisLayer.width
rightGroupNarrowestGlyphs[thisGlyph.rightKerningGroup] = thisGlyph.name
else:
rightGroupMinimumWidths[thisGlyph.rightKerningGroup] = thisLayer.width
rightGroupNarrowestGlyphs[thisGlyph.rightKerningGroup] = thisGlyph.name
# go through kern values and collect them in tabText:
for leftKey in masterKerning.keys():
for rightKey in masterKerning[leftKey].keys():
kernValue = masterKerning[leftKey][rightKey]
if kernValue < 0:
leftWidth = None
rightWidth = None
try:
# collect widths for comparison
if leftKey[0] == "@":
# leftKey is a group name like "@MMK_L_y"
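									# the "@MMK_L_" prefix is 7 characters, so [7:] yields the bare group name (e.g. "y")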
groupName = leftKey[7:]
leftWidth = rightGroupMinimumWidths[groupName]
leftGlyphName = rightGroupNarrowestGlyphs[groupName]
else:
# leftKey is a glyph ID like "<KEY>"
leftGlyph = thisFont.glyphForId_(leftKey)
# exclude if non-exporting and user limited to exporting glyphs:
if limitToExportingGlyphs and not leftGlyph.export:
kernValue = 0.0
leftWidth = leftGlyph.layers[thisMaster.id].width
leftGlyphName = leftGlyph.name
if rightKey[0] == "@":
# rightKey is a group name like "@MMK_R_y"
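									# the "@MMK_R_" prefix is likewise 7 characters, so [7:] yields the bare group name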
groupName = rightKey[7:]
rightWidth = leftGroupMinimumWidths[groupName]
rightGlyphName = leftGroupNarrowestGlyphs[groupName]
else:
# rightKey is a glyph ID like "<KEY>"
rightGlyph = thisFont.glyphForId_(rightKey)
# exclude if non-exporting and user limited to exporting glyphs:
if limitToExportingGlyphs and not rightGlyph.export:
kernValue = 0.0
rightWidth = rightGlyph.layers[thisMaster.id].width
rightGlyphName = rightGlyph.name
# compare widths and collect overkern if it is one:
# (kernValue of excluded glyphs will be 0.0 and not trigger the if clause)
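								# e.g. at a 40% threshold, a -260 kern next to a 520-unit-wide glyph is flagged: 260 > 0.4 * 520 = 208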
if abs(kernValue) > thresholdFactor*leftWidth or abs(kernValue) > thresholdFactor*rightWidth:
tabText += "/%s/%s\n" % (leftGlyphName, rightGlyphName)
except Exception as e:
# probably a kerning group name found in the kerning data, but no glyph assigned to it:
# brings macro window to front and reports warning:
Glyphs.showMacroWindow()
import traceback
errormsg = traceback.format_exc().lower()
								for side in ("left", "right"):
									if side not in errormsg:
print(
"⚠️ Warning: The %s group '%s' found in your kerning data does not appear in any glyph. Clean up your kerning, and run the script again." % (
side,
groupName,
))
if tabText:
# opens new Edit tab:
thisFont.newTab( tabText[:-1] )
else:
Message(title="No Overkerns Found", message="Could not find any kern pairs beyond the threshold in this master.", OKButton="Phew!")
if not self.SavePreferences( self ):
print("Note: 'Find Overkerns' could not write preferences.")
# self.w.close() # delete if you want window to stay open
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Find Overkerns Error: %s" % e)
import traceback
print(traceback.format_exc())
FindOverkerns()
|