content | type
---|---
stringlengths 0–894k | stringclasses 2 values
version = "1.0.0a"
|
python
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from django.utils.text import slugify
from django.http import HttpResponse, JsonResponse
from build.management.commands.base_build import Command as BaseBuild
from common.tools import fetch_from_cache, save_to_cache, fetch_from_web_api
from residue.models import Residue
from protein.models import Protein, ProteinGProteinPair
from ligand.models import *
from mutation.models import Mutation
from ligand.functions import get_or_make_ligand
from common.models import WebLink, WebResource, Publication
from multiprocessing.pool import ThreadPool
from chembl_webresource_client.new_client import new_client
import queue
import logging
import os
from datetime import datetime
import xlrd
import operator
import traceback
import time
import math
import json
from decimal import Decimal
import threading
import concurrent.futures
import pytz
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
help = 'Updates ChEMBL data and imports it'
publication_cache = {}
ligand_cache = {}
data_all = []
my_queue = queue.Queue()
def storeInQueue(f):
def wrapper(*args):
my_queue.put(f(*args))
return wrapper
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
def handle(self, *args, **options):
if options['test_run']:
print('Skipping in test run')
return
# delete any existing structure data
if options['purge']:
try:
print('Started purging bias data')
self.purge_bias_data()
print('Ended purging bias data')
except Exception as msg:
print(msg)
self.logger.error(msg)
# import the structure data
# self.analyse_rows()
try:
            print('Updating ChEMBL data')
self.analyse_rows()
self.logger.info('COMPLETED updating ChEMBL Data')
except Exception as msg:
print('--error--', msg, '\n')
self.logger.info("The error appeared in def handle")
def purge_bias_data(self):
delete_bias_excel = BiasedExperiment.objects.all()
delete_bias_excel.delete()
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
def get_gpcrs(self):
print('---get_gpcrs from ChEMBL---')
g_family_ids = [ 1,147,165,166,202,281,407,435,446,460,468,479,480,483,484,486,487,491,499,500,501,502,503,504,506,507,508,509,
510,515,516,517,518,528,533,534,535,540,541,542,544,547,548,549,550,551,554,555,
556,558,559,561,562,563,565,566,567,568,569,570,571,573,574,603,604,605,606,607,
608,609,610,611,612,613,614,615,616,617,618,619,620,621,830,1020,1021,1022,1038,
1082,1083,1088,1089,1251,1253,1256,1259,1265,1266,1267,1268,1269,1270,1271,1272,
1273,1274,1275,1277,1278]
target_list = set()
for item in g_family_ids:
gproteins = new_client.target_component.filter(protein_classifications__protein_classification_id=item).only('targets')
            for protein in gproteins:
                for target in protein['targets']:
                    # target_list is a set, so duplicate target ids are ignored automatically
                    target_list.add(target['target_chembl_id'])
        print('GPCRs ready')
return target_list
    def get_chembl_assay(self, targets, prev_id, current_id):
        # Gets assays from ChEMBL (in batches of 20) and filters them to GPCR targets
        # using Tsonko's rules. Arguments: GPCR target ids (targets), batch start
        # index (prev_id) and batch end index (current_id).
new_test = new_client.activity.filter(pchembl_value__isnull=False).filter(data_validity_comment__isnull=True
).filter(standard_value__isnull = False
).filter(standard_units__isnull = False
).filter(target_chembl_id__in = targets
).only(['molecule_chembl_id', 'target_chembl_id' ,'standard_type',
'standard_value','standard_units','standard_relation','activity_comment',
'assay_description','assay_type',
'document_chembl_id','pchembl_value',
'activity_id','canonical_smiles','assay_chembl_id'])[prev_id:current_id]
return new_test
def get_dois(self, dci, q):
# gets references for assays from ChEMBL (DOI)
pubs = new_client.document.filter(document_chembl_id = dci).only('doi')
if len(pubs) > 0:
doi = pubs[0]['doi']
q.put(doi)
else:
q.put(None)
def get_cell_line(self, assay_id, q):
# gets cell line info for assays from ChEMBL
new_test = new_client.assay.filter(assay_chembl_id = assay_id).only('assay_cell_type')
if len(new_test) > 0:
cell_line = new_test[0]['assay_cell_type']
q.put(cell_line)
else:
q.put('no data')
    def validate_data(self, i):
        # Validates ChEMBL assays in accordance with additional Tsonko rules:
        # the entry must use a recognised concentration unit, must not have
        # assay type 'U' or 'A', and must not be flagged as inconclusive.
        allowed_units = ('nM', 'um', 'M', 'pmol', 'mM', 'fmol', 'pM', 'nmol', 'fM')
        result = False
        if i['standard_units'] in allowed_units:
            if i['assay_type'] not in ('U', 'A'):
                if i['activity_comment'] not in ('inconclusive', 'Inconclusive'):
                    result = True
        return result
def process_chembl(self,chembl_assays, temp_increment):
#Loop through API results (20 objects per batch)
chembl_data = dict()
main_dict = dict()
increment = 0
for i in chembl_assays:
temp_increment = temp_increment+1
            if not self.validate_data(i):
continue
temp_dict = dict()
temp_dict['protein'] = self.fetch_protein( i['target_chembl_id'])
temp_dict['doi']=None
if temp_dict['protein'] == None:
continue
temp_dict['smiles'] = i['canonical_smiles']
temp_dict['ligand'] = self.fetch_ligand(i['molecule_chembl_id'],i['canonical_smiles'])
if temp_dict['ligand'] == None:
continue
            if( self.check_duplicates(temp_dict["ligand"], temp_dict["protein"], i["assay_description"],
i["molecule_chembl_id"],
i["standard_value"],i["standard_units"],
i["pchembl_value"]) == True):
continue
# q = queue.Queue()
# x=threading.Thread(target=self.get_cell_line, args=(i['assay_chembl_id'], q)).start()
cell_line = None
#
# pub_q = queue.Queue
# y=threading.Thread(target=self.get_dois, args=(i['document_chembl_id'], q)).start()
# pub = q.get()
# if pub is not None:
# temp_dict['doi'] = self.fetch_publication(pub)
temp_dict['activity_id'] = i['activity_id']
temp_dict['standard_type'] = i['standard_type']
temp_dict['standard_value'] = i['standard_value']
temp_dict['standard_units'] = i['standard_units']
temp_dict['standard_relation'] = i['standard_relation']
temp_dict['assay_description'] = i['assay_description']
temp_dict['assay_type'] = i['assay_type']
temp_dict['cell_line'] = cell_line
temp_dict['pchembl_value'] = i['pchembl_value']
temp_dict['document_chembl_id'] = i['document_chembl_id']
temp_dict['chembl_id'] = i['molecule_chembl_id']
temp_dict['assay_id'] = i['assay_chembl_id']
chembl_data[increment] = temp_dict
increment=increment+1
self.upload_to_db(chembl_data)
def analyse_rows(self):
"""
Fetch data to models
Saves to DB
"""
print('---Starting---')
current_id = 0
prev_id = 0
start = time.time()
target_list = self.get_gpcrs()
target_list_list = list(target_list)
start = time.time()
chembl_assays = None
print('---process_chembl---')
#range should be set to number of total objects/20 (API batch size)
#555200 is the last id saved before session was aborted
for i in range(30578):
current_id = 591900 + ((i+1) * 20)
prev_id = 591900 + (i *20)
chembl_assays = self.get_chembl_assay(target_list_list, prev_id, current_id)
chembl_assays=list(chembl_assays)
self.process_chembl(chembl_assays,current_id)
# control the flow
if(current_id%100==0):
end = time.time()
print('---temp_increment time---',current_id, end - start)
    def check_duplicates(self, ligand, protein, assay_description, chembl, standard_value, standard_units, pchembl_value):
        # Checks whether this assay experiment is already saved
        try:
            experiment = AssayExperiment.objects.filter(
                ligand=ligand, protein=protein, assay_description=assay_description,
                chembl=chembl, standard_value=standard_value, standard_units=standard_units, pchembl_value=pchembl_value)
            experiment = experiment.get()
            if experiment:
                return True
            else:
                return False
        except Exception as msg:
            self.mylog.exception(
                "AssayExperiment duplicate check error | module: check_duplicates.")
            return False
def upload_to_db(self, chembl):
# saves data
for i in chembl.items():
chembl_data = AssayExperiment(ligand = i[1]["ligand"],
publication = i[1]["doi"],
protein = i[1]["protein"],
chembl = i[1]["chembl_id"],
smiles = i[1]["smiles"],
cell_line = i[1]['cell_line'],
activity = i[1]["activity_id"],
standard_type = i[1]["standard_type"],
standard_value = i[1]["standard_value"],
standard_units = i[1]["standard_units"],
standard_relation = i[1]["standard_relation"],
assay_description = i[1]["assay_description"],
assay_type = i[1]["assay_type"],
pchembl_value = i[1]["pchembl_value"],
document_chembl_id = i[1]["document_chembl_id"],
)
chembl_data.save()
# print('--saved---')
def fetch_measurements(self, potency, p_type, unit):
        # Used for the bias prediction build; temporarily unused.
if p_type.lower() == 'pec50':
potency = 10**(potency*(-1))
p_type = 'EC50'
elif p_type.lower() == 'logec50':
potency = 10**(potency)
p_type = 'EC50'
elif p_type.lower() == 'pic50':
potency = 10**(potency*(-1))
p_type = 'IC50'
elif p_type.lower() == 'logic50':
potency = 10**(potency)
p_type = 'IC50'
        elif p_type.lower() == 'ec50':
            # convert the reported unit to molar concentration
            if unit.lower() == 'nm':
                potency = potency * 10**(-9)
            elif unit.lower() in ('µm', 'um'):
                potency = potency * 10**(-6)
            elif unit.lower() == 'pm':
                potency = potency * 10**(-12)
            elif unit.lower() == 'mm':
                potency = potency * 10**(-3)
            else:
                pass
if potency:
potency = "{:.2E}".format(Decimal(potency))
return potency,p_type
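    # Illustrative behaviour of fetch_measurements (example values): a pEC50 of 7.5
    # is converted to an EC50 of 10**-7.5 M (about 3.2e-08) and returned in scientific
    # notation together with the normalised type 'EC50'; plain EC50 values are instead
    # rescaled from their reported unit to molar.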
def fetch_protein(self,target):
"""
fetch receptor with Protein model
requires: protein id, source
"""
test = None
test = Protein.objects.filter(web_links__index = target, web_links__web_resource__slug = 'chembl').first()
return test
def fetch_ligand(self, ligand_id, smiles):
"""
fetch ligands with Ligand model
requires: ligand id, ligand id type, ligand name
requires: source_file name
"""
l = None
try:
if ligand_id in self.ligand_cache:
l = self.ligand_cache[ligand_id]
else:
l = Ligand.objects.filter(properities__web_links__index=ligand_id).first()
if l:
cid = l.properities.web_links.filter(web_resource__slug = 'pubchem').first()
if cid:
cid = cid.index
else:
l = None
else:
l = get_or_make_ligand(smiles, 'SMILES', ligand_id, )
except Exception as msg:
l = None
# print('ligand_id---',l,'\n end')
return l
def fetch_publication(self, publication_doi):
"""
fetch publication with Publication model
requires: publication doi or pmid
"""
try:
float(publication_doi)
publication_doi = str(int(publication_doi))
except ValueError:
pass
if publication_doi.isdigit(): # assume pubmed
pub_type = 'pubmed'
else: # assume doi
pub_type = 'doi'
if publication_doi not in self.publication_cache:
try:
wl = WebLink.objects.get(
index=publication_doi, web_resource__slug=pub_type)
except WebLink.DoesNotExist:
try:
wl = WebLink.objects.create(index=publication_doi,
web_resource=WebResource.objects.get(slug=pub_type))
except IntegrityError:
wl = WebLink.objects.get(
index=publication_doi, web_resource__slug=pub_type)
try:
pub = Publication.objects.get(web_link=wl)
except Publication.DoesNotExist:
pub = Publication()
try:
pub.web_link = wl
pub.save()
except IntegrityError:
pub = Publication.objects.get(web_link=wl)
if pub_type == 'doi':
pub.update_from_doi(doi=publication_doi)
elif pub_type == 'pubmed':
pub.update_from_pubmed_data(index=publication_doi)
try:
pub.save()
except:
self.mylog.debug(
"publication fetching error | module: fetch_publication. Row # is : " + str(publication_doi) + ' ' + pub_type)
# if something off with publication, skip.
self.publication_cache[publication_doi] = pub
else:
pub = self.publication_cache[publication_doi]
return pub
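# Usage sketch (hypothetical command name -- the real name is this module's filename
# under build/management/commands/):
#   python manage.py <command_name> --proc 1      # update and import ChEMBL data
#   python manage.py <command_name> --purge       # purge existing bias records first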
|
python
|
from dataclasses import dataclass
@dataclass
class FileContent:
content: str
|
python
|
from itertools import product
from .constraints import Validator
def all_cut_edge_flips(partition):
for edge, index in product(partition.cut_edges, (0, 1)):
yield {edge[index]: partition.assignment[edge[1 - index]]}
def all_valid_states_one_flip_away(partition, constraints):
"""Generates all valid Partitions that differ from the given partition
by one flip. These are the given partition's neighbors in the metagraph
of partitions.
"""
if callable(constraints):
is_valid = constraints
else:
is_valid = Validator(constraints)
for flip in all_cut_edge_flips(partition):
next_state = partition.flip(flip)
if is_valid(next_state):
yield next_state
def all_valid_flips(partition, constraints):
for state in all_valid_states_one_flip_away(partition, constraints):
yield state.flips
def metagraph_degree(partition, constraints):
return len(list(all_valid_states_one_flip_away(partition, constraints)))
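# Usage sketch: `partition` is assumed to be a GerryChain-style object exposing
# `cut_edges`, `assignment` and `flip`. The tiny stand-in below exists only to
# illustrate the generators above; it is not part of the real package.
def _demo_metagraph_degree():
    class _TinyPartition:
        def __init__(self, assignment, edges):
            self.assignment = dict(assignment)
            self.edges = edges
        @property
        def cut_edges(self):
            return {(u, v) for u, v in self.edges if self.assignment[u] != self.assignment[v]}
        def flip(self, flips):
            return _TinyPartition({**self.assignment, **flips}, self.edges)
    # 4-node path graph, districts {0, 1} = "A" and {2, 3} = "B"; only edge (1, 2) is cut.
    part = _TinyPartition({0: "A", 1: "A", 2: "B", 3: "B"}, edges=[(0, 1), (1, 2), (2, 3)])
    def both_districts_present(p):
        return len(set(p.assignment.values())) == 2
    return metagraph_degree(part, both_districts_present)  # -> 2 (flip node 1 to "B", or node 2 to "A")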
|
python
|
from ..lab2.TreeNode import TreeNode
from ..lab1.Type import Type
from ..lab1.Token import Token
from ..lab1.Tag import Tag
from ..lab2.NodeType import NodeType
from TypeException import TypeException
from SemanticException import SemanticException
class SemanticAnalyzer(object):
def __init__(self, symbol_table, syntax_tree):
"""
:type symbol_table:dict
:type syntax_tree:TreeNode
"""
self.symbol_table = symbol_table
self.syntax_tree = syntax_tree
def check_types(self):
for block in self.syntax_tree.children:
if block.node_type == NodeType.CONST_DEF:
self.check_constant_definition(block)
elif block.node_type == NodeType.VAR_DECL:
self.check_variable_declaration(block)
elif block.node_type == NodeType.SEQUENCE:
self.check_sequence_type(block)
def check_constant_definition(self, constant_definition):
"""
:type constant_definition:TreeNode
:type variable_declarations:TreeNode
"""
for assingment in constant_definition.children:
constant_token = assingment.children[0]
number_token = assingment.children[1]
if self.symbol_table[constant_token.attribute].declared:
raise SemanticException("Double declaration", constant_token)
else:
self.symbol_table[constant_token.attribute].declared = True
self.symbol_table[constant_token.attribute].constant = True
self.symbol_table[constant_token.attribute].type = self.symbol_table[number_token.attribute].type
def check_variable_declaration(self, variable_declaration):
for colon_block in variable_declaration.children:
identifier_list = colon_block.children[0]
type_token = colon_block.children[1]
for identifier_token in identifier_list.children:
if self.symbol_table[identifier_token.attribute].declared:
raise SemanticException("Double declaration", identifier_token)
else:
self.symbol_table[identifier_token.attribute].declared = True
self.symbol_table[identifier_token.attribute].constant = False
self.symbol_table[identifier_token.attribute].type = Type.get_type_by_token(type_token)
def check_sequence_type(self, sequence_block):
"""
:type sequence_block:TreeNode
"""
for statement in sequence_block.children:
self.check_statement_type(statement)
def check_statement_type(self, statement_block):
"""
:type statement_block:TreeNode
"""
if statement_block.node_type == NodeType.ASSIGNMENT:
identifier_token = statement_block.children[0]
if self.symbol_table[identifier_token.attribute].constant:
raise SemanticException("Cant assign to constant", identifier_token)
if not self.symbol_table[identifier_token.attribute].declared:
raise SemanticException("Variable not declared", identifier_token)
identifier_type = self.symbol_table[identifier_token.attribute].type
expression_type = self.get_expression_type(statement_block.children[1])
if identifier_type in (Type.BOOLEAN, Type.INTEGER) and identifier_type != expression_type:
raise TypeException(statement_block, expression_type, identifier_type)
if identifier_type==Type.REAL and expression_type==Type.BOOLEAN:
raise TypeException(statement_block, expression_type, Type.REAL, Type.INTEGER)
elif statement_block.node_type == NodeType.IF:
expression = statement_block.children[0]
expression_type = self.get_expression_type(expression)
if expression_type != Type.BOOLEAN:
raise TypeException(statement_block, expression_type, Type.BOOLEAN)
then_statement = statement_block.children[1]
self.check_statement_type(then_statement)
if len(statement_block.children) > 2:
else_statement = statement_block.children[2]
self.check_statement_type(else_statement)
elif statement_block.node_type == NodeType.WHILE:
expression = statement_block.children[0]
statement = statement_block.children[1]
expression_type = self.get_expression_type(expression)
if expression_type != Type.BOOLEAN:
raise TypeException(statement_block, expression_type, Type.BOOLEAN)
self.check_statement_type(statement)
elif statement_block.node_type == NodeType.COMPOSED:
sequence = statement_block.children[0]
self.check_sequence_type(sequence)
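    # Type rules encoded below: relational operators compare numbers with numbers
    # (or booleans with booleans) and yield BOOLEAN; +, -, *, / require numeric
    # operands and yield REAL if either operand is REAL, else INTEGER; not/and/or
    # require BOOLEAN operands; div/mod require INTEGER operands and yield INTEGER.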
def get_expression_type(self, expression):
if isinstance(expression, TreeNode):
if expression.node_type in (
NodeType.RELATION_EQUAl, NodeType.RELATION_NON_EQUAL, NodeType.RELATION_LESS,
NodeType.RELATION_LESS_EQUAL,
NodeType.RELATION_GREATER, NodeType.RELATION_GREATER_EQUAL):
first_operand_type = self.get_expression_type(expression.children[0])
second_operand_type = self.get_expression_type(expression.children[1])
if (first_operand_type in (Type.INTEGER, Type.REAL) and second_operand_type == Type.BOOLEAN) or (
first_operand_type == Type.BOOLEAN and second_operand_type in (
Type.REAL, Type.INTEGER)):
raise TypeException(expression, Type.BOOLEAN, Type.REAL, Type.INTEGER)
return Type.BOOLEAN
elif expression.node_type in (NodeType.OPERATOR_UNARY_MINUS, NodeType.OPERATOR_UNARY_PLUS):
first_operand_type = self.get_expression_type(expression.children[0])
if first_operand_type not in (Type.REAL, Type.INTEGER):
raise TypeException(expression, first_operand_type, Type.REAL, Type.INTEGER)
return first_operand_type
elif expression.node_type in (
NodeType.OPERATOR_PLUS, NodeType.OPERATOR_MINUS, NodeType.OPERATOR_MULTIPLY,
NodeType.OPERATOR_DIVISION):
first_operand_type = self.get_expression_type(expression.children[0])
second_operand_type = self.get_expression_type(expression.children[1])
if first_operand_type not in (Type.REAL, Type.INTEGER):
raise TypeException(expression, first_operand_type, Type.REAL, Type.INTEGER)
if second_operand_type not in (Type.REAL, Type.INTEGER):
raise TypeException(expression, second_operand_type, Type.REAL, Type.INTEGER)
if first_operand_type == Type.REAL or second_operand_type == Type.REAL:
return Type.REAL
else:
return Type.INTEGER
elif expression.node_type == NodeType.OPERATOR_NOT:
first_operand_type = self.get_expression_type(expression.children[0])
if first_operand_type != Type.BOOLEAN:
raise TypeException(expression, first_operand_type, Type.BOOLEAN)
return Type.BOOLEAN
elif expression.node_type in (NodeType.OPERATOR_AND, NodeType.OPERATOR_OR):
first_operand_type = self.get_expression_type(expression.children[0])
second_operand_type = self.get_expression_type(expression.children[1])
if first_operand_type != Type.BOOLEAN:
raise TypeException(expression, first_operand_type, Type.BOOLEAN)
if second_operand_type != Type.BOOLEAN:
raise TypeException(expression, second_operand_type, Type.BOOLEAN)
return Type.BOOLEAN
elif expression.node_type in (NodeType.OPERATOR_MOD, NodeType.OPERATOR_DIV):
first_operand_type = self.get_expression_type(expression.children[0])
second_operand_type = self.get_expression_type(expression.children[1])
if first_operand_type != Type.INTEGER:
raise TypeException(expression, first_operand_type, Type.INTEGER)
if second_operand_type != Type.INTEGER:
                    raise TypeException(expression, second_operand_type, Type.INTEGER)
return Type.INTEGER
elif isinstance(expression, Token):
if expression.tag == Tag.ID:
if not self.symbol_table[expression.attribute].declared:
raise SemanticException("Variable not declared", expression)
else:
return self.symbol_table[expression.attribute].type
elif expression.tag == Tag.NUM:
return self.symbol_table[expression.attribute].type
elif expression.tag in (Tag.TRUE, Tag.FALSE):
return Type.BOOLEAN
|
python
|
#!/usr/bin/env python3 -u
"""
Main Script for training and testing
"""
import argparse
import json
import logging
import os
import pdb
import random
import sys
import time as t
from collections import OrderedDict
import numpy as np
import spacy
import torch
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader, RandomSampler, SubsetRandomSampler
from data_loader import ContDataset, MySampler
from dataset_settings import data_dir
# from extra import plot_batch
from model import ContModel
from settings import (args, batch_size, device,
individual_turnpair_experiments, just_test,
just_test_folder, just_test_model, language_size,
load_encodings, load_model, load_second_encodings,
lstm_sets_dict, max_epochs, naming_dict, note_append,
num_data_loader_workers)
from settings import num_feat_per_person as num_feat_per_person_dict
from settings import (optim_patience, pred_task_dict,
test_dataset_settings_dict, test_file_list,
time_out_length, train_dataset_settings_dict,
train_file_list, use_ling, vae_data_multiplier,
vae_data_multiplier_2, vae_experiments, vae_target_da,
valid_dataset_settings_dict, valid_file_list)
from test_funcs import test
from util import (get_individual_turnpair_dataset, get_vae_dataset,
get_vae_encodings)
# from test_funcs import sanity_check_func, get_batch_items_for_full_test
sys.dont_write_bytecode = True
torch.autograd.set_detect_anomaly(True)
torch.set_default_dtype(torch.float32)
SET_TRAIN = 0
SET_VALID = 1
SET_TEST = 2
def main():
SEED = args.seed
random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
t0 = t.time()
embeds_usr, embeds_sys, embeds, num_feat_per_person, sil_tok, nlp = setup_ling()
if just_test:
just_test_func(
embeds_usr=embeds_usr,
embeds_sys=embeds_sys,
embeds=embeds,
num_feat_per_person=num_feat_per_person,
sil_tok=sil_tok,
nlp=nlp
)
    def _init_fn(worker_id): return np.random.seed(SEED)
print('Loading valid DATA')
valid_dataset = ContDataset(valid_dataset_settings_dict)
valid_dataset.embeds_usr = embeds_usr
valid_dataset.embeds_sys = embeds_sys
valid_dataset.nlp = nlp
valid_dataset.sil_tok = sil_tok
collate_fn_valid = valid_dataset.collate_fn
valid_sampler = MySampler(valid_dataset)
valid_dataloader = DataLoader(valid_dataset, sampler=valid_sampler,
batch_size=batch_size, collate_fn=collate_fn_valid,
num_workers=num_data_loader_workers,
worker_init_fn=_init_fn)
num_valid_batches = len(valid_dataloader)
valid_dataset.update_annots_test = test_dataset_settings_dict['update_annots_test']
print('Loading train DATA')
train_dataset = ContDataset(train_dataset_settings_dict)
train_dataset.embeds_usr = embeds_usr
train_dataset.embeds_sys = embeds_sys
train_dataset.nlp = nlp
train_dataset.sil_tok = sil_tok
collate_fn_train = train_dataset.collate_fn
if lstm_sets_dict['two_sys_turn']:
tmp_sampler = MySampler(train_dataset)
train_sampler = SubsetRandomSampler(tmp_sampler.my_indices_no_first)
else:
train_sampler = RandomSampler(
train_dataset) if lstm_sets_dict['train_random_sample'] else MySampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=batch_size, collate_fn=collate_fn_train,
num_workers=num_data_loader_workers, worker_init_fn=_init_fn)
num_train_batches = len(train_dataloader)
print('Done loading all DATA')
print('time taken: \n' + str(t.time() - t0))
context_vec_settings_dict = {
'train': len(train_file_list),
'valid': len(valid_file_list),
'test': len(test_file_list)
}
lstm_sets_dict['sil_tok'] = sil_tok
model = ContModel(num_feat_per_person, lstm_sets_dict,
device, context_vec_settings_dict,
embeds_usr, embeds_sys, embeds)
# print([[k, i.shape, torch.prod(torch.tensor(i.shape))] for k, i in model.state_dict().items()]) # keep for debugging
# best_valid_loss = 10000
iteration = 0
results_dict = OrderedDict()
results_dict['train'], results_dict['valid'], results_dict['test'] = OrderedDict(
), OrderedDict(), OrderedDict()
for task in ['all'] + pred_task_dict['active_outputs'] + ['iteration', 'epoch', ]:
results_dict['train'][task] = []
results_dict['valid'][task] = []
results_dict['test'][task] = []
# results_dict['test']['num_batches'] = len(test_dataloader)
results_dict['valid']['stats'] = []
results_dict['test']['stats'] = []
if load_model:
print('LOADING MODEL FROM DISK')
if torch.cuda.is_available():
checkpoint = torch.load(just_test_folder + '/model.pt')
else:
checkpoint = torch.load(
just_test_folder+'/model.pt', map_location='cpu')
model = torch.nn.DataParallel(model, dim=0)
model.load_state_dict(checkpoint)
model.to(device)
embeds = model.module.embeds
train_dataset.embeds = embeds
valid_dataset.embeds = embeds
train_dataset.nlp = nlp
valid_dataset.nlp = nlp
valid_dataset.sil_tok = sil_tok
results_dict = json.load(open(just_test_folder + '/results.json', 'r'))
# model.load_state_dict(checkpoint, strict=False)
iteration = results_dict['train']['iteration'][-1]
if not note_append == '_dev' and not os.path.exists(just_test_folder+'/optimizer.pt'):
initial_learning_rate = float(input("Set initial learning rate:"))
lstm_sets_dict['learning_rate'] = initial_learning_rate
else:
model = torch.nn.DataParallel(model, dim=0)
model.to(device)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
print('Parameter count:{}'.format(int(count_parameters(model))))
optimizer = torch.optim.Adam(model.parameters(
), lr=lstm_sets_dict['learning_rate'], weight_decay=lstm_sets_dict['l2'])
if load_model and os.path.exists(just_test_folder + '/optimizer.pt'):
optim_state = torch.load(just_test_folder+'/optimizer.pt')
optimizer.load_state_dict(optim_state)
print('optimizer loaded. LR:{}'.format(optimizer.defaults['lr']))
# scheduler = ReduceLROnPlateau(optimizer, 'min', patience=optim_patience, min_lr=5.0e-06, verbose=True)
# 9000, 2000, 2000, 1000, 1000 iterations.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=lstm_sets_dict['milestones'], gamma=0.1)
if load_model and os.path.exists(just_test_folder + '/scheduler.pt'):
sched_state = torch.load(just_test_folder + '/scheduler.pt')
scheduler.load_state_dict(sched_state)
print('scheduler loaded.')
print('LR {}'.format(get_lr(optimizer)))
# test_dataset.embeds = model.module.embeds
train_dataset.embeds = model.module.embeds
valid_dataset.embeds = model.module.embeds
# Train
for epoch in range(max_epochs):
model.train()
loss_dict_train_raw = {
task: 0.0 for task in pred_task_dict['active_outputs']}
loss_dict_train_raw['all'] = 0.0
num_pred_samples_for_result = {
task: 0 for task in pred_task_dict['active_outputs']}
model.module.reset_hidden('train')
# hidden_inference = model.module.hidden_inference['train']
model.zero_grad()
start_time = t.time()
for batch_ndx, batch in enumerate(train_dataloader):
if not (lstm_sets_dict['two_sys_turn']) and (len(batch['update_strt_f']) != batch_size):
# This should just be triggered for the last few batches of the epoch
continue
if lstm_sets_dict['two_sys_turn'] and batch['sys_trn_1'].shape[0] != int(batch_size * 2):
print('caught small batch')
continue
cont_file_indx, cont_ab_indx = batch['file_idx'], batch['a_idx']
if lstm_sets_dict['two_sys_turn']:
cont_file_indx = cont_file_indx[::2]
cont_ab_indx = cont_ab_indx[::2]
# h_inf = hidden_inference[:, cont_file_indx, cont_ab_indx, 0, :]
# c_inf = hidden_inference[:, cont_file_indx, cont_ab_indx, 1, :]
mod_in = {k: v for k, v in batch.items() if not (k in ['y_dict'])}
# mod_in['h_inf'] = h_inf.squeeze(0)
# mod_in['c_inf'] = c_inf.squeeze(0)
mod_in = {**batch['y_dict'], **mod_in}
# loc_seed = torch.LongTensor([random.randint(0, 1<<31)]*2).unsqueeze(1)
# mod_in['seed'] = loc_seed
bp_loss, outputs = model(**mod_in)
loss = torch.sum(bp_loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# if lstm_sets_dict['plot_batch']:
# for b_i in [0, 1, 2, 3]:
# plot_batch(outputs, mod_in, train_dataset, b_i)
# quit()
# Don't delete, need for context
# hidden_inference[:, cont_file_indx, cont_ab_indx, 0, :] = outputs['h_inf'].detach().cpu()
# hidden_inference[:, cont_file_indx, cont_ab_indx, 1, :] = outputs['c_inf'].detach().cpu()
# aggregate info
loss_dict_train_raw = {k: float(loss_dict_train_raw[k]) + float(np.sum(
v.data.cpu().numpy())) for k, v in outputs['loss_dict_train_raw'].items()}
num_pred_samples_for_result = {k: num_pred_samples_for_result[k] + int(np.sum(
v.data.cpu().numpy())) for k, v in outputs['num_pred_samples_for_result'].items()}
if (iteration + 1) % 10 == 0 or ((note_append == '_dev' or note_append == '_dev_restart_') and (iteration + 1) % 2 == 0):
print_results = {}
print_results['all'] = 0.0
weight_denom = 0.0
for task in pred_task_dict['active_outputs']:
print_results[task] = loss_dict_train_raw[task] / \
num_pred_samples_for_result[task]
print_results['all'] += pred_task_dict[task]['weight'] * \
print_results[task]
weight_denom += pred_task_dict[task]['weight']
num_pred_samples_for_result[task] = 0
loss_dict_train_raw[task] = 0.0
print_results['all'] = print_results['all']/weight_denom
elapsed = t.time() - start_time
loss_string = ''
loss_string += ' train | epoch {:2d} {:4d}/{:4d}| dur(s) {:4.2f} |'
loss_string += ''.join(
[task + ' {:1.5f} |' for task in pred_task_dict['active_outputs']])
loss_string += ' Weighted {:1.5f} '
loss_string_items = [epoch, batch_ndx+1, num_train_batches, elapsed] + [
print_results[task] for task in pred_task_dict['active_outputs']] + [print_results['all']]
print(loss_string.format(*loss_string_items))
for task in pred_task_dict['active_outputs']:
results_dict['train'][task].append(
float(print_results[task]))
results_dict['train']['all'].append(
float(print_results['all']))
results_dict['train']['iteration'].append(int(iteration) + 1)
results_dict['train']['epoch'].append(int(epoch))
start_time = t.time()
if (iteration + 1) % 200 == 0 or ((note_append == '_dev' or note_append == '_dev_restart_') and (iteration + 1) % 2 == 0): # 25
full_test_flag = lstm_sets_dict['valid_full_test_flag']
model.module.autoregress = lstm_sets_dict['valid_autoregress']
valid_loss_all, valid_loss_TL = test(
model, valid_dataloader, full_test_flag, results_dict, iteration, epoch)
if note_append != '_dev' and (np.argmin(valid_loss_TL) == (len(valid_loss_TL)-1)):
torch.save(model.state_dict(),
naming_dict['fold_name']+'/best_model.pt')
torch.save(model.state_dict(),
naming_dict['fold_name'] + '/model.pt')
torch.save(optimizer.state_dict(),
naming_dict['fold_name']+'/optimizer.pt')
json.dump(results_dict, open(
naming_dict['fold_name'] + '/results.json', 'w'), indent=4)
# scheduler.step(valid_loss_all[-1])
scheduler.step()
torch.save(scheduler.state_dict(),
naming_dict['fold_name']+'/scheduler.pt')
print(naming_dict['fold_name'])
print('LR {}'.format(get_lr(optimizer)))
print('Best TL valid loss: {:.4f} ({} steps ago) \n'.format(
np.min(valid_loss_TL), len(valid_loss_TL) - np.argmin(valid_loss_TL)))
# Run tests after final iteration
if scheduler._step_count >= scheduler.milestones[-1]:
# load test dataloader
# del train_dataset.dataset # free some RAM
# test_dataset = ContDataset(test_dataset_settings_dict)
# collate_fn_test = test_dataset.collate_fn
# test_dataset.embeds_usr = embeds_usr
# test_dataset.embeds_sys = embeds_sys
# test_dataset.embeds = embeds
# test_dataset.nlp = nlp
# test_dataset.sil_tok = sil_tok
# test_sampler = MySampler(test_dataset)
# test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
# batch_size=batch_size, collate_fn=collate_fn_test,
# num_workers=num_data_loader_workers,
# worker_init_fn=_init_fn)
# test_dataset.update_annots_test = test_dataset_settings_dict['update_annots_test']
# test_dataloader.dataset.time_out_length = time_out_length
# epoch = 0
# train_batch_indx = -1
# full_test_flag = False
# test(model, test_dataloader, full_test_flag,
# results_dict, train_batch_indx, epoch)
# json.dump(results_dict, open(
# naming_dict['fold_name'] + '/results_test.json', 'w'), indent=4)
# print('Finished non-sampling test')
# full_test_flag = True
# model.module.lstm_sets_dict['full_test_flag'] = True
# test(model, test_dataloader, full_test_flag,
# results_dict, train_batch_indx, epoch)
# json.dump(results_dict, open(
# naming_dict['fold_name'] + '/results_sampled.json', 'w'), indent=4)
# print('Finished sampling test')
print('DONE')
os._exit(0)
model.train()
model.module.autoregress = lstm_sets_dict['train_autoregress']
start_time = t.time()
iteration += 1
start_time = t.time()
print('finished')
def setup_ling():
if use_ling:
nlp = spacy.blank('en')
if language_size == 500:
print('using REALLY small language: 500')
nlp.from_disk(data_dir+'/spacy_tok_combined_500/')
elif language_size == 5000:
print('using small language: 5000')
nlp.from_disk(data_dir+'/spacy_tok_combined_5000/')
elif language_size == 10000:
print('using small language: 10000')
nlp.from_disk(data_dir+'/spacy_tok_combined_10000/')
else:
print('using medium language:20000')
nlp.from_disk(data_dir+'/spacy_tok_combined_20000/')
spacy.vocab.link_vectors_to_models(nlp.vocab)
unspec_tok = len(nlp.vocab.vectors.data)
sil_tok = unspec_tok + 1
if lstm_sets_dict['use_wait_stop_tok']:
lstm_sets_dict['unspec_tok'] = unspec_tok # for user
lstm_sets_dict['sil_tok'] = sil_tok
lstm_sets_dict['wait_tok'] = sil_tok + 1
lstm_sets_dict['stop_tok'] = sil_tok + 2
lstm_sets_dict['pad_tok'] = sil_tok + 3
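            # five extra vocabulary ids (unspec, sil, wait, stop, pad) -> five extra embedding rows appended below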
num_embed_rows_to_add = 5
# padding_idx = lstm_sets_dict['stop_tok']
padding_idx = lstm_sets_dict['pad_tok']
else:
num_embed_rows_to_add = 1
# padding_idx = sil_tok
padding_idx = None
lstm_sets_dict['sil_tok'] = sil_tok
embedding_dim = nlp.vocab.vectors.data.shape[1]
num_embeddings = len(nlp.vocab.vectors.data)
if lstm_sets_dict['ling_use_glove']:
embeds = nn.Embedding.from_pretrained(
torch.FloatTensor(np.concatenate([np.array(nlp.vocab.vectors.data), np.zeros(
[num_embed_rows_to_add, embedding_dim])])),
padding_idx=padding_idx, freeze=lstm_sets_dict['ling_emb_freeze']
)
else:
num_embeddings = len(nlp.vocab.vectors.data)
embeds = nn.Embedding(
num_embeddings + 1, embedding_dim=embedding_dim, padding_idx=sil_tok).to(device)
embeds_reduce_layer_usr = nn.Linear(embedding_dim, 300)
embeds_reduce_layer_sys = nn.Linear(embedding_dim, 300)
embeds_dropout_usr = nn.Dropout(lstm_sets_dict['embeds_dropout'])
embeds_dropout_sys = nn.Dropout(lstm_sets_dict['embeds_dropout'])
embeds_usr = nn.Sequential(embeds_dropout_usr, embeds_reduce_layer_usr)
embeds_sys = nn.Sequential(embeds_dropout_sys, embeds_reduce_layer_sys)
num_feat_per_person = num_feat_per_person_dict['acous'] + embedding_dim
print('Embeddings loaded.')
else:
num_feat_per_person = num_feat_per_person_dict['acous']
        embeds_usr, embeds_sys, embeds = 0, 0, 0  # placeholders when linguistic features are disabled
sil_tok = -1
nlp = -1
return embeds_usr, embeds_sys, embeds, num_feat_per_person, sil_tok, nlp
def just_test_func(**kwargs):
print('******* JUST TESTING *****')
print('Loading test DATA')
context_vec_settings_dict = {
'train': len(train_file_list),
'valid': len(valid_file_list),
'test': len(test_file_list)
}
# if kwargs['load_test_model']:
if torch.cuda.is_available():
checkpoint = torch.load(just_test_folder+just_test_model)
else:
checkpoint = torch.load(
just_test_folder+just_test_model, map_location='cpu')
model = ContModel(kwargs['num_feat_per_person'], lstm_sets_dict, device,
context_vec_settings_dict, kwargs['embeds_usr'], kwargs['embeds_sys'], kwargs['embeds'])
model.temperature = lstm_sets_dict['temperature']
model.autoregress = lstm_sets_dict['test_autoregress']
# only test on one gpu
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model, device_ids=[0], dim=0)
else:
model = torch.nn.DataParallel(model, dim=0)
strict = True
model.load_state_dict(checkpoint, strict=strict)
model.to(device)
model.eval()
embeds = model.module.embeds
test_dataset = ContDataset(test_dataset_settings_dict)
collate_fn_test = test_dataset.collate_fn
test_dataset.embeds_usr = kwargs['embeds_usr']
test_dataset.embeds_sys = kwargs['embeds_sys']
test_dataset.embeds = embeds
test_dataset.nlp = kwargs['nlp']
test_dataset.sil_tok = kwargs['sil_tok']
    def _init_fn(worker_id): return np.random.seed(lstm_sets_dict['seed'])
if vae_experiments:
test_dataset_subset = get_vae_dataset(
test_dataset, test_dataset_settings_dict['update_annots_test'], vae_target_da)
test_dataset_subset = test_dataset_subset * vae_data_multiplier
if load_encodings and load_second_encodings:
test_dataset_subset_2 = get_vae_dataset(
test_dataset, test_dataset_settings_dict['update_annots_test'], vae_target_da)
test_dataset_subset_2 = test_dataset_subset * vae_data_multiplier_2
test_dataset_subset = test_dataset_subset[:len(
test_dataset_subset)//2] + test_dataset_subset_2[:len(test_dataset_subset_2)//2]
print('Target da: {}\t number of points: {}'.format(
vae_target_da, len(test_dataset_subset)))
test_sampler = SubsetRandomSampler(test_dataset_subset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
batch_size=batch_size, collate_fn=collate_fn_test,
num_workers=num_data_loader_workers, drop_last=False,
worker_init_fn=_init_fn)
if load_encodings:
mu, log_var = get_vae_encodings(lstm_sets_dict, False)
if load_second_encodings:
mu_2, log_var_2 = get_vae_encodings(lstm_sets_dict, True)
mu = (mu + mu_2) / 2
log_var = np.log(0.5*(np.exp(log_var)+np.exp(log_var_2)))
model.module.VAE.set_static_mu_log_var(mu, log_var)
elif individual_turnpair_experiments:
test_dataset_subset = get_individual_turnpair_dataset(test_dataset, test_dataset_settings_dict['update_annots_test'],
target_individual_turnpairs[0], target_individual_turnpairs[1], target_individual_turnpairs[2])
test_dataset_subset = test_dataset_subset * vae_data_multiplier
print('Target da: {}\t number of points: {}'.format(
vae_target_da, len(test_dataset_subset)))
test_sampler = SubsetRandomSampler(test_dataset_subset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
batch_size=batch_size, collate_fn=collate_fn_test,
num_workers=num_data_loader_workers, drop_last=False,
worker_init_fn=_init_fn)
else:
test_sampler = MySampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler,
batch_size=batch_size, collate_fn=collate_fn_test,
num_workers=num_data_loader_workers,
worker_init_fn=_init_fn)
test_dataset.update_annots_test = test_dataset_settings_dict['update_annots_test']
print('Testing with temperature:' + str(lstm_sets_dict['temperature']))
# print('Number of Test files: '+ str(test_dataset.file_list))
test_dataloader.dataset.time_out_length = time_out_length
epoch = 0
# print('Just testing...')
results_dict = json.load(open(just_test_folder + '/results.json', 'r'))
train_batch_indx = -1
full_test_flag = True if lstm_sets_dict['full_test_flag'] else lstm_sets_dict['test_autoregress']
# pdb.set_trace()
test(model, test_dataloader, full_test_flag,
results_dict, train_batch_indx, epoch)
json.dump(results_dict, open(
naming_dict['fold_name'] + '/results.json', 'w'), indent=4)
os._exit(0)
if __name__ == '__main__':
main()
|
python
|
# Demonstration showing plot of the 5 stations with the highest relative water levels as well as their best fit polynomials
import datetime
from floodsystem.plot import plot_water_level_with_fit
from floodsystem.flood import stations_highest_rel_level
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
def run():
# Build list of stations
stations = build_station_list()
update_water_levels(stations)
# Build list of the 5 stations at highest relative levels
high_rel_level_stations = stations_highest_rel_level(stations, 5)
print(high_rel_level_stations)
# Find dates and levels for each of these stations then add them to the plot
# Get the names of the 5 highest level stations
names = []
for i in high_rel_level_stations:
names.append(i[0])
print(names)
dt = 4
# Find each station
for i in names:
for station in stations:
if station.name == i:
                try:
                    dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))
                    plot_water_level_with_fit(station, dates, levels, 4)
                except ValueError:
                    # skip stations whose level data cannot be fetched or fitted
                    pass
if __name__ == "__main__":
run()
|
python
|
from sanic import Sanic
from sanic_cors import CORS
app = Sanic(__name__)
CORS(app)
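# Minimal usage sketch (hypothetical route and port, not part of the original app):
# shows a CORS-enabled endpoint and a dev-server entry point.
from sanic.response import json as json_response
@app.route('/health')
async def health(request):
    # sanic_cors adds the CORS headers to this response automatically
    return json_response({'status': 'ok'})
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)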
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from progress.bar import ChargingBar
from scipy import linalg as LA
from .base import ComputationInterface
# noinspection PyUnresolvedReferences
class Numpy(ComputationInterface):
def __init__(self):
super(Numpy, self).__init__()
self.cuda = None
self.using_gpu = False
def update_args(self, args):
super(Numpy, self).update_args(args)
if self.args.gpu >= 0:
print("Getting GPU")
from chainer import cuda
self.cuda = cuda
cuda.get_device(self.args.gpu).use()
self.xp = cuda.cupy
self.using_gpu = True
else:
import numpy as np
self.xp = np
def calc_block_inter_layer_covariance(self, model_wrapper, use_training_data=True, batch_size=100, **options):
model = model_wrapper.model
is_chainer = model_wrapper.model_type == "chainer"
if self.using_gpu and is_chainer:
model.to_gpu()
xp = self.xp
train, test = model_wrapper.dataset
data_x = train if use_training_data else test
if is_chainer:
data_x = xp.moveaxis(data_x, -1, 0)[0]
else:
data_x = data_x[0]
data_x = xp.stack(data_x, axis=0)
data_size = len(data_x)
if batch_size > 0:
perm = xp.random.permutation(data_size)
data_x = data_x[perm[0:batch_size]]
data_size = batch_size
compute_batch_size = 100
if is_chainer:
model.train = False
# perm = xp.random.permutation(data_size)
full_mean = []
cov = []
test_num = 0
bar = ChargingBar("Calculating inter layer covariance", max=data_size)
for batch in range(0, data_size, compute_batch_size):
test_num += 1
# ind_tmp = perm[batch:batch + batch_size] xp.asarray(, dtype=xp.float32)
x = data_x[batch:batch + compute_batch_size]
layer_outputs = model_wrapper.get_layer_outputs(x)
for i, layer_out in enumerate(layer_outputs):
if is_chainer:
layer_out = layer_out.data
# tmp_mean = xp.mean(layer_out, axis=0)
# tmp_cov = xp.einsum("a,b->ab", layer_out.ravel(), layer_out.ravel())
tmp_cov = None
for fxi in layer_out:
fxi = fxi.ravel()
dot = xp.outer(fxi, fxi)
if tmp_cov is None:
tmp_cov = dot
else:
tmp_cov += dot
bar.next()
if batch == 0:
                    # full_mean.append(tmp_mean)  # mean tracking disabled; tmp_mean is commented out above
cov.append(tmp_cov)
else:
# full_mean[i] += tmp_mean
cov[i] += tmp_cov
logging.debug("Computed covariance for batch %d of size %d" % (batch, compute_batch_size))
bar.finish()
bar = ChargingBar("Calculating eigen values", max=len(cov))
eigen_values = []
for j in range(len(cov)):
cov[j] = cov[j] / test_num
if self.using_gpu:
tmp_cov_ma = self.cuda.to_cpu(cov[j])
else:
tmp_cov_ma = cov[j]
eigs = LA.eigvals(tmp_cov_ma)
eigen_values.append(eigs)
bar.next()
bar.finish()
# Saving
self.store_elements({
"cov": cov,
"eigen_values": eigen_values,
}, group_name="inter_layer_covariance", model_wrapper=model_wrapper)
def calc_inter_layer_covariance(self, model_wrapper, use_training_data=True, batch_size=100, **options):
model = model_wrapper.model
is_chainer = model_wrapper.model_type == "chainer"
if self.using_gpu and is_chainer:
model.to_gpu()
xp = self.xp
train, test = model_wrapper.dataset
data_x = train if use_training_data else test
if is_chainer:
data_x = xp.moveaxis(data_x, -1, 0)[0]
else:
data_x = data_x[0]
data_x = xp.stack(data_x, axis=0)
data_size = len(data_x)
if batch_size and batch_size > 0:
perm = xp.random.permutation(data_size)
data_x = data_x[perm[0:batch_size]]
data_size = batch_size
compute_batch_size = 100
if is_chainer:
model.train = False
# perm = xp.random.permutation(data_size)
# full_mean = []
cov = []
test_num = 0
bar = ChargingBar("Calculating inter layer covariance", max=data_size)
for batch in range(0, data_size, compute_batch_size):
test_num += 1
# ind_tmp = perm[batch:batch + batch_size] xp.asarray(, dtype=xp.float32)
x = data_x[batch:batch + compute_batch_size]
layer_outputs = model_wrapper.get_layer_outputs(x)
for i, layer_out in enumerate(layer_outputs):
if is_chainer:
layer_out = layer_out.data
# tmp_mean = xp.mean(layer_out, axis=0)
# tmp_cov = xp.einsum("a,b->ab", layer_out.ravel(), layer_out.ravel())
tmp_cov = None
for fxi in layer_out:
fxi = fxi.ravel()
dot = xp.outer(fxi, fxi)
if tmp_cov is None:
tmp_cov = dot
else:
tmp_cov += dot
bar.next()
if batch == 0:
# full_mean.append(tmp_mean)
cov.append(tmp_cov)
else:
# full_mean[i] += tmp_mean
cov[i] += tmp_cov
logging.debug("Computed covariance for batch %d of size %d" % (batch, compute_batch_size))
bar.finish()
bar = ChargingBar("Calculating eigen values", max=len(cov))
eigen_values = []
for j in range(len(cov)):
cov[j] = cov[j] / test_num
if self.using_gpu:
tmp_cov_ma = self.cuda.to_cpu(cov[j])
else:
tmp_cov_ma = cov[j]
eigs = LA.eigvals(tmp_cov_ma)
eigen_values.append(eigs)
bar.next()
bar.finish()
# Saving
self.store_elements({
"cov": cov,
"eigen_values": eigen_values,
}, group_name="inter_layer_covariance", model_wrapper=model_wrapper)
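# Sketch of the core computation above (pure NumPy, hypothetical shapes, no chainer/GPU):
# for each layer, outer products of the flattened layer outputs are accumulated over
# batches, the sum is divided by the number of batches, and the eigenvalue spectrum is
# taken with scipy.linalg.eigvals, e.g.:
#   outs = np.random.rand(100, 32)                       # 100 samples of a 32-unit layer
#   cov = sum(np.outer(o.ravel(), o.ravel()) for o in outs)
#   eigs = LA.eigvals(cov / num_batches)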
|
python
|
from .. import db
class Enrollment(db.Model):
'''
Model handling the association between a user and the courses they are
enrolled in in a many-to-many relationship, with some added data
'''
__tablename__ = 'user_courses'
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('course.id'), primary_key=True)
enabled = db.Column(db.Boolean, default=True)
user = db.relationship('User', backref='enrollment_assoc')
course = db.relationship('Course', backref='enrollment_assoc')
def __init__(self, user=None, course=None, enabled=True):
self.user = user
self.course = course
self.enabled = enabled
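# Usage sketch (hypothetical `some_user` / `some_course` instances, Flask-SQLAlchemy session assumed):
#   enrollment = Enrollment(user=some_user, course=some_course, enabled=True)
#   db.session.add(enrollment)
#   db.session.commit()
#   some_user.enrollment_assoc   # list of Enrollment rows; each exposes .course and .enabled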
|
python
|
import sys
import os
# in order to get __main__ to work, we follow: https://stackoverflow.com/questions/16981921/relative-imports-in-python-3
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import click
import logging
import coloredlogs
import asyncio
from asyncio import AbstractEventLoop
from rx.subject import Subject
from trader.messaging.messaging_client import MessagingSubscriber, MessagingPublisher
from typing import Dict
class MessagingServer():
def __init__(self,
subscribe_ip_address: str,
subscribe_port: int,
publisher_ip_address: str,
publisher_port: int,
loop: AbstractEventLoop):
self.subject: Subject = Subject()
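        # Forwarding-proxy wiring: we SUBSCRIBE to the upstream publisher's endpoint
        # (publisher_ip_address:publisher_port) and re-PUBLISH to downstream subscribers
        # on subscribe_ip_address:subscribe_port, hence the crossed class names below.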
self.publisher = MessagingSubscriber(subscribe_ip_address=publisher_ip_address,
subscribe_port=publisher_port,
loop=loop)
self.subscribers = MessagingPublisher(publish_ip_address=subscribe_ip_address,
publish_port=subscribe_port,
loop=loop)
self.loop: AbstractEventLoop = loop
async def start(self):
def pretty_print(message: Dict):
result = ''.join(['{0}: {1}, '.format(k, v) for k, v in message.items()])
return result[:100] + ' ...'
done = asyncio.Future()
logging.info('starting...')
# subscribe to the publisher, and push messages to the subscribers
disposable = self.publisher.subscribe(self.subscribers)
# write to the console for now
self.publisher.subscribe(on_next=lambda message: logging.info(pretty_print(message)))
logging.info('started.')
await done
disposable.dispose()
@click.command()
@click.option('--subscribe_ipaddress', default='127.0.0.1', required=False, help='ip to bind to')
@click.option('--subscribe_port', default=5002, required=False, help='port for subscribers')
@click.option('--publisher_ipaddress', default='127.0.0.1', required=False, help='ip address of publisher')
@click.option('--publisher_port', default=5001, required=False, help='port for publisher')
def main(subscribe_ipaddress: str,
subscribe_port: int,
publisher_ipaddress: str,
publisher_port: int):
logging.info('starting messaging_server subscribe_port: {} publisher_port: {}'.format(subscribe_port, publisher_port))
loop = asyncio.get_event_loop()
messaging_server = MessagingServer(subscribe_ipaddress,
subscribe_port,
publisher_ipaddress,
publisher_port,
loop)
loop.run_until_complete(messaging_server.start())
if __name__ == '__main__':
coloredlogs.install(level='INFO')
main()
|
python
|
import argparse
import sys
from typing import Tuple
from bxcommon.models.blockchain_peer_info import BlockchainPeerInfo
from bxcommon.models.blockchain_protocol import BlockchainProtocol
from bxcommon.utils.blockchain_utils.eth import eth_common_constants
from bxgateway import log_messages
from bxutils import logging
logger = logging.get_logger(__name__)
# Make sure enode is at least as long as the public key
def enode_is_valid_length(enode: str) -> bool:
return len(enode) >= 2 * eth_common_constants.PUBLIC_KEY_LEN
def get_enode_parts(enode: str) -> Tuple[str, str, str]:
enode_and_pub_key, ip_and_port = enode.split("@")
if enode_and_pub_key.startswith("enode://"):
pub_key = enode_and_pub_key[8:]
else:
pub_key = enode_and_pub_key
ip, port_and_disc = ip_and_port.split(":")
port = port_and_disc.split("?")[0]
return pub_key, ip, port
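# Expected enode format (hypothetical values) as split apart above:
#   enode://<hex-encoded node public key>@10.0.0.5:30303?discport=0
#   -> pub_key = "<hex-encoded node public key>", ip = "10.0.0.5", port = "30303"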
def get_ip_port_string_parts(ip_port_string: str) -> Tuple[str, str]:
ip_port_list = ip_port_string.strip().split(":")
ip = ip_port_list[0]
port = ip_port_list[1]
return ip, port
def parse_enode(enode: str) -> BlockchainPeerInfo:
if not enode_is_valid_length(enode):
logger.fatal(log_messages.ETH_PARSER_INVALID_ENODE_LENGTH, enode, len(enode), exc_info=False)
sys.exit(1)
try:
pub_key, ip, port = get_enode_parts(enode)
if not port.isnumeric():
logger.fatal(log_messages.PARSER_INVALID_PORT, port, exc_info=False)
sys.exit(1)
except ValueError:
logger.fatal(log_messages.ETH_PARSER_INVALID_ENODE, enode, exc_info=False)
sys.exit(1)
else:
return BlockchainPeerInfo(ip, int(port), pub_key)
def parse_ip_port(ip_port_string: str) -> BlockchainPeerInfo:
ip, port = get_ip_port_string_parts(ip_port_string)
if not port.isnumeric():
logger.fatal(log_messages.PARSER_INVALID_PORT, port, exc_info=False)
sys.exit(1)
return BlockchainPeerInfo(ip, int(port))
def parse_peer(blockchain_protocol: str, peer: str) -> BlockchainPeerInfo:
if blockchain_protocol.lower() == BlockchainProtocol.ETHEREUM.value:
return parse_enode(peer)
else:
return parse_ip_port(peer)
class ParseEnode(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
assert isinstance(values, str)
blockchain_peer = parse_enode(values)
# Node public key gets validated in validate_eth_opts
namespace.node_public_key = blockchain_peer.node_public_key
# blockchain IP gets validated in __init__()
namespace.blockchain_ip = blockchain_peer.ip
namespace.blockchain_port = blockchain_peer.port
class ParseBlockchainPeers(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
assert isinstance(values, str)
blockchain_peers = set()
blockchain_protocol = namespace.blockchain_protocol
for peer in values.split(","):
blockchain_peer = parse_peer(blockchain_protocol, peer)
blockchain_peers.add(blockchain_peer)
namespace.blockchain_peers = blockchain_peers
|
python
|
from setuptools import setup, find_namespace_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='s3a_decorrelation_toolbox',
version='0.2.9',
description='Decorrelation algorithm and toolbox for diffuse sound objects and general upmix',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/s3a-spatialaudio/decorrelation-python-toolbox',
author='Michael Cousins',
author_email='[email protected]',
license='ISC',
packages=['s3a_decorrelation_toolbox'],
install_requires=[
'numpy >= 1.16.2',
'scipy >= 1.2.1',
'soundfile >= 0.10.0',
'librosa >= 0.6.3',
'acoustics >= 0.1.2',
'pyloudnorm >= 0.0.1',
'matplotlib >= 3.0.2'
],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: OS Independent",
"Topic :: Multimedia :: Sound/Audio"
],
)
|
python
|
# Creative Commons Legal Code
#
# CC0 1.0 Universal
#
# CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
# LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
# ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
# INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
# REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
# PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
# THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
# HEREUNDER.
#
# Statement of Purpose
#
# The laws of most jurisdictions throughout the world automatically confer
# exclusive Copyright and Related Rights (defined below) upon the creator
# and subsequent owner(s) (each and all, an "owner") of an original work of
# authorship and/or a database (each, a "Work").
#
# Certain owners wish to permanently relinquish those rights to a Work for
# the purpose of contributing to a commons of creative, cultural and
# scientific works ("Commons") that the public can reliably and without fear
# of later claims of infringement build upon, modify, incorporate in other
# works, reuse and redistribute as freely as possible in any form whatsoever
# and for any purposes, including without limitation commercial purposes.
# These owners may contribute to the Commons to promote the ideal of a free
# culture and the further production of creative, cultural and scientific
# works, or to gain reputation or greater distribution for their Work in
# part through the use and efforts of others.
#
# For these and/or other purposes and motivations, and without any
# expectation of additional consideration or compensation, the person
# associating CC0 with a Work (the "Affirmer"), to the extent that he or she
# is an owner of Copyright and Related Rights in the Work, voluntarily
# elects to apply CC0 to the Work and publicly distribute the Work under its
# terms, with knowledge of his or her Copyright and Related Rights in the
# Work and the meaning and intended legal effect of CC0 on those rights.
#
# 1. Copyright and Related Rights. A Work made available under CC0 may be
# protected by copyright and related or neighboring rights ("Copyright and
# Related Rights"). Copyright and Related Rights include, but are not
# limited to, the following:
#
# i. the right to reproduce, adapt, distribute, perform, display,
# communicate, and translate a Work;
# ii. moral rights retained by the original author(s) and/or performer(s);
# iii. publicity and privacy rights pertaining to a person's image or
# likeness depicted in a Work;
# iv. rights protecting against unfair competition in regards to a Work,
# subject to the limitations in paragraph 4(a), below;
# v. rights protecting the extraction, dissemination, use and reuse of data
# in a Work;
# vi. database rights (such as those arising under Directive 96/9/EC of the
# European Parliament and of the Council of 11 March 1996 on the legal
# protection of databases, and under any national implementation
# thereof, including any amended or successor version of such
# directive); and
# vii. other similar, equivalent or corresponding rights throughout the
# world based on applicable law or treaty, and any national
# implementations thereof.
#
# 2. Waiver. To the greatest extent permitted by, but not in contravention
# of, applicable law, Affirmer hereby overtly, fully, permanently,
# irrevocably and unconditionally waives, abandons, and surrenders all of
# Affirmer's Copyright and Related Rights and associated claims and causes
# of action, whether now known or unknown (including existing as well as
# future claims and causes of action), in the Work (i) in all territories
# worldwide, (ii) for the maximum duration provided by applicable law or
# treaty (including future time extensions), (iii) in any current or future
# medium and for any number of copies, and (iv) for any purpose whatsoever,
# including without limitation commercial, advertising or promotional
# purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
# member of the public at large and to the detriment of Affirmer's heirs and
# successors, fully intending that such Waiver shall not be subject to
# revocation, rescission, cancellation, termination, or any other legal or
# equitable action to disrupt the quiet enjoyment of the Work by the public
# as contemplated by Affirmer's express Statement of Purpose.
#
# 3. Public License Fallback. Should any part of the Waiver for any reason
# be judged legally invalid or ineffective under applicable law, then the
# Waiver shall be preserved to the maximum extent permitted taking into
# account Affirmer's express Statement of Purpose. In addition, to the
# extent the Waiver is so judged Affirmer hereby grants to each affected
# person a royalty-free, non transferable, non sublicensable, non exclusive,
# irrevocable and unconditional license to exercise Affirmer's Copyright and
# Related Rights in the Work (i) in all territories worldwide, (ii) for the
# maximum duration provided by applicable law or treaty (including future
# time extensions), (iii) in any current or future medium and for any number
# of copies, and (iv) for any purpose whatsoever, including without
# limitation commercial, advertising or promotional purposes (the
# "License"). The License shall be deemed effective as of the date CC0 was
# applied by Affirmer to the Work. Should any part of the License for any
# reason be judged legally invalid or ineffective under applicable law, such
# partial invalidity or ineffectiveness shall not invalidate the remainder
# of the License, and in such case Affirmer hereby affirms that he or she
# will not (i) exercise any of his or her remaining Copyright and Related
# Rights in the Work or (ii) assert any associated claims and causes of
# action with respect to the Work, in either case contrary to Affirmer's
# express Statement of Purpose.
#
# 4. Limitations and Disclaimers.
#
# a. No trademark or patent rights held by Affirmer are waived, abandoned,
# surrendered, licensed or otherwise affected by this document.
# b. Affirmer offers the Work as-is and makes no representations or
# warranties of any kind concerning the Work, express, implied,
# statutory or otherwise, including without limitation warranties of
# title, merchantability, fitness for a particular purpose, non
# infringement, or the absence of latent or other defects, accuracy, or
# the present or absence of errors, whether or not discoverable, all to
# the greatest extent permissible under applicable law.
# c. Affirmer disclaims responsibility for clearing rights of other persons
# that may apply to the Work or any use thereof, including without
# limitation any person's Copyright and Related Rights in the Work.
# Further, Affirmer disclaims responsibility for obtaining any necessary
# consents, permissions or other rights required for any use of the
# Work.
# d. Affirmer understands and acknowledges that Creative Commons is not a
# party to this document and has no duty or obligation with respect to
# this CC0 or use of the Work.
# This file was compiled from a KSY format file downloaded from:
# https://github.com/kaitai-io/kaitai_struct_formats
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Code6502(KaitaiStruct):
"""This spec can be used to disassemble raw stream of 6502 CPU machine
code into individual operations. Each operation includes an opcode
and, optionally, an argument. Register arguments are part of the
`opcode` enum.
"""
class Opcode(Enum):
brk_impl = 0
ora_x_ind = 1
ora_zpg = 5
asl_zpg = 6
php_impl = 8
ora_imm = 9
asl_a = 10
ora_abs = 13
asl_abs = 14
bpl_rel = 16
ora_ind_y = 17
ora_zpg_x = 21
asl_zpg_x = 22
clc_impl = 24
ora_abs_y = 25
ora_abs_x = 29
asl_abs_x = 30
jsr_abs = 32
and_x_ind = 33
bit_zpg = 36
and_zpg = 37
rol_zpg = 38
plp_impl = 40
and_imm = 41
rol_a = 42
bit_abs = 44
and_abs = 45
rol_abs = 46
bmi_rel = 48
and_ind_y = 49
and_zpg_x = 53
rol_zpg_x = 54
sec_impl = 56
and_abs_y = 57
and_abs_x = 61
rol_abs_x = 62
rti_impl = 64
eor_x_ind = 65
eor_zpg = 69
lsr_zpg = 70
pha_impl = 72
eor_imm = 73
lsr_a = 74
jmp_abs = 76
eor_abs = 77
lsr_abs = 78
bvc_rel = 80
eor_ind_y = 81
eor_zpg_x = 85
lsr_zpg_x = 86
cli_impl = 88
eor_abs_y = 89
eor_abs_x = 93
lsr_abs_x = 94
rts_impl = 96
adc_x_ind = 97
adc_zpg = 101
ror_zpg = 102
pla_impl = 104
adc_imm = 105
ror_a = 106
jmp_ind = 108
adc_abs = 109
ror_abs = 110
bvs_rel = 112
adc_ind_y = 113
adc_zpg_x = 117
ror_zpg_x = 118
sei_impl = 120
adc_abs_y = 121
adc_abs_x = 125
ror_abs_x = 126
sta_x_ind = 129
sty_zpg = 132
sta_zpg = 133
stx_zpg = 134
dey_impl = 136
txa_impl = 138
sty_abs = 140
sta_abs = 141
stx_abs = 142
bcc_rel = 144
sta_ind_y = 145
sty_zpg_x = 148
sta_zpg_x = 149
stx_zpg_y = 150
tya_impl = 152
sta_abs_y = 153
txs_impl = 154
sta_abs_x = 157
ldy_imm = 160
lda_x_ind = 161
ldx_imm = 162
ldy_zpg = 164
lda_zpg = 165
ldx_zpg = 166
tay_impl = 168
lda_imm = 169
tax_impl = 170
ldy_abs = 172
lda_abs = 173
ldx_abs = 174
bcs_rel = 176
lda_ind_y = 177
ldy_zpg_x = 180
lda_zpg_x = 181
ldx_zpg_y = 182
clv_impl = 184
lda_abs_y = 185
tsx_impl = 186
ldy_abs_x = 188
lda_abs_x = 189
ldx_abs_y = 190
cpy_imm = 192
cmp_x_ind = 193
cpy_zpg = 196
cmp_zpg = 197
dec_zpg = 198
iny_impl = 200
cmp_imm = 201
dex_impl = 202
cpy_abs = 204
cmp_abs = 205
dec_abs = 206
bne_rel = 208
cmp_ind_y = 209
cmp_zpg_x = 213
dec_zpg_x = 214
cld_impl = 216
cmp_abs_y = 217
cmp_abs_x = 221
dec_abs_x = 222
cpx_imm = 224
sbc_x_ind = 225
cpx_zpg = 228
sbc_zpg = 229
inc_zpg = 230
inx_impl = 232
sbc_imm = 233
nop_impl = 234
cpx_abs = 236
sbc_abs = 237
inc_abs = 238
beq_rel = 240
sbc_ind_y = 241
sbc_zpg_x = 245
inc_zpg_x = 246
sed_impl = 248
sbc_abs_y = 249
sbc_abs_x = 253
inc_abs_x = 254
SEQ_FIELDS = ["operations"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['operations']['start'] = self._io.pos()
self.operations = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['operations']:
self._debug['operations']['arr'] = []
self._debug['operations']['arr'].append({'start': self._io.pos()})
_t_operations = Code6502.Operation(self._io, self, self._root)
_t_operations._read()
self.operations.append(_t_operations)
self._debug['operations']['arr'][len(self.operations) - 1]['end'] = self._io.pos()
i += 1
self._debug['operations']['end'] = self._io.pos()
class Operation(KaitaiStruct):
SEQ_FIELDS = ["code", "args"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['code']['start'] = self._io.pos()
self.code = KaitaiStream.resolve_enum(Code6502.Opcode, self._io.read_u1())
self._debug['code']['end'] = self._io.pos()
self._debug['args']['start'] = self._io.pos()
_on = self.code
if _on == Code6502.Opcode.bcc_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.ora_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.lda_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cpx_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sta_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sta_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.bcs_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.ldy_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.lsr_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.and_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.adc_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sta_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.bne_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.lda_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.adc_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.lsr_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.adc_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sta_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.cpx_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.jmp_ind:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.adc_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.eor_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.eor_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sta_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sbc_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cpy_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ldx_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.adc_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.bpl_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.ora_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ror_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.adc_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.eor_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.lda_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.bit_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.rol_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sty_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.jsr_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.eor_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.eor_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.lda_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.lda_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.bmi_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.sty_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.adc_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.rol_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.stx_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.asl_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.lsr_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ora_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.adc_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ldy_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.cmp_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.lda_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.bvs_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.lda_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cmp_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.inc_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.asl_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.and_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ldx_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.and_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cpx_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.dec_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ror_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ldx_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.dec_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sbc_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cmp_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ror_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.inc_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.and_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sbc_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.asl_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.eor_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ora_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ldy_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sbc_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.asl_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sbc_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.rol_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.lsr_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.stx_zpg_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ora_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.eor_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.bit_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ldx_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ldy_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.jmp_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.beq_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.dec_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.and_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.and_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cmp_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.eor_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sbc_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.cmp_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sbc_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cmp_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.stx_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sty_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.cpy_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.dec_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.ror_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sta_abs_y:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.inc_abs_x:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.lda_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cmp_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cpy_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ldx_zpg_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.sbc_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ora_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.rol_zpg_x:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ora_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.sta_ind_y:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.and_abs:
self.args = self._io.read_u2le()
elif _on == Code6502.Opcode.and_imm:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.cmp_x_ind:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.ldy_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.inc_zpg:
self.args = self._io.read_u1()
elif _on == Code6502.Opcode.bvc_rel:
self.args = self._io.read_s1()
elif _on == Code6502.Opcode.ora_zpg:
self.args = self._io.read_u1()
self._debug['args']['end'] = self._io.pos()
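

# --- Illustrative usage sketch (not part of the generated spec) ---
# Assumptions: the kaitaistruct runtime provides KaitaiStruct.from_bytes()
# (present in runtime 0.7+), and because this class was compiled in debug
# mode, _read() has to be called explicitly after construction.
if __name__ == '__main__':
    # LDA #$01 ; STA $0200 ; BRK
    raw = bytes([0xa9, 0x01, 0x8d, 0x00, 0x02, 0x00])
    code = Code6502.from_bytes(raw)
    code._read()
    for op in code.operations:
        # Implied-mode opcodes (e.g. BRK) never assign `args`, hence getattr.
        print(op.code.name, getattr(op, 'args', None))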
|
python
|
# BLACK = \033[0;30m
# RED = \033[0;31m
# GREEN = \033[0;32m
# BROWN = \033[0;33m
# BLUE = \033[0;34m
# PURPLE = \033[0;35m
# CYAN = \033[0;36m
# YELLOW = \033[1;33m
# BOLD = \033[1m
# FAINT = \033[2m
# ITALIC = \033[3m
# UNDERLINE = \033[4m
# BLINK = \033[5m
# NEGATIVE = \033[7m
# CROSSED = \033[9m
# END = \033[0m
def other():
print('''
A Parser Program for toy lang vYpr
\033[1mGROUP 21\033[0m\033[0;36m
+----------------+---------------+
| NAME | ID NO. |
+----------------+---------------+
| Ashna Swaika | 2018A7PS0027H |
| Abhishek Bapna | 2018A7PS0184H |
| Kumar Pranjal | 2018A7PS0163H |
| Ashish Verma | 2018A7PS0009H |
+----------------+---------------+
\033[1;0m
Toy Language creation in python for Compilers Construction (CS F363)
\033[0;34m Part 1: \033[0m
Functionalities enabled\033[0;32m
[\u2713] Created DFA
[\u2713] Created lexeme() function which can be called repeatedly
[\u2713] Removes Whitespace
[\u2713] Removes Comments
[\u2713] CLI created for easy operation\033[0;0m
\033[0;34m Part 2: \033[0m
Functionalities enabled\033[0;32m
[\u2713] Created CFG of language
[\u2713] Created LALR(1) parse table for vYpr
[\u2713] Expand language constructs
[\u2713] Syntax analysis
[\u2713] Error handling while parsing\033[0;0m
    Functionalities on the checklist:\033[0;31m
[ ] Compile and execute vYpr programs
\033[0;0m''')
if __name__ == '__main__':
other()
|
python
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ct', '0002_auto_20141110_1820'),
]
operations = [
migrations.AlterField(
model_name='conceptlink',
name='relationship',
field=models.CharField(default='defines', max_length=10, choices=[('is', 'Represents (unique ID for)'), ('defines', 'Defines'), ('informal', 'Intuitive statement of'), ('formaldef', 'Formal definition for'), ('tests', 'Tests understanding of'), ('derives', 'Derives'), ('proves', 'Proves'), ('assumes', 'Assumes'), ('motiv', 'Motivates'), ('illust', 'Illustrates'), ('intro', 'Introduces'), ('comment', 'Comments on'), ('warns', 'Warning about')]),
),
]
|
python
|
from pytest import mark
from ..crunch import crunch
@mark.parametrize(
"description,uncrunched,crunched",
[
["number primitive", 0, [0]],
["boolean primitive", True, [True]],
["string primitive", "string", ["string"]],
["empty array", [], [[]]],
["single-item array", [None], [None, [0]]],
[
"multi-primitive all distinct array",
[None, 0, True, "string"],
[None, 0, True, "string", [0, 1, 2, 3]],
],
[
"multi-primitive repeated array",
[True, True, True, True],
[True, [0, 0, 0, 0]],
],
["one-level nested array", [[1, 2, 3]], [1, 2, 3, [0, 1, 2], [3]]],
["two-level nested array", [[[1, 2, 3]]], [1, 2, 3, [0, 1, 2], [3], [4]]],
["empty object", {}, [{}]],
["single-item object", {"a": None}, [None, {"a": 0}]],
[
"multi-item all distinct object",
{"a": None, "b": 0, "c": True, "d": "string"},
[None, 0, True, "string", {"a": 0, "b": 1, "c": 2, "d": 3}],
],
[
"multi-item repeated object",
{"a": True, "b": True, "c": True, "d": True},
[True, {"a": 0, "b": 0, "c": 0, "d": 0}],
],
[
"complex array",
[{"a": True, "b": [1, 2, 3]}, [1, 2, 3]],
[True, 1, 2, 3, [1, 2, 3], {"a": 0, "b": 4}, [5, 4]],
],
[
"complex object",
{"a": True, "b": [1, 2, 3], "c": {"a": True, "b": [1, 2, 3]}},
[True, 1, 2, 3, [1, 2, 3], {"a": 0, "b": 4}, {"a": 0, "b": 4, "c": 5}],
],
],
)
def test_crunch(description, uncrunched, crunched):
assert crunch(uncrunched) == crunched
|
python
|
import os
path = os.path.dirname(__file__)
def get(resource):
return os.path.join(path, resource).replace('\\', '/')
|
python
|
# Copyright 2021 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
import os.path
from adobe.pdfservices.operation.auth.credentials import Credentials
from adobe.pdfservices.operation.exception.exceptions import ServiceApiException, ServiceUsageException, SdkException
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_pdf_options import ExtractPDFOptions
from adobe.pdfservices.operation.pdfops.options.extractpdf.extract_element_type import ExtractElementType
from adobe.pdfservices.operation.execution_context import ExecutionContext
from adobe.pdfservices.operation.io.file_ref import FileRef
from adobe.pdfservices.operation.pdfops.extract_pdf_operation import ExtractPDFOperation
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
try:
# get base path.
base_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Initial setup, create credentials instance.
credentials = Credentials.service_account_credentials_builder() \
.with_client_id("client_id") \
.with_client_secret("client_secret") \
.with_organization_id("organization_id") \
.with_account_id("account_id") \
.with_private_key("private_key") \
.build()
# Create an ExecutionContext using credentials and create a new operation instance.
execution_context = ExecutionContext.create(credentials)
extract_pdf_operation = ExtractPDFOperation.create_new()
# Set operation input from a source file.
source = FileRef.create_from_local_file(base_path + "/resources/extractPdfInput.pdf")
extract_pdf_operation.set_input(source)
# Build ExtractPDF options and set them into the operation
extract_pdf_options: ExtractPDFOptions = ExtractPDFOptions.builder() \
.with_element_to_extract(ExtractElementType.TEXT) \
.build()
extract_pdf_operation.set_options(extract_pdf_options)
# Execute the operation.
result: FileRef = extract_pdf_operation.execute(execution_context)
# Save the result to the specified location.
result.save_as(base_path + "/output/ExtractTextInfoFromPDFWithInMemoryAuthCredentials.zip")
except (ServiceApiException, ServiceUsageException, SdkException):
logging.exception("Exception encountered while executing operation")
|
python
|
import os
import tempfile
from contextlib import contextmanager
from collections import OrderedDict
from neupy import plots, layers, algorithms
from neupy.plots.layer_structure import exclude_layer_from_graph
from base import BaseTestCase
@contextmanager
def reproducible_mktemp():
name = tempfile.mktemp()
real_mktemp = tempfile.mktemp
tempfile.mktemp = lambda *args, **kwargs: name
    try:
        yield
    finally:
        # Restore the real mktemp even if the body raises
        tempfile.mktemp = real_mktemp
class LayerStructurePlotTestCase(BaseTestCase):
def test_that_layer_structure_does_not_fail(self):
connection = layers.Input(10) > layers.Sigmoid(1)
with tempfile.NamedTemporaryFile() as temp:
filesize_before = os.path.getsize(temp.name)
plots.layer_structure(connection, filepath=temp.name, show=False)
filesize_after = os.path.getsize(temp.name)
self.assertEqual(filesize_before, 0)
self.assertGreater(filesize_after, filesize_before)
def test_that_layer_structure_for_network(self):
connection = layers.Input(10) > layers.Sigmoid(1)
network = algorithms.GradientDescent(connection)
with tempfile.NamedTemporaryFile() as temp:
filesize_before = os.path.getsize(temp.name)
plots.layer_structure(network, filepath=temp.name, show=False)
filesize_after = os.path.getsize(temp.name)
self.assertEqual(filesize_before, 0)
self.assertGreater(filesize_after, filesize_before)
def test_layer_structure_undefined_file_name(self):
connection = layers.Input(10) > layers.Sigmoid(1)
with reproducible_mktemp():
plots.layer_structure(connection, filepath=None, show=False)
temp_filename = tempfile.mktemp()
filesize_after = os.path.getsize(temp_filename)
self.assertGreater(filesize_after, 0)
class LayerStructureExcludeLayersPlotTestCase(BaseTestCase):
def test_layer_structure_exclude_layer_nothing_to_exclude(self):
connection = layers.Input(10) > layers.Sigmoid(1)
graph = connection.graph.forward_graph
new_graph = exclude_layer_from_graph(graph, tuple())
self.assertEqual(graph, new_graph)
def test_layer_structure_exclude_layer(self):
input_layer = layers.Input(10)
connection = input_layer > layers.Sigmoid(1)
graph = connection.graph.forward_graph
actual_graph = exclude_layer_from_graph(graph, [layers.Sigmoid])
expected_graph = OrderedDict()
expected_graph[input_layer] = []
self.assertEqual(expected_graph, actual_graph)
def test_layer_structure_ignore_layers_attr(self):
input_layer = layers.Input(10)
connection = input_layer > layers.Sigmoid(1)
with tempfile.NamedTemporaryFile() as temp:
plots.layer_structure(connection, filepath=temp.name, show=False,
ignore_layers=[])
filesize_first = os.path.getsize(temp.name)
with tempfile.NamedTemporaryFile() as temp:
plots.layer_structure(connection, filepath=temp.name, show=False,
ignore_layers=[layers.Sigmoid])
filesize_second = os.path.getsize(temp.name)
# First one should have more layers to draw
# than the second one
self.assertGreater(filesize_first, filesize_second)
|
python
|
"""Wrapper of the multiprocessing module for multi-GPU training."""
# To avoid duplicating the graph structure for node classification or link prediction
# training we recommend using fork() rather than spawn() for multiple GPU training.
# However, we need to work around https://github.com/pytorch/pytorch/issues/17199 to
# make fork() and openmp work together.
from .. import backend as F
if F.get_preferred_backend() == 'pytorch':
# Wrap around torch.multiprocessing...
from torch.multiprocessing import *
# ... and override the Process initializer
from .pytorch import Process
else:
# Just import multiprocessing module.
from multiprocessing import * # pylint: disable=redefined-builtin
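
# Illustrative usage sketch (an assumption, not part of this module): because
# the names above are re-exported, multi-GPU training code can stay
# backend-agnostic:
#
#   from dgl import multiprocessing as mp
#   procs = [mp.Process(target=train, args=(rank,)) for rank in range(num_gpus)]
#   for p in procs:
#       p.start()
#   for p in procs:
#       p.join()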
|
python
|
"""
payload_generators.py
Copyright 2007 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import gtk
import gobject

# w3af normally installs gettext's _() globally at start-up; provide a fallback
# here so this module is importable on its own.
from gettext import gettext as _
def create_generator_menu(text_view_obj):
"""
    :return: A menu that contains the generator classes for the fuzzy request editor window.
"""
# The main menu
menu = gtk.Menu()
# Number generators
number_generator_mi = gtk.MenuItem(_("Number generator"))
number_generator_mi.connect(
'activate', print_generator_text, text_view_obj, number_generator())
menu.append(number_generator_mi)
return menu
def print_generator_text(widget, text_view_obj, generator_instance):
"""
    Print the generator name to the text view at the current cursor position.
"""
pass
class generic_generator(object):
def __init__(self):
"""
Provides generic methods and attributes for generators.
w3af generator objects are used to create a python generator for letters, numbers, and
other interesting things. The generator class is called from the fuzzy request editor.
"""
self._generator_name = None
class number_generator(generic_generator):
def __init__(self):
"""
Provides generic methods and attributes for generators.
w3af generator objects are used to create a python generator for letters, numbers, and
other interesting things. The generator class is called from the fuzzy request editor.
"""
generic_generator.__init__(self)
self._generator_name = 'number_generator'
|
python
|
from django.http import JsonResponse
from django.views.generic.edit import FormView
from .forms import (
UploadAttachmentForm, DeleteAttachmentForm
)
class FileUploadView(FormView):
"""Provide a way to show and handle uploaded files in a request."""
form_class = UploadAttachmentForm
def upload_file(self, *args, **kwargs):
"""Abstract method must be overridden."""
raise NotImplementedError
def form_valid(self, form):
"""If the form is valid, return JSON file list after saving them"""
data = self.upload_file(uploaded_files=self.request.FILES)
return JsonResponse(data)
def form_invalid(self, form):
"""If the form is invalid, return HTTP 400 error"""
return JsonResponse({
'status': 'false',
'message': 'Bad Request'
}, status=400)
class FileDeleteView(FormView):
"""Provide a way to show and handle files to be deleted in a request."""
form_class = DeleteAttachmentForm
def delete_file(self, *args, **kwargs):
"""Abstract method must be overridden."""
raise NotImplementedError
def form_valid(self, form):
"""If the form is valid, return JSON file list after deleting them"""
data = self.delete_file(form=form, user=self.request.user)
return JsonResponse(data)
def form_invalid(self, form):
"""If the form is invalid, return HTTP 400 error"""
return JsonResponse({
'status': 'false',
'message': 'Bad Request'
}, status=400)
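

# Illustrative sketch (not part of the original module): a hypothetical subclass
# showing how upload_file() is expected to be overridden. The "attachments"
# field name and the returned payload shape are assumptions, not an API defined
# above.
class ExampleAttachmentUploadView(FileUploadView):
    def upload_file(self, uploaded_files=None, **kwargs):
        saved = []
        for f in uploaded_files.getlist('attachments'):
            # A real implementation would persist each file (e.g. via
            # django.core.files.storage.default_storage); here we only echo
            # back names and sizes.
            saved.append({'name': f.name, 'size': f.size})
        return {'status': 'true', 'files': saved}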
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""CSSubmissionCable model"""
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from peewee import DateTimeField, ForeignKeyField
from playhouse.fields import ManyToManyField
from .base import BaseModel
from .challenge_binary_node import ChallengeBinaryNode
from .challenge_set import ChallengeSet
from .ids_rule import IDSRule
from .round import Round
class CSSubmissionCable(BaseModel):
"""CSSubmissionCable model. Communicate what patch submit to ambassador"""
cs = ForeignKeyField(ChallengeSet, related_name='submission_cables')
ids = ForeignKeyField(IDSRule, related_name='submission_cables')
cbns = ManyToManyField(ChallengeBinaryNode, related_name='submission_cables')
round = ForeignKeyField(Round, related_name='cs_submission_cables')
processed_at = DateTimeField(null=True)
@classmethod
def create(cls, *args, **kwargs):
if 'cbns' in kwargs:
cbns = kwargs.pop('cbns')
obj = super(cls, cls).create(*args, **kwargs)
obj.cbns = cbns
return obj
@classmethod
def get_or_create(cls, cs, ids, round, cbns=None): # pylint: disable=arguments-differ
if cbns is None:
cbns = []
results = cls.select() \
.where((cls.cs == cs)
& (cls.ids == ids)
& (cls.round == round))
for cssb in results:
found = {cbn.id for cbn in cssb.cbns}
expected = {cbn.id for cbn in cbns}
if (len(found) == len(expected)) and \
(len(found & expected) == len(expected)):
return (cssb, False)
return (cls.create(cs=cs, ids=ids, cbns=cbns, round=round), True)
def process(self):
self.processed_at = datetime.now()
self.save()
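
# Illustrative usage sketch (assumptions: existing ChallengeSet, IDSRule and
# Round rows bound to cs, ids and current_round; none are created here):
#
#   cable, created = CSSubmissionCable.get_or_create(cs, ids, current_round, cbns=cbns)
#   if created:
#       ...  # hand the patched binaries to the ambassador
#   cable.process()  # stamp processed_at once the submission has been handled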
|
python
|
import functools
import gc
import os
import sys
import traceback
import warnings
def is_in_ipython():
"Is the code running in the ipython environment (jupyter including)"
program_name = os.path.basename(os.getenv('_', ''))
if ('jupyter-notebook' in program_name or # jupyter-notebook
'ipython' in program_name or # ipython
'jupyter' in program_name or # jupyter
'JPY_PARENT_PID' in os.environ): # ipython-notebook
return True
else:
return False
IS_IN_IPYTHON = is_in_ipython()
def is_in_colab():
if not is_in_ipython(): return False
try:
from google import colab
return True
    except ImportError:
        return False
IS_IN_COLAB = is_in_colab()
def is_in_kaggle_kernel():
    # Use .get() so a missing PYTHONPATH does not raise a KeyError
    if 'kaggle' in os.environ.get('PYTHONPATH', ''):
        return True
    else:
        return False
|
python
|
import collections
import math
import numbers
import numpy as np
from .. import base
from .. import optim
from .. import utils
__all__ = [
'LinearRegression',
'LogisticRegression'
]
class GLM:
"""Generalized Linear Model.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately.
loss (optim.Loss): The loss function to optimize for.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
"""
def __init__(self, optimizer, loss, l2, intercept, intercept_lr, clip_gradient, initializer):
self.optimizer = optimizer
self.loss = loss
self.l2 = l2
self.intercept = intercept
self.intercept_lr = (
optim.schedulers.Constant(intercept_lr)
if isinstance(intercept_lr, numbers.Number) else
intercept_lr
)
self.clip_gradient = clip_gradient
self.weights = collections.defaultdict(initializer)
self.initializer = initializer
def _raw_dot(self, x):
return utils.math.dot(self.weights, x) + self.intercept
def _eval_gradient(self, x, y, sample_weight):
"""Returns the gradient for a given observation.
This logic is put into a separate function for testing purposes.
"""
loss_gradient = self.loss.gradient(y_true=y, y_pred=self._raw_dot(x))
# Apply the sample weight
loss_gradient *= sample_weight
# Clip the gradient to avoid numerical instability
loss_gradient = utils.math.clamp(
loss_gradient,
minimum=-self.clip_gradient,
maximum=self.clip_gradient
)
return (
{
i: (
xi * loss_gradient +
2. * self.l2 * self.weights.get(i, 0)
)
for i, xi in x.items()
},
loss_gradient
)
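
    # Note on the maths implemented above and used by fit_one() below: for each
    # feature i the weight gradient is
    #     dJ/dw_i = x_i * dL/dy_hat + 2 * l2 * w_i
    # (after sample weighting and clipping of dL/dy_hat), i.e. the loss gradient
    # w.r.t. the raw prediction scaled by the feature value, plus the derivative
    # of the L2 penalty. The scalar loss gradient is returned separately so the
    # intercept can be updated with its own learning-rate schedule (intercept_lr).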
def fit_one(self, x, y, sample_weight=1.):
# Some optimizers need to do something before a prediction is made
self.weights = self.optimizer.update_before_pred(w=self.weights)
# Calculate the gradient
gradient, loss_gradient = self._eval_gradient(x=x, y=y, sample_weight=sample_weight)
# Update the intercept
self.intercept -= self.intercept_lr.get(self.optimizer.n_iterations) * loss_gradient
# Update the weights
self.weights = self.optimizer.update_after_pred(w=self.weights, g=gradient)
return self
class LinearRegression(GLM, base.Regressor):
"""Linear regression.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
            that the intercept is handled separately. Defaults to ``optim.SGD(optim.schedulers.InverseScaling(.01, .25))``.
loss (optim.RegressionLoss): The loss function to optimize for. Defaults to
``optim.losses.SquaredLoss``.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import preprocessing
>>> X_y = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LinearRegression(intercept_lr=.1)
... )
>>> metric = metrics.MAE()
>>> model_selection.progressive_val_score(X_y, model, metric)
MAE: 0.616405
>>> model['LinearRegression'].intercept
38.000439
Note:
Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
to converge.
"""
def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
clip_gradient=1e12, initializer=None):
super().__init__(
optimizer=(
optim.SGD(optim.schedulers.InverseScaling(.01, .25))
if optimizer is None else
optimizer
),
loss=optim.losses.Squared() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_one(self, x):
return self.loss.mean_func(self._raw_dot(x))
def debug_one(self, x, decimals=5, **print_params):
"""
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import preprocessing
>>> X_y = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LinearRegression(intercept_lr=.1)
... )
>>> for x, y in X_y:
... y_pred = model.predict_one(x)
... model = model.fit_one(x, y)
>>> model.debug_one(x)
0. Input
--------
gallup: 43.84321 (float)
ipsos: 40.57068 (float)
morning_consult: 37.81875 (float)
ordinal_date: 737389 (int)
rasmussen: 40.10469 (float)
you_gov: 41.63691 (float)
<BLANKLINE>
1. StandardScaler
-----------------
gallup: 1.18751 (float)
ipsos: -0.04683 (float)
morning_consult: -1.22583 (float)
ordinal_date: 1.72946 (float)
rasmussen: -0.23857 (float)
you_gov: 0.44131 (float)
<BLANKLINE>
2. LinearRegression
-------------------
Name Value Weight Contribution
Intercept 1.00000 38.00044 38.00044
ordinal_date 1.72946 2.23125 3.85885
gallup 1.18751 0.28647 0.34019
you_gov 0.44131 -0.01270 -0.00560
ipsos -0.04683 1.01815 -0.04768
rasmussen -0.23857 0.45099 -0.10759
morning_consult -1.22583 0.35181 -0.43126
<BLANKLINE>
Prediction: 41.60735
"""
def fmt_float(x):
return '{: ,.{prec}f}'.format(x, prec=decimals)
names = list(map(str, x.keys())) + ['Intercept']
values = list(map(fmt_float, list(x.values()) + [1]))
weights = list(map(fmt_float, [self.weights.get(i, 0) for i in x] + [self.intercept]))
contributions = [xi * self.weights.get(i, 0) for i, xi in x.items()] + [self.intercept]
order = reversed(np.argsort(contributions))
contributions = list(map(fmt_float, contributions))
table = utils.pretty.print_table(
headers=['Name', 'Value', 'Weight', 'Contribution'],
columns=[names, values, weights, contributions],
order=order
)
print(table, **print_params)
class LogisticRegression(GLM, base.BinaryClassifier):
"""Logistic regression.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
            that the intercept is handled separately. Defaults to ``optim.SGD(.01)``.
loss (optim.BinaryLoss): The loss function to optimize for. Defaults to
``optim.losses.Log``.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import optim
>>> from creme import preprocessing
>>> X_y = datasets.Phishing()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer=optim.SGD(.1))
... )
>>> metric = metrics.Accuracy()
>>> model_selection.progressive_val_score(X_y, model, metric)
Accuracy: 88.96%
Note:
Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
to converge.
"""
def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
clip_gradient=1e12, initializer=None):
super().__init__(
optimizer=optim.SGD(.01) if optimizer is None else optimizer,
loss=optim.losses.Log() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_proba_one(self, x):
p = self.loss.mean_func(self._raw_dot(x)) # Convert logit to probability
return {False: 1. - p, True: p}
|
python
|
import sys
import os
import re
import shutil
import tempfile
import subprocess
from distutils import dir_util
from typing import Any, List, Mapping, Set
import pathspec
REPLACE_PATTERN = re.compile(r"#\[(\w+)\]#")
HELP = """
Usage: netbeansify <input directory> [options]
Available Options:
--help Display this help message.
--out <dir> Specify the output directory; if --zip is set, this is optional.
--sourcepath <dir> Specify the input directory (overrides the one already specified).
--name <project_name> Specify the project name (default: input directory name).
--mainclass <class> Specify the main class, including the package (default: project name).
--sourcever <ver> Specify the source code's compatible Java version (default: 11).
--targetver <ver> Specify the target's compatible Java version (default: 11).
--jvmargs <args> Specify additional args to pass to the JVM during execution.
--javacargs <args> Specify additional args to pass to javac during compilation.
--precommand <cmd> Specify a command to run before generating the files; the command is
executed in the source directory. Multiple commands can be
used by chaining with &&.
--postcommand <cmd> Specify a command to run after generating the files; the command is
executed in the destination directory. Multiple commands can be
used by chaining with &&.
--template <dir> Specify the template file directory (default: "template/" in the Python
file's directory).
--zip Create a NetBeans project zip named ProjectName.zip in the current
directory; if this is set, --out is optional.
--nologo Do not include netbeanz.png in the output.
--verbose Print more output.
netbeansifier also supports gitignore-style ignore files.
Files named .nbignore contain patterns for files/directories that are excluded during copying.
The file itself is also ignored.
You can also make a netbeansifierfile. Each line will be treated as a command-line option.
Note: Because of the --precommand and --postcommand options, running an untrusted netbeansifierfile
could result in malicious commands being executed!
""".strip()
def main():
"""
Main program with I/O and argument handling.
"""
# Args to be used later
# Items starting with # are used internally
# Other items are used when replacing strings in the template
args = {
"project_name": None,
"javac_source": "11",
"javac_target": "11",
"main_class": None,
"#out": None,
"#src": None,
"#template": os.path.join(os.path.dirname(__file__), "template/"),
}
flags = set()
# Map from long option name to arg name (in the args dict)
long_opts = {
"name": "project_name",
"sourcever": "javac_source",
"targetver": "javac_target",
"mainclass": "main_class",
"jvmargs": "jvm_args",
"javacargs": "javac_args",
"out": "#out",
"template": "#template",
"precommand": "#pre_command",
"postcommand": "#post_command",
"sourcepath": "#src",
}
long_flags = {"zip", "nologo", "verbose"}
# Extract command line arguments from netbeansifierfile
cmdargs = []
try:
with open("./netbeansifierfile", "r") as nbconfig:
for line in nbconfig:
line = line.strip()
# Comments
if not line or line.startswith("#"):
continue
# Long option
if line.startswith("--"):
try:
# Split by space and do the two pieces separately
# For long options with parameters
i = line.index(" ")
option = line[:i]
arg = line[i + 1:]
cmdargs.append(option)
cmdargs.append(arg)
except ValueError:
cmdargs.append(line)
# Other argument
else:
cmdargs.append(line)
except OSError:
pass
cmdargs.extend(sys.argv[1:])
# Parse command line arguments
source_path = None
it = iter(cmdargs)
for s in it:
if s.startswith("--"):
if s == "--help":
print(HELP)
sys.exit(0)
try:
arg_name = s[2:]
# Check whether it's a flag or an argument
if arg_name in long_flags:
flags.add(arg_name)
else:
opt = long_opts[arg_name]
args[opt] = next(it)
except KeyError:
print("Invalid option:", s, file=sys.stderr)
sys.exit(1)
except StopIteration:
print("Option", s, "needs a value", file=sys.stderr)
sys.exit(1)
else:
source_path = s
if args["#src"] is not None:
source_path = args["#src"]
if source_path is None or not os.path.isdir(source_path):
print("Source path not provided", file=sys.stderr)
sys.exit(1)
if args["#out"] is None and "zip" not in flags:
print("Destination path not provided", file=sys.stderr)
sys.exit(1)
# Default values
args["project_name"] = args["project_name"] or os.path.basename(source_path)
args["main_class"] = args["main_class"] or args["project_name"]
# No output directory is specified - must be making a zip
if args["#out"] is None:
# Use a temporary directory
with tempfile.TemporaryDirectory() as tempdir:
# Make a temp dir inside the zip to netbeansify
out_path = os.path.join(tempdir, args["project_name"])
args["#out"] = out_path
netbeansify(source_path, args, flags)
print("Files generated successfully. Making zip file...")
shutil.make_archive(args["project_name"], "zip", tempdir, args["project_name"])
else:
netbeansify(source_path, args, flags)
if "zip" in flags:
print("Files generated successfully. Making zip file...")
shutil.make_archive(args["project_name"], "zip", os.path.dirname(os.path.abspath(args["#out"])), os.path.basename(args["#out"]))
print("Done.")
def netbeansify(source_path: str, args: Mapping[str, Any], flags: Set[str]):
"""
Generate the netbeansified project.
"""
verbose = "verbose" in flags
print("Netbeansify started.")
# Make sure these paths are absolute
source_path = os.path.abspath(source_path)
args["#out"] = os.path.abspath(args["#out"])
if args.get("#pre_command"):
print("Running pre-command; output:\n")
old_workdir = os.getcwd()
os.chdir(source_path)
subprocess.run(args["#pre_command"], shell=True, check=True)
os.chdir(old_workdir)
print("\nPre-command exited with success.")
print("Copying template files...")
# Copy over the template
dir_util.copy_tree(args["#template"], args["#out"])
if verbose:
print("Starting template file generation...")
for dirpath, _, files in os.walk(args["#out"]):
for file in files:
file = os.path.join(dirpath, file)
if verbose:
print("Generating", file)
try:
with open(file, "r") as f:
text = f.read()
with open(file, "w") as f:
f.write(REPLACE_PATTERN.sub(lambda match: args.get(match.group(1), ""), text))
except UnicodeDecodeError:
if verbose:
print("File", file, "is a binary, skipping.")
# Copy over the files
def copy_dir(src_dir: str, dest_dir: str, ignores: List[Any]):
if verbose:
print(f"Copying '{src_dir}' to '{dest_dir}'...")
ignore_file = os.path.join(src_dir, ".nbignore")
has_ignore = False
if os.path.exists(ignore_file):
with open(ignore_file, "r") as f:
ignores.append(pathspec.PathSpec.from_lines("gitwildmatch", f))
has_ignore = True
os.makedirs(dest_dir, exist_ok=True)
with os.scandir(src_dir) as sdit:
for entry in sdit:
# Ignore ignore files and netbeansifierfiles
if entry.name == ".nbignore" or entry.name == "netbeansifierfile":
continue
# Match ignore patterns
# Append a trailing slash for directories, otherwise patterns such as dir/ won't match properly
if any(spec.match_file(os.path.join(entry.path, "") if entry.is_dir() else entry.path) for spec in ignores):
continue
if entry.is_file():
# copy the file over
shutil.copyfile(os.path.join(src_dir, entry.name), os.path.join(dest_dir, entry.name))
if verbose:
print(f"File copied: '{entry.name}'")
elif entry.is_dir():
# Make sure that this directory is not the destination
if os.path.abspath(entry.path) == args["#out"]:
continue
copy_dir(os.path.join(src_dir, entry.name), os.path.join(dest_dir, entry.name), ignores)
if has_ignore:
ignores.pop()
print("Template files generated. Copying source files...")
copy_dir(source_path, os.path.join(args["#out"], "src"), [])
print("Source files copied successfully.")
if "nologo" not in flags:
try:
print("Copying logo...")
shutil.copy(os.path.join(os.path.dirname(__file__), "netbeanz.png"), args["#out"])
except OSError:
print("Warning: Logo not found! This is very important!", file=sys.stderr)
if args.get("#post_command"):
print("Running post-command; output:\n")
old_workdir = os.getcwd()
os.chdir(args["#out"])
subprocess.run(args["#post_command"], shell=True, check=True)
os.chdir(old_workdir)
print("\nPost-command exited with success.")
|
python
|
from __future__ import annotations
import pathlib
from contextlib import contextmanager
from typing import Any, Iterable, Iterator, Union
from alembic import command
from alembic.config import Config
from dcp import Storage
from dcp.storage.base import DatabaseStorageClass
from sqlalchemy.engine import Result
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm.session import Session, SessionTransaction, sessionmaker
from sqlalchemy.sql import Delete, Update
from sqlalchemy.sql.expression import select
from sqlalchemy.sql.functions import func
from sqlalchemy.sql.selectable import Select
class MetadataApi:
def __init__(self, env_id: str, storage: Storage):
self.env_id = env_id
self.storage = storage
self.engine = self.storage.get_api().get_engine()
self.Session = sessionmaker(self.engine)
self.initialize_metadata_database()
self.active_session = None
@contextmanager
def begin(self) -> Iterator[Session]:
# if self.active_session is None:
with self.Session.begin() as sess:
self.active_session = sess
yield sess
self.active_session = None
# else:
# # TODO: handle nested tx
# yield self.active_session
@contextmanager
def begin_nested(self) -> Iterator[SessionTransaction]:
assert self.active_session is not None
with self.active_session.begin_nested() as sess_tx:
yield sess_tx
@contextmanager
def ensure_session(self) -> Iterator[Session]:
if self.active_session is None:
with self.Session.begin() as sess:
self.active_session = sess
yield sess
self.active_session = None
else:
yield self.active_session
def get_session(self) -> Session:
if self.active_session is None:
raise ValueError(
"No metadata session active. Call MetadataApi.begin() beforehand"
)
return self.active_session
def augment_statement(
self, stmt: Union[Select, Update, Delete], filter_env: bool = True
) -> Select:
if filter_env:
stmt = stmt.filter_by(env_id=self.env_id)
return stmt
def execute(
self, stmt: Union[Select, Update, Delete], filter_env: bool = True
) -> Result:
stmt = self.augment_statement(stmt, filter_env=filter_env)
return self.get_session().execute(stmt)
def count(self, stmt: Select, filter_env: bool = True) -> int:
stmt = select(func.count()).select_from(stmt.subquery())
return self.execute(stmt).scalar_one()
def add(self, obj: Any, set_env: bool = True):
if obj.env_id is None and set_env:
obj.env_id = self.env_id
self.get_session().add(obj)
def add_all(self, objects: Iterable, set_env: bool = True):
for obj in objects:
if obj.env_id is None and set_env:
obj.env_id = self.env_id
self.get_session().add_all(objects)
def flush(self, objects=None):
if objects:
self.get_session().flush(objects)
else:
self.get_session().flush()
def delete(self, obj):
sess = self.get_session()
if obj in sess.new:
sess.expunge(obj)
else:
sess.delete(obj)
def commit(self):
self.get_session().commit()
### Alembic
def initialize_metadata_database(self):
if not issubclass(
self.storage.storage_engine.storage_class, DatabaseStorageClass
):
raise ValueError(
f"metadata storage expected a database, got {self.storage}"
)
# BaseModel.metadata.create_all(conn)
# try:
self.migrate_metdata_database()
# except SQLAlchemyError as e:
# # Catch database exception, meaning already created, just stamp
# # For initial migration
# # TODO: remove once all 0.2 systems migrated?
# logger.warning(e)
# self.stamp_metadata_database()
# self.migrate_metdata_database()
def migrate_metdata_database(self):
alembic_cfg = self.get_alembic_config()
if self.engine is not None:
alembic_cfg.attributes["connection"] = self.engine
command.upgrade(alembic_cfg, "head")
def get_alembic_config(self) -> Config:
dir_path = pathlib.Path(__file__).parent.absolute()
cfg_path = dir_path / "../../migrations/alembic.ini"
alembic_cfg = Config(str(cfg_path))
alembic_cfg.set_main_option("sqlalchemy.url", self.storage.url)
return alembic_cfg
def stamp_metadata_database(self):
alembic_cfg = self.get_alembic_config()
if self.engine is not None:
alembic_cfg.attributes["connection"] = self.engine
command.stamp(alembic_cfg, "23dd1cc88eb2")
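
# Illustrative usage sketch (assumptions: a database Storage URL and a mapped
# model with an `env_id` column, here called SomeMetadataModel; neither is
# defined in this module):
#
#   api = MetadataApi("my_env", Storage("sqlite://"))
#   with api.begin():
#       api.add(SomeMetadataModel(...))                  # env_id filled in by add()
#       rows = api.execute(select(SomeMetadataModel)).scalars().all()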
|
python
|
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
from random import randint
# pip install prototools
from prototools import int_input, text_align, textbox
from prototools.colorize import *
LIMITE, MIN, MAX, intentos = 5, 1, 10, 0
NUMERO_ALEATORIO = randint(MIN, MAX)
textbox(
cyan("ADIVINA EL NUMERO"),
align="center", light=False, bcolor="magenta", style="double"
)
print(cyan("Hola! Cuál es tu nombre?"))
usuario = input(yellow(u"\u25ba\u25ba" + " "))
print(cyan(f"Bien, estoy pensando en un número entre el {MIN} y el {MAX}"))
print(cyan(f"Tienes {LIMITE} intentos!"))
while intentos < LIMITE:
text_align(magenta(f"¡Adivina! {intentos + 1}° intento"))
numero_ingresado = int_input(
yellow(u"\u25ba\u25ba" + " "), min=MIN, max=MAX, lang="es",
)
intentos += 1
if numero_ingresado == NUMERO_ALEATORIO:
print(magenta(f"¡Felicidades {usuario}! ¡Has ganado!"))
print(magenta(f"¡Adivinaste el número en {intentos} intentos!"))
break
elif numero_ingresado > NUMERO_ALEATORIO:
text_align(yellow("El número es mayor al que estoy pensando"))
else:
text_align(yellow("El número es menor al que estoy pensando"))
else:
print(red(f"¡Lo siento, {usuario}! ¡Has perdido!"))
print(red(f"El número que pensé era {NUMERO_ALEATORIO}"))
|
python
|
# -*- coding:utf-8 -*-
from xml.dom import minidom
from xlrd import open_workbook
import json
import re
jdict = {}
axls = open_workbook('student.xls')
sheet1 = axls.sheet_by_name('student')
for i in range(3):
sign = str(sheet1.cell(i,0).value)
jdict[sign] = []
for j in range(1,5):
if j > 1:
jdict[sign].append(int(sheet1.cell(i,j).value))
else:
jdict[sign].append(str(sheet1.cell(i,j).value).decode('gbk'))
#jdata = json.dumps(jdict,indent=4, ensure_ascii=False)
# Embedding jdata causes problems with the quotation marks, so jdict is embedded instead,
# but its formatting is ugly; the regex hack below cleans up the layout.
s = str(jdict)
s = re.sub('{','{\n\t ',s)
s = re.sub('}','\n}',s)
s = re.sub('],','],\n\t',s)
print s
doc = minidom.Document()
root = doc.createElement('root')
doc.appendChild(root)
students = doc.createElement('students')
comment = doc.createComment(u'\n\t学生信息表\n\t"id" : [名字, 数学, 语文, 英文]\n')
students.appendChild(comment)
students_text = doc.createTextNode(s.decode('unicode_escape'))
students.appendChild(students_text)
root.appendChild(students)
f = open("student.xml", "wb")
f.write(doc.toprettyxml(indent = "", newl = "\n", encoding = "utf-8"))
f.close()
|
python
|
import os
import pytest
from analyzer.ResultsCollector.PytestResults import PytestResult
pytest_version = 'pytest-6.2.5'
@pytest.fixture(scope='module')
def data(data_dir):
return os.path.join(data_dir, 'PytestResults')
@pytest.fixture(scope='module')
def happy_day(data):
with open(os.path.join(data, 'happy_day.txt'), 'r') as f1:
with open(os.path.join(data, 'happy_day2.txt'), 'r') as f2:
return [PytestResult(f1.read()), PytestResult(f2.read())]
@pytest.fixture(scope='module')
def fail_data(data):
files = ['wrong_end.txt', 'wrong_end2.txt', 'wrong_start_log.txt']
fails = []
for f in files:
with open(os.path.join(data, f), 'r') as file:
fails.append(PytestResult(file.read()))
return fails
def test_get_framework_happyday(happy_day):
for result in happy_day:
assert result.get_test_framework() == pytest_version
def test_get_framework_fail(data):
with open(os.path.join(data, 'no_framework.txt')) as f:
result = PytestResult(f.read())
assert result.get_test_framework() != pytest_version
assert result.get_test_framework() == str()
def test_detect_happyday(happy_day):
for result in happy_day:
assert result.detect()
def test_detect_fail(fail_data):
for f in fail_data:
assert not f.detect()
def test_get_failed_count(data, fail_data):
for f in fail_data:
assert f.get_failed_test_count() == 0
def failed_count(filename, expected):
with open(os.path.join(data, filename), 'r') as fi:
happy_day1 = PytestResult(fi.read())
assert happy_day1.get_failed_test_count() == expected
failed_count('happy_day.txt', 0)
failed_count('happy_day2.txt', 1)
def test_get_successful_count(data, fail_data):
for f in fail_data:
assert f.get_successful_test_count() == 0
def success_count(filename, expected):
with open(os.path.join(data, filename), 'r') as fi:
happy_day1 = PytestResult(fi.read())
assert happy_day1.get_successful_test_count() == expected
success_count('happy_day.txt', 3)
success_count('happy_day2.txt', 2)
|
python
|
# -*- coding: utf-8 -*-
import plotly.graph_objs as go
def draw(x1, y1,
x2, y2,
x3, y3,
safety_factor,
flag=None,
x4=None, y4=None,
x5=None, y5=None,
x6=None, y6=None,
x7=None, y7=None,
x8=None, y8=None,
x9=None, y9=None,
x10=None, y10=None,
x11=None, y11=None):
trace0 = go.Scatter(
x=x1,
y=y1,
mode='lines',
name='Ground curve',
line=dict(
color='blue'
),
xaxis='x1',
yaxis='y1'
)
trace1 = go.Scatter(
x=x2,
y=y2,
mode='lines',
name='Support (LE)',
line=dict(
color='red'
),
xaxis='x1',
yaxis='y1'
)
trace2 = go.Scatter(
x=x3,
y=y3,
mode='markers',
name=f'F.S.={safety_factor:0.2f}',
marker=dict(
size=7,
color='green'
),
xaxis='x1',
yaxis='y1'
)
trace3 = go.Scatter(
x=x4,
y=y4,
mode='lines',
name='Org. LDP',
line=dict(
color='blue',
width=1.5,
dash='dashdot'
),
xaxis='x1',
yaxis='y2'
)
data = [trace0, trace1, trace2, trace3]
shapes = list() # append the vertical lines later
if flag is not None:
for index, item in enumerate(x6):
if index % 30 == 0:
trace5 = go.Scatter(
x=item,
y=y6,
mode='lines',
line=dict(
color='gray',
width=1,
dash='dashdot'
),
visible=True, # 'legendonly' displays only in legend
showlegend=False,
legendgroup="group",
hoverinfo='none',
name='Diff. p_i',
opacity=0.4,
xaxis='x1',
yaxis='y2'
)
data.append(trace5)
trace5 = go.Scatter(
x=x6[-1],
y=y6,
mode='lines',
line=dict(
color='green',
width=1.5,
dash='dashdot'
),
visible=True,
showlegend=False,
legendgroup="group",
hoverinfo='none',
name='Diff. p_i',
opacity=1,
xaxis='x1',
yaxis='y2'
)
data.append(trace5)
# vertical lines
for i in (x2[0], x3[0]):
shapes.append({'type': 'line',
'xref': 'x1',
'yref': 'y2',
'x0': i,
'y0': 0,
'x1': i,
'y1': 80,
'line': {
'color': 'red',
'width': 1.5,
'dash': 'dashdot'},
'opacity' : 0.6
})
trace6 = go.Scatter(
x=x7,
y=y7,
mode='lines',
name='New LDP',
line=dict(
color='red',
width=2
),
xaxis='x1',
yaxis='y2'
)
data.append(trace6)
trace7 = go.Scatter(
x=x8,
y=y8,
mode='lines',
name='Support (NL)',
line=dict(
color='green'
),
xaxis='x2',
yaxis='y3',
# showlegend=False,
)
trace8 = go.Scatter(
x=x9,
y=y9,
mode='lines',
name='Flow Rate',
line=dict(
color='red'
),
xaxis='x2',
yaxis='y4'
)
trace9 = go.Scatter(
x=x10,
y=y10,
mode='lines',
name='Support (NL)',
line=dict(
color='green'
),
xaxis='x2',
yaxis='y4',
showlegend=False,
# hoverinfo='x+y'
)
trace10 = go.Scatter(
x=x11,
y=y11,
mode='markers',
name='Critical Point',
line=dict(
color='brown'
),
xaxis='x1',
yaxis='y1',
showlegend=True
)
data.extend([trace7, trace8, trace9, trace10])
layout = go.Layout(
plot_bgcolor='#f9f7f7',
showlegend=True,
margin=dict(
l=75,
r=50,
b=50,
t=50,
pad=4
),
titlefont=dict(
size=20,
),
hovermode='closest',
autosize=True,
width=1200,
height=800,
xaxis=dict(
rangemode='normal',
title='Tunnel Wall Displacement [m]',
range=[0, max(x1)],
tickformat='.3f',
domain=[0, 0.47],
anchor='y2'
),
yaxis=dict(
scaleratio=0.1,
tickformat='.2f',
title='Support Pressure [MPa]',
titlefont=dict(size=14),
range=[0, max(y1)],
domain=[0.55, 1]
),
xaxis2=dict(
title='Time [days]',
tickformat='.2f',
domain=[0.55, 1],
anchor='y4'
),
yaxis2=dict(
title='Distance from Tunnel Face [m]',
tickformat='.2f',
anchor='x1',
range=[80, -25],
domain=[0, 0.50]
),
yaxis3=dict(
title='Support Pressure [MPa]',
tickformat='.2f',
range = [0, max(y1)],
domain=[0.55, 1],
anchor='x2'
),
yaxis4=dict(
title='Stress SpC [MPa]',
tickformat='.2f',
domain=[0, 0.50],
anchor='x2',
rangemode='nonnegative'
),
legend=dict(
traceorder='normal',
font=dict(
family='arial',
size=12,
color='#000'
),
bgcolor='#E2E2E2',
bordercolor='#FFFFFF',
borderwidth=1.5
),
shapes=shapes
)
fig = go.Figure(data=data, layout=layout)
return fig
if __name__ == '__main__':
from plotly.offline import plot
from Ground_Curve import ground_curve as gc
values = gc()
if len(values) == 13:
p1, p2, p2_el, p3, p3_el, p4, p5, p6, v1, v1_el, v2, v2_el, v3 = values
else:
p1, p2, p2_el, p3, p3_el, p4, p5, p6, p7, p8, p9, p10, p11, v1, \
v1_el, v2, v2_el, v3 = values
flag = p3.x
if len(p3_el.y) != 0:
safety_factor_el = v1_el.val / v2_el.val
else:
safety_factor_el = 0
fig = draw(x1=p1.x, y1=p1.y,
x2=p2_el.x, y2=p2_el.y,
x3=p3_el.x, y3=p3_el.y,
safety_factor=safety_factor_el,
flag=flag,
x4=p4.x, y4=p4.y,
x5=p7.x, y5=p7.y,
x6=p8.x, y6=p8.y,
x7=p9.x, y7=p9.y,
x8=p5.x, y8=p5.y,
x9=p10.x, y9=p10.y,
x10=p11.x, y10=p11.y)
plot(fig, filename='ground_curve_basic.html')
|
python
|
'''
Created on 24 Nov 2015
@author: wnm24546
'''
from scipy.constants import c, h, k, pi
from scipy.optimize import curve_fit
from collections import OrderedDict
import numpy as np
from Lucky.LuckyExceptions import BadModelStateException
#k is kb
class CalculationService(object):
def __init__(self, pp):
self.parentPresenter = pp
self.planckResults = (0, 0, 0, 0)
self.wienResults = (0, 0, 0, 0)
self.twoColResults = (0, 0, 0, 0)
#TODO Spawn calculations and plots in a separate thread
def createCalcs(self, dM, debug=False):
self.updateModel(dM)
self.dsCalcs = LuckyCalculations(self.dsData, self.dsCalib,
self.integConf, self.bulbTemp, "Downstream Measurement")
self.usCalcs = LuckyCalculations(self.usData, self.usCalib,
self.integConf, self.bulbTemp, "Upstream Measurement")
self.dsCalcs.runCalculations()
self.usCalcs.runCalculations()
self.updateResults()
#Create plot objects once we've got some data to plot
self.dsPlots = LuckyPlots(self.dsCalcs, 'DS')
self.usPlots = LuckyPlots(self.usCalcs, 'US')
def updateCalcs(self):
#Perhaps add updateModel call?
self.dsCalcs.runCalculations()
self.usCalcs.runCalculations()
self.updateResults()
#Update the plots with new values from the calculations
self.dsPlots.updatePlots()
self.usPlots.updatePlots()
def updateResults(self):
def calculateResults(dsVal, usVal):
avs = (dsVal + usVal)/2
diff = abs(dsVal - usVal)
return [dsVal, usVal, avs, diff]
self.planckResults = calculateResults(self.dsCalcs.planckTemp, self.usCalcs.planckTemp)
self.wienResults = calculateResults(self.dsCalcs.wienTemp, self.usCalcs.wienTemp)
self.twoColResults = calculateResults(self.dsCalcs.twoColTemp, self.usCalcs.twoColTemp)
def updateModel(self, dM):
self.dsData, self.usData = self.openData(dM)
self.dsCalib, self.usCalib = self.openCalib(dM.calibType, dM.calibConfigData)
self.integConf = dM.integrationConf
self.bulbTemp = dM.calibConfigData.bulbTemp
def updateData(self, usData=None, dsData=None):
if (usData == None) and (dsData == None):
raise BadModelStateException("No data given for data update")
if dsData != None:
newData = np.loadtxt(dsData)
self.dsCalcs.update(data=newData)
if usData != None:
newData = np.loadtxt(usData)
self.usCalcs.update(data=newData)
def updateIntegration(self, integConf):
self.dsCalcs.update(integConf=integConf)
self.usCalcs.update(integConf=integConf)
def updateCalibration(self, calibType, calibConf):
self.dsCalib, self.usCalib = self.openCalib(calibType, calibConf)
self.bulbTemp = calibConf.bulbTemp
self.dsCalcs.update(calib=self.dsCalib, bulbTemp=self.bulbTemp)
self.usCalcs.update(calib=self.usCalib, bulbTemp=self.bulbTemp)
def openCalib(self, calibType, calibConfig):
calibFileLabels = calibConfig.calibFiles.keys()
dsCalib, usCalib = None, None
for i in range(len(calibType)):
if calibType[i] == 1:
dsCalib = str(calibConfig.calibFiles[calibFileLabels[2*i]])
usCalib = str(calibConfig.calibFiles[calibFileLabels[2*i+1]])
if None not in [dsCalib, usCalib]:
break
return np.loadtxt(dsCalib, unpack=True), np.loadtxt(usCalib, unpack=True)
def openData(self, dM):
return np.loadtxt(dM.usdsPair[0], unpack=True), np.loadtxt(dM.usdsPair[1], unpack=True)
def disposePlots(self):
self.dsPlots.dispose()
self.usPlots.dispose()
class LuckyCalculations(object): #TODO Make calcs use calcserv to get bulbTemp, integConf & calibset
def __init__(self, data, calib, integConf, bulbTemp, label, debug=False):
self.dataSet = data
self.calibSet = calib
self.intConf = integConf
self.bulbTemp = bulbTemp
self.label = label
self.planckPlotRange = [550, 900]
self.wienPlotRange = [1e9 / self.planckPlotRange[1], 1e9/self.planckPlotRange[0]]
#Prepare the data
self.normaliseData()
def update(self, data=None, integConf=None, calib=None, bulbTemp=None):
self.dataSet = data if (data != None) else self.dataSet
self.intConf = integConf if (integConf != None) else self.intConf
self.calibSet = calib if (calib != None) else self.calibSet
self.bulbTemp = bulbTemp if (bulbTemp != None) else self.bulbTemp
if (data != None) or (calib != None) or (bulbTemp != None):
self.normaliseData()
if integConf != None:
self.calculateRanges()
def normaliseData(self):
self.planckIdeal = self.planck(self.dataSet[0], 1, self.bulbTemp)
self.planckIdeal = np.reshape(self.planckIdeal, (1, len(self.planckIdeal)))
#This step computes the normalised dataset & concatenates it with the original data array
self.dataSet = np.concatenate((self.dataSet, self.dataSet[1] / self.calibSet[1] * self.planckIdeal), axis=0)
#We've changed the data so we need to recalculate the ranges:
self.calculateRanges()
def calculateRanges(self):
#Data sets for fitting or plotting, limited by integration range
self.invWL = 1e9 / self.dataSet[0]# For Wien function
self.invWLIntegLim = self.invWL[self.intConf[0]:self.intConf[1]]
self.wlIntegLim = self.dataSet[0][self.intConf[0]:self.intConf[1]]
self.RawIntegLim= self.dataSet[1][self.intConf[0]:self.intConf[1]]
self.normIntegLim = self.dataSet[2][self.intConf[0]:self.intConf[1]]
def runCalculations(self):
#Calculate functions over the range of data
self.wienData = self.wien(self.dataSet[0], self.dataSet[2])
self.wienDataIntegLim = self.wienData[self.intConf[0]:self.intConf[1]]
self.twoColData = self.twoColour(self.dataSet[0], self.dataSet[2], self.intConf[2])
self.twoColDataLim = self.twoColData[self.intConf[0]:self.intConf[1]] #twoColData limited between the integration boundaries
self.wavelengthredLim = self.wavelengthred[self.intConf[0]:self.intConf[1]]
#print "ecco i due colori"
#print self.twoColDataLim
self.a = int(round(min(self.twoColDataLim)))
self.b = int(round(max(self.twoColDataLim)))
self.binning = range(self.a, self.b, 30)
self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins= self.binning, density=False)
#old
#self.twoColHistFreq, self.twoColHistValues = np.histogram(self.twoColDataLim, bins=range(1500,5000,1), density=False)
#self.twoColHistValues = np.delete(self.twoColHistValues, len(self.twoColHistFreq), 0)
#Do fits
self.fitPlanck()
self.fitWien()
self.fitHistogram()
def fitPlanck(self):
#Do some fitting for Planck...
###
self.fitOkPlanck = 1
try:
self.planckFit, planckCov = curve_fit(self.planck, self.wlIntegLim, self.normIntegLim, [1,2000])
except ValueError:
print "Value Error Planck fit"
self.fitOkPlanck = 0
except RuntimeError:
print "Runtime Error Planck fit"
self.fitOkPlanck = 0
if self.fitOkPlanck == 1:
self.planckTemp = self.planckFit[1]
self.planckEmiss = self.planckFit[0]
#Planck with fit params(??)
self.planckFitData = self.planck(self.wlIntegLim, self.planckEmiss, self.planckTemp)
else:
self.planckTemp = 2000
#New method implementing a sliding average; useful for the histogram fit.
def moving_average(self, a, n=2) :
self.ret = np.cumsum(a, dtype=float)
self.ret[n:] = self.ret[n:] - self.ret[:-n]
return self.ret[n - 1:] / n
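#Worked example of the sliding average (illustrative values): for a = [1, 2, 3, 4] and n = 2
#the cumulative sum is [1, 3, 6, 10]; subtracting the shifted sum and dividing by n gives
#[1.5, 2.5, 3.5], i.e. the mean of each pair of adjacent values (n - 1 fewer elements than the input).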
def fitWien(self):
#Do some fitting for Wien...
###
self.fitOkWien = 1
if self.fitOkPlanck == 1:
try:
self.wienFit, wienCov = curve_fit(self.fWien, self.invWLIntegLim[(np.isfinite(self.wienDataIntegLim))], self.wienDataIntegLim[(np.isfinite(self.wienDataIntegLim))], p0=[1, self.planckTemp])
self.wienResidual = self.wienDataIntegLim - self.fWien(self.invWLIntegLim[(np.isfinite(self.wienDataIntegLim))], *self.wienFit)
except ValueError:
print "Value Error Wien fit"
self.fitOkWien = 0
except RuntimeError:
print "Runtime Error Wien fit"
self.fitOkWien = 0
if self.fitOkWien == 1:
self.wienTemp = self.wienFit[1]
else:
self.wienTemp = 2000
else:
self.wienTemp = 2000
def fitHistogram(self):
#Gaussian fit of two colour histogram
###
#print('averaged twocolhistvalues:')
#print self.moving_average(self.twoColHistValues)
self.fitOkGauss = 1
if self.fitOkPlanck == 1:
try:
self.histFit, histCov = curve_fit(self.gaus, self.moving_average(self.twoColHistValues), self.twoColHistFreq, p0=[1000,self.planckTemp,100])
except ValueError:
print "Value Error Gauss fit"
self.fitOkGauss = 0
except RuntimeError:
print "Runtime Error Gauss fit"
self.fitOkGauss = 0
if self.fitOkGauss == 1:
self.twoColTemp = self.histFit[1]
self.twoColErr = self.histFit[2]
else:
self.twoColTemp = np.mean(self.twoColDataLim)
self.twoColErr = np.std(self.twoColDataLim)
else:
self.twoColTemp = np.mean(self.twoColDataLim)
self.twoColErr = np.std(self.twoColDataLim)
#old
#def fitHistogram(self):
#Gaussian fit of two colour histogram
###
#self.histFit, histCov = curve_fit(self.gaus, self.twoColHistValues, self.twoColHistFreq, p0=[1000,self.planckTemp,100])
#self.twoColTemp = self.histFit[1]
#self.twoColErr = self.histFit[2]
#Planck function
def planck(self, wavelength, emiss, temp):
wavelength = wavelength * 1e-9
return emiss / np.power(wavelength, 5) * (2 * pi * h * np.power(c, 2)) / np.expm1((h * c)/(k * wavelength * temp))
#Wien function
def wien(self, wavelength, intens):
wavelength = wavelength * 1e-9
return self.wienBase(np.power(wavelength, 5) * intens / (2 * pi * h * np.power(c, 2)))
#Linear Wien function
def fWien(self, wavelength, emiss, temp):
# wavelength = wavelength * 1e-9
return self.wienBase(emiss) - (1/temp) * wavelength
#Wien support function (this is just recycling code)
def wienBase(self, exponent):
return k / (h * c) * np.log(exponent)
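#Note on the Wien branch: under the Wien approximation the Planck law reduces to
#I(wl) ~ emiss * (2*pi*h*c**2 / wl**5) * exp(-h*c / (wl*k*temp)), so
#wien(wl, I) = wienBase(I * wl**5 / (2*pi*h*c**2)) = (k/(h*c))*ln(emiss) - 1/(wl*temp).
#Plotted against 1/wl this is a straight line of slope -1/temp, which is exactly the
#linear model that fWien fits to extract the Wien temperature.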
#Two colour function
def twoColour(self, wavelength, intens, delta):
#wavelength = wavelength * 1e-9
nPoints = len(wavelength)
nWindows = nPoints - delta
twoCol = []
#def twoColCalc(wavelength, intens):
# return np.log(intens * np.power(wavelength, 5) / (2 * pi * h * np.power(c, 2))) * (k / (h *c))
for i in range(nWindows):
f1 = 1 / (wavelength[i]* 1e-9)
f2 = 1/ (wavelength[i + delta]* 1e-9)
i1 = np.log(intens[i]/2/pi/h/c**2/f1**5)*k/h/c #twoColCalc(wavelength[i], intens[i])
i2 = np.log(intens[i+delta]/2/pi/h/c**2/f2**5)*k/h/c #twoColCalc(wavelength[i + delta], intens[i+delta])
twoCol.append(abs((f2 - f1) / (i2 - i1)))
#for i in range(nWindows, nPoints):
# twoCol.append(float('nan'))
self.wavelengthred = wavelength[0:nPoints - delta]
return twoCol
#Gaussian for fit
def gaus(self, x, a, x0, sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
###
import matplotlib.pyplot as plt
class LuckyPlots(object):
def __init__(self, calcs, US_DS, debug=False):
if debug:
return
self.debug = debug
self.luckyCalcs = calcs
self.fig = plt.figure(self.luckyCalcs.label)
self.fig.suptitle(self.luckyCalcs.label, fontsize="16", weight="bold", color = 'b')
self.ax1 = self.fig.add_subplot(3, 2, 1)#Raw+Calib
self.ax2 = self.fig.add_subplot(3, 2, 3)#Planck
self.ax3 = self.fig.add_subplot(3, 2, 4)#Wien
self.ax3.xaxis.get_major_formatter().set_powerlimits((0, 1))
self.ax4 = self.fig.add_subplot(3, 2, 5)#2Colour
self.ax5 = self.fig.add_subplot(3, 2, 6)#Histogram
self.ax5.xaxis.get_major_formatter().set_powerlimits((0, 1))
self.ax6 = self.ax3.twinx()
#Layout settings for the plots
plt.subplots_adjust(wspace=0.3, hspace=0.7)
#One-time configuration of plots
self.ax1.set_title('Raw (blue) & Calibration Data (green)', fontsize= 13, style='italic', weight="bold")
self.ax1.set_xlabel('Wavelength [nm]', fontsize= 13)
self.ax1.grid(True, linestyle='-')
self.ax2.set_title('Planck Function Data', fontsize='13', style='italic', weight="bold")
self.ax2.set_xlabel('Wavelength [nm]', fontsize= 13)
self.ax3.set_ylabel("Planck Function [a.u.]", fontsize= 13)
#self.ax2.set_yticks([])
self.ax2.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9])
self.ax3.set_title('Wien Function Data', fontsize='13', style='italic', weight="bold")
self.ax3.set_xlabel(r'1/Wavelength [m$^{-1}$]', fontsize= 13)
self.ax3.set_ylabel("Wien Function", fontsize= 13)
self.ax3.set_yticks([])
self.ax4.set_title('Two-Colour Plot', fontsize='13', style='italic', weight="bold")
self.ax4.set_xlabel('Wavelength [nm]', fontsize= 13)
self.ax4.set_ylabel('Temperature [K]', fontsize= 13)
self.ax4.grid(True, linestyle='-')
self.ax5.set_title('Two-colour Histogram', fontsize='13', style='italic', weight="bold")
self.ax5.set_xlabel('Temperature [K]', fontsize= 13)
self.ax5.set_ylabel('Counts [a.u.]', fontsize= 13)
self.ax6.set_ylabel('Wien Residual', color='g', fontsize= 13)
self.updatePlots(redraw=False)
#ax1 = calibration and raw spectrum
#ax2 = planck spectrum
#ax3 = wien
#ax4 = 2-col
#ax5 =histogram
#ax6 = residuals in subplot (3,2,4)
if not self.debug:
#Draw the plots if we're not debugging
plt.ion()
plt.show()
mngr = plt.get_current_fig_manager()
if US_DS == 'US':
mngr.window.setGeometry(20,280,700, 700)
if US_DS == 'DS':
mngr.window.setGeometry(1000,280,700, 700)
#Needed to make plt appear!
# http://stackoverflow.com/questions/28269157/plotting-in-a-non-blocking-way-with-matplotlib
plt.pause(0.001)
def updatePlots(self, redraw=True):
#Raw and calibration data subgraph
self.ax1.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[1],
self.luckyCalcs.dataSet[0], self.luckyCalcs.calibSet[1],'green',self.luckyCalcs.wlIntegLim,self.luckyCalcs.RawIntegLim,'red')
self.ax1.set_ylim(0, self.getYMax(self.luckyCalcs.dataSet[1], self.luckyCalcs.calibSet[1]))
# self.ax1.set_ylim(0,50000) #TODO Get max fn.
#Planck data subgraph
#self.ax2.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[2],
# self.luckyCalcs.wlIntegLim, self.luckyCalcs.planckFitData, 'red')
#self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
#Planck data subgraph
if self.luckyCalcs.fitOkPlanck == 1:
self.ax2.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[2] / max(self.luckyCalcs.dataSet[2]),
self.luckyCalcs.wlIntegLim, self.luckyCalcs.planckFitData / max(self.luckyCalcs.dataSet[2]), 'red')
self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
self.ax2.set_ylim([0, 1])
else:
self.ax2.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.dataSet[2] / max(self.luckyCalcs.dataSet[2]))
self.ax2.set_xlim(*self.luckyCalcs.planckPlotRange)
self.ax2.set_ylim([0, 1])
#Wien data subgraph
if self.luckyCalcs.fitOkWien == 1 and self.luckyCalcs.fitOkPlanck == 1:
self.ax3.plot(self.luckyCalcs.invWL, self.luckyCalcs.wienData,
self.luckyCalcs.invWLIntegLim, self.luckyCalcs.fWien(self.luckyCalcs.invWLIntegLim,*self.luckyCalcs.wienFit), 'red')
self.ax3.set_xlim(*self.luckyCalcs.wienPlotRange)
else:
self.ax3.plot(self.luckyCalcs.invWL, self.luckyCalcs.wienData)
self.ax3.set_xlim(*self.luckyCalcs.wienPlotRange)
#Two Colour data subgraph
self.ax4.plot(self.luckyCalcs.wavelengthred, self.luckyCalcs.twoColData, 'b:',
self.luckyCalcs.wavelengthredLim, self.luckyCalcs.twoColDataLim, 'r:')
self.ax4.set_xlim(*self.luckyCalcs.planckPlotRange)
#Two Colour data subgraph-OLD-
#self.ax4.plot(self.luckyCalcs.dataSet[0], self.luckyCalcs.twoColData, 'b:',
# self.luckyCalcs.wlIntegLim, self.luckyCalcs.twoColDataLim, 'r:')
#self.ax4.set_xlim(*self.luckyCalcs.planckPlotRange)
#self.ax4.set_ylim([np.amin(calcs.TwoColDataLim),np.amax(calcs.TwoColDataLim)])
#self.ax4.set_ylim(*calcs.twoColDataLim)
#new modification
self.ax4.set_ylim(self.luckyCalcs.twoColTemp - 500, self.luckyCalcs.twoColTemp + 500)
#Histogram subgraph
#old
#self.ax5.plot(self.luckyCalcs.twoColHistValues, self.luckyCalcs.twoColHistFreq,
# self.luckyCalcs.twoColHistValues, self.luckyCalcs.gaus(self.luckyCalcs.twoColHistValues, *self.luckyCalcs.histFit), 'red')
#modified
self.ax5.hist(self.luckyCalcs.twoColDataLim, self.luckyCalcs.binning)
if self.luckyCalcs.fitOkGauss == 1 and self.luckyCalcs.fitOkPlanck == 1:
self.ax5.plot(self.luckyCalcs.twoColHistValues, self.luckyCalcs.gaus(self.luckyCalcs.twoColHistValues, *self.luckyCalcs.histFit), 'red')
#
self.ax5.set_xlim([self.luckyCalcs.twoColTemp - 400, self.luckyCalcs.twoColTemp + 400])
#self.ax5.set_xlim(1800,4000)
#Residual subgraph of the Wien
if self.luckyCalcs.fitOkPlanck == 1 and self.luckyCalcs.fitOkWien == 1:
ordin = len(self.luckyCalcs.invWL)*[0]
self.ax6.plot(self.luckyCalcs.invWLIntegLim, self.luckyCalcs.wienResidual,'green',self.luckyCalcs.invWL,ordin,'black')
#Create text label for calculated T values -OLD-
#textLabel = OrderedDict([("T"+r"$_{Planck}$","{0:10.2f}".format(self.luckyCalcs.planckTemp)),
# ("T"+r"$_{Wien}$","{0:10.2f}".format(self.luckyCalcs.wienTemp)),
# ("T"+r"$_{Two Colour}$","{0:10.2f}".format(self.luckyCalcs.twoColTemp))])
#Create text label for calculated T values -modified-
if self.luckyCalcs.fitOkPlanck == 1 and self.luckyCalcs.fitOkWien == 1:
textLabel = OrderedDict([("T"+r"$_{Planck}$" + "[K]","{0:9d}".format(int(self.luckyCalcs.planckTemp))),
("T"+r"$_{Wien}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.wienTemp))),
("T"+r"$_{2col}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
else:
if self.luckyCalcs.fitOkPlanck == 0:
textLabel = OrderedDict([("T"+r"$_{Planck}$" + "[K]","{0:9s}".format("ERROR")),
("T"+r"$_{Wien}$"+ "[K]","{0:9s}".format("ERROR")),
("T"+r"$_{2col}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
if self.luckyCalcs.fitOkWien == 0:
textLabel = OrderedDict([("T"+r"$_{Planck}$" + "[K]","{0:9d}".format(int(self.luckyCalcs.planckTemp))),
("T"+r"$_{Wien}$"+ "[K]","{0:9s}".format("ERROR")),
("T"+r"$_{2col}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
#textLabel = OrderedDict([("T"+r"$_{Planck}$" + "[K]","{0:9d}".format(int(self.luckyCalcs.planckTemp))),
# ("T"+r"$_{Wien}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.wienTemp))),
# ("T"+r"$_{2col}$"+ "[K]","{0:9d}".format(int(self.luckyCalcs.twoColTemp)))])
self.errWienPlanck = (abs(self.luckyCalcs.planckTemp - self.luckyCalcs.wienTemp)/ (self.luckyCalcs.planckTemp))*100
self.std2col = self.luckyCalcs.twoColErr
textLabel1 = OrderedDict([
("ERR"+"$_{2col}$"+ "[K]","{0:9d}".format(int(self.std2col))),
("ERR"+"$_{W-P}$","{0:9.2f}".format(self.errWienPlanck))
])
# {"T"+r"$_{Planck}$" : "{0:10.2f}".format(self.luckyCalcs.planckTemp),
# "T"+r"$_{Wien}$" : "{0:10.2f}".format(self.luckyCalcs.wienTemp),
# "T"+r"$_{Two Colour}$":"{0:10.2f}".format(self.luckyCalcs.twoColTemp)}
labelPosition = (0.54, 0.85)
rowNr = 0
for label,tVal in textLabel.iteritems( ):
plt.figtext(labelPosition[0], labelPosition[1]-(0.05*rowNr), label, fontdict = None, size = 'large')
plt.figtext(labelPosition[0]+0.080, labelPosition[1]-(0.05*rowNr), tVal, fontdict = None, size = 'large')
rowNr += 1
labelPosition1 = (0.78, 0.85)
rowNr = 0
for label,tVal in textLabel1.iteritems( ):
if self.errWienPlanck < 1 or rowNr == 0 :
plt.figtext(labelPosition1[0], labelPosition1[1]-(0.05*rowNr), label, fontdict = None, size = 'large')
plt.figtext(labelPosition1[0]+0.080, labelPosition1[1]-(0.05*rowNr), tVal, fontdict = None, size = 'large')
else:
plt.figtext(labelPosition1[0], labelPosition1[1]-(0.05*rowNr), label, fontdict = None, size = 'large')
plt.figtext(labelPosition1[0]+0.080, labelPosition1[1]-(0.05*rowNr), tVal, fontdict = None, size = 'large', color = 'r')
rowNr += 1
if redraw and not self.debug:
plt.draw()
#Needed to make plt appear!
# http://stackoverflow.com/questions/28269157/plotting-in-a-non-blocking-way-with-matplotlib
plt.pause(0.001)
#Draws text label on plot
# txt=plt.text(4500,33,TP)
# txt1=plt.text(4200,33,'T=')
# txt2=plt.text(2000,17,TW)
# txt3=plt.text(1800,17,'T=')
# txt.set_size(15)
# txt1.set_size(15)
# txt2.set_size(15)
# txt3.set_size(15)
# fig.canvas.draw()
def getYMax(self, *data):
maxes = []
for dat in data:
maxes.append(np.amax(dat))
return max(maxes)*1.1
def dispose(self):
plt.close(self.luckyCalcs.label)
|
python
|
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from django.contrib.auth.models import User
from .models import Employee, CostCenter, Unity
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username',)
class UserSerializerWithToken(serializers.ModelSerializer):
token = serializers.SerializerMethodField()
password = serializers.CharField(write_only=True)
def get_token(self, obj):
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(obj)
token = jwt_encode_handler(payload)
return token
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
class Meta:
model = User
fields = ('token', 'username', 'password')
class UnitySerializer(serializers.ModelSerializer):
class Meta:
model = Unity
fields = ('name',)
class EmployeeSerializer(serializers.ModelSerializer):
class Meta:
model = Employee
fields = ('identifier', 'name', 'admission', 'email', 'role',
'resignation', 'birth_date', 'zip_code', 'phone', 'cpf',
'cost_center', 'gender', 'photo')
class CostCenterSerializer(serializers.ModelSerializer):
class Meta:
model = CostCenter
fields = ('number', 'name_department', 'responsible')
|
python
|
import numpy as np
import astropy.constants as cst
import astropy.units as u
def blackbody(wave, T):
'''
Blackbody function
Parameters
----------
wave : float
Wavelength(s) in micron
T : float
Temperature in Kelvin
Results
-------
bb_spectrum : float
Black body spectrum in W/m2/micron/arcsec2
'''
if not hasattr(wave, 'unit'):
wave = wave * u.micron
if not hasattr(T, 'unit'):
T = T * u.K
exp_part = np.exp(cst.h*cst.c/(wave*cst.k_B*T))
bb_spectrum = (2*cst.h*cst.c**2/wave**5*1e10)*(exp_part - 1)**(-1) / u.sr
bb_spectrum = bb_spectrum.to('W/m2/micron/arcsec2')/1e10
# *1e10 is a trick to avoid rounding errors...
return bb_spectrum
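# Example usage (illustrative values only): a 300 K blackbody sampled at 10 micron,
# with plain floats (wrapped in micron / K above) or with explicit astropy units.
if __name__ == '__main__':
    print(blackbody(10.0, 300.0))
    print(blackbody(10.0 * u.micron, 300.0 * u.K))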
|
python
|
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import json
import sys
import re
import traceback
from os.path import expanduser
import os
import urllib
from uforge.objects.uforge import *
import download_utils
import printer
__author__="UShareSoft"
def extract_id(uri):
elements = uri.split("/");
return elements[len(elements) - 1];
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":True, "y":True, "ye":True,
"no":False, "n":False}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
printer.out(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
printer.out("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
def remove_special_chars(string):
return (re.sub('[-]', '_', string)).lower()
def is_uforge_exception(e):
if len(e.args)>=1 and type(e.args[0]) is UForgeError:
return True
def get_uforge_exception(e):
if len(e.args)>=1 and type(e.args[0]) is UForgeError:
return "UForge Error '"+str(e.args[0].statusCode)+"' with method: "+e.args[0].requestMethod+" "+e.args[0].requestUri+"\n"+"Message:\n\t"+e.args[0].get_localizedErrorMsg().message
def print_uforge_exception(e):
if len(e.args)>=1 and type(e.args[0]) is UForgeError:
printer.out(get_uforge_exception(e), printer.ERROR)
else:
traceback.print_exc()
def oder_list_object_by(objects, attribute):
if type(attribute) is str:
return sorted(objects, key=lambda x: getattr(x, attribute).lower(), reverse=False)
return objects
def get_uforge_url_from_ws_url(ws_url):
if ws_url[-1:]!='/':
return ws_url.rpartition('/')[0]
else:
return ws_url[:-1].rpartition('/')[0]
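# Example (hypothetical URL): 'https://host/uforge/api' and 'https://host/uforge/api/'
# both resolve to 'https://host/uforge'.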
def get_home_dir():
return expanduser("~")
|
python
|
import argparse
import yaml
from train_eval.evaluator import Evaluator
import os
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="Config file with dataset parameters", required=True)
parser.add_argument("-r", "--data_root", help="Root directory with data", required=True)
parser.add_argument("-d", "--data_dir", help="Directory to extract data", required=True)
parser.add_argument("-o", "--output_dir", help="Directory to save results", required=True)
parser.add_argument("-w", "--checkpoint", help="Path to pre-trained or intermediate checkpoint", required=True)
args = parser.parse_args()
# Make directories
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
if not os.path.isdir(os.path.join(args.output_dir, 'results')):
os.mkdir(os.path.join(args.output_dir, 'results'))
# Load config
with open(args.config, 'r') as yaml_file:
cfg = yaml.safe_load(yaml_file)
# Evaluate
evaluator = Evaluator(cfg, args.data_root, args.data_dir, args.checkpoint)
evaluator.evaluate(output_dir=args.output_dir)
|
python
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from users.models import super_table, teacher_table, student_table
from users.serializers import SuperSerializer, TeacherSerializer, StudentSerializer
class UserManagementView(APIView):
"""用户管理"""
def get(self, request):
"""返回"""
# 去 session 中取指定的值
user_id = request.session.get("user_id", None)
is_admin = request.session.get("is_admin", False)
# 如果用户id存在,并且是管理员,那么直接跳转管理后台主页
if user_id and is_admin:
return render(request, "html/super_index.html")
return render(request, "html/admin_login.html")
def post(self, request):
# 获取请求参数
user_info = request.data
username = user_info.getlist("username")[0]
password = user_info.getlist("password")[0]
userkind = user_info.getlist("userkind")[0]
# 判断是否存在
if userkind == "3":
# 超级管理员
try:
user = super_table.objects.filter(username=username)
except:
return Response({"error": "登陆错误"})
user_serializer = SuperSerializer(user, many=True)
# 获取序列化后的用户密码
user_data = user_serializer.data
# 从数据中获取数据
user_info = user_data[0]
user_password = user_info["password"]
if password != user_password:
return Response({"error": "登陆错误"})
else:
request.session['user_id'] = user_info["username"]
request.session['is_admin'] = True
return render(request, "html/super_index.html")
if userkind == "1":
# 教师
try:
user = teacher_table.objects.filter(tid=username)
except:
return Response({"error": "登陆错误"})
if not user:
return Response({"error": "登陆错误"})
if password != username:
return Response({"error": "登陆错误"})
else:
user_serializer = TeacherSerializer(user, many=True)
# 获取序列化后的数据
user_data = user_serializer.data
# 获取具体数据
user_info = user_data[0]
request.session['user_id'] = user_info["tid"]
request.session['is_admin'] = False
return render(request, "html/teacher_index.html")
if userkind == "2":
# 学生
try:
user = student_table.objects.filter(sid=username)
except:
return Response({"error": "登陆错误"})
if not user:
return Response({"error": "登陆错误"})
if password != username:
return Response({"error": "登陆错误"})
else:
user_serializer = StudentSerializer(user, many=True)
# 获取序列化后的数据
user_data = user_serializer.data
# 获取具体数据
user_info = user_data[0]
request.session['user_id'] = user_info["sid"]
request.session['is_admin'] = False
return render(request, "html/student_index.html")
class SupermanagementView(APIView):
"""超级管理员用户管理"""
def get(self, request, userkind):
"""查询所有"""
# 获取查询用户种类
# 教师
if userkind == "1":
try:
user = teacher_table.objects.all()
except:
return Response({"error": "查询失败"})
# 序列化
user_serializer = TeacherSerializer(user, many=True)
classroom_dict = user_serializer.data
# 返回
return Response(classroom_dict)
# 学生
elif userkind == "2":
try:
user = student_table.objects.all()
except:
return Response({"error": "查询失败"})
# 序列化
user_serializer = StudentSerializer(user, many=True)
user_dict = user_serializer.data
# 返回
return Response(user_dict)
def post(self, request):
"""创建"""
# 获取请求参数
user_info = request.data
# 获取创建的用户种类
userkind = user_info.getlist("userkind")[0]
# 教师
if userkind == "1":
# 获取其余信息
tid = user_info.getlist("tid")[0]
tname = user_info.getlist("tname")[0]
# 将获取的数据上在数据库创建
teacher_table.objects.create(
tid=tid,
tname=tname,
)
return Response({
"tid": tid,
"tname": tname,
})
elif userkind == "2":
# 获取其余信息
sid = user_info.getlist("sid")[0]
sname = user_info.getlist("sname")[0]
sclass_id = user_info.getlist("sclass_id")[0]
# 将获取的数据上在数据库创建
teacher_table.objects.create(
sid=sid,
sname=sname,
sclass_id=sclass_id,
)
return Response({
"sid": sid,
"sname": sname,
"sclass_id": sclass_id,
})
def put(self, request, userkind, id):
"""修改"""
# 教师
if userkind == "1":
try:
user = teacher_table.objects.get(tid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = TeacherSerializer(user, data=user_dict)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = TeacherSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
# 学生
if userkind == "2":
try:
user = student_table.objects.get(sid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = StudentSerializer(user, data=user_dict)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = StudentSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
def patch(self, request, userkind, id):
"""局部更新"""
# 教师
if userkind == "1":
try:
user = teacher_table.objects.get(tid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = TeacherSerializer(user, data=user_dict, partial=True)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = TeacherSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
# 学生
if userkind == "2":
try:
user = student_table.objects.get(sid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = StudentSerializer(user, data=user_dict, partial=True)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = StudentSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
def delete(self, request, userkind, id):
"""删除"""
if userkind == "1":
try:
user = teacher_table.objects.get(tid=id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
if userkind == "2":
try:
user = student_table.objects.get(sid=id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
class UserLogOutView(APIView):
"""用户管理退出"""
def get(self, request):
request.session.pop('user_id', None)
request.session.pop('is_admin', None)
# 返回结果
return HttpResponseRedirect("http://www.etms.mp:8000/user/user/")
class StudentView(APIView):
"""
学生视图
"""
def post(self, request):
"""
创建学生信息
路由: POST /user/student/
"""
student_info = request.data
user_id = student_info.getlist("user_id")[0]
user_name = student_info.getlist("user_name")[0]
class_id = student_info.getlist("class_id")[0]
student_table.objects.create(
sid=user_id,
sname=user_name,
sclass_id=class_id
)
return Response({"message": "ok"})
def delete(self, request):
"""删除学生信息路由: DELETE /user/student/"""
student_info = request.data
user_id = student_info.getlist("user_id")[0]
try:
user = student_table.objects.get(sid=user_id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
class StudentFindView(APIView):
"""学生信息查询"""
def post(self, request):
classes_info = request.data
classes_id = classes_info.getlist("classes_id")[0]
print(classes_id)
user = student_table.objects.filter(sclass_id=classes_id)
print(user)
user_serializer = StudentSerializer(user, many=True)
# 获取序列化后的用户密码
user_data = user_serializer.data
return Response(user_data)
class TeacherView(APIView):
"""
教师视图
"""
def get(self, request):
teacher = teacher_table.objects.filter()
teacher_info = TeacherSerializer(teacher, many=True)
return Response(teacher_info.data)
def post(self, request):
"""
创建教师信息
路由: POST /user/teacher/
"""
student_info = request.data
user_id = student_info.getlist("user_id")[0]
user_name = student_info.getlist("user_name")[0]
teacher_table.objects.create(
tid=user_id,
tname=user_name,
)
return Response({"message": "ok"})
def delete(self, request):
"""
删除学生信息
路由: DELETE /user/teacher/
"""
teacher_info = request.data
user_id = teacher_info.getlist("user_id")[0]
try:
user = teacher_table.objects.get(tid=user_id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
|
python
|
"""Represents a tether from one actor to another"""
from nari.types.event.base import Event
from nari.types.actor import Actor
from nari.types.event import Type
class NetworkTether(Event):
"""Network tether event"""
__id__ = Type.networktether.value
def handle_params(self):
self.source_actor = Actor(self.params[0], self.params[1])
self.target_actor = Actor(self.params[2], self.params[3])
def __repr__(self):
return f'<Tether ({self.source_actor.name} -> {self.target_actor.name})>'
|
python
|
from flask import Flask, Blueprint, request
from flask_bcrypt import Bcrypt
from flask_cors import CORS
from api.models import db
from api.email import mail
import os
app = Flask(__name__)
CORS(app)
app.config.from_object(os.environ["APP_SETTINGS"])
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app) # for initializing refactored cyclic imports
bcrypt = Bcrypt(app)
mail.init_app(app) # for initializing refactored cyclic imports
from api.routes import api
app.register_blueprint(api, url_prefix="/api")
|
python
|
from tkinter import *
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from os.path import basename
from tkinter.messagebox import askokcancel, askyesno
from tkinter import PhotoImage
# functions = Functions()
class Frontend():
activefile = 'unactive'
def __init__(self):
self.window = Tk()
p1 = PhotoImage(file = "logo2.png")
# Setting icon of master window
self.window.iconphoto(False, p1)
self.window.title("Untitled - Text-Editor")
self.window.geometry("800x680") # 1920x1080
textboxstr = StringVar()
self.textbox1 = Text(self.window, width=1920, height=1080)
self.textbox1.pack()
menubar = Menu(self.window)
filename = Menu(menubar, tearoff=0)
filename.add_command(label="New", command=self.new)
filename.add_command(label="Open", command=self._openfile)
filename.add_command(label="Save", command=self.savefile)
filename.add_command(label="Save as", command=self.saveasfile)
filename.add_separator()
filename.add_command(
label="Exit", command=lambda: self.window.destroy())
menubar.add_cascade(label="File", menu=filename)
menubar.add_cascade(label="Edit")
menubar.add_cascade(label="Format")
menubar.add_cascade(label="view")
menubar.add_cascade(label="help")
# menubar.add_cascade(label="Run",command = self.Runfile)
self.window.config(menu=menubar)
def openfile(self):
pass
def _openfile(self):
"""
docstring
"""
self.filename = askopenfilename()
# self.openfilename = self.
# print(self.filename)
with open(self.filename) as file:
content = file.read()
self.textbox1.delete(1.0, END)
self.textbox1.insert(1.0, content)
self.activefile = 'active'
self.window.title(basename(self.filename) + "- Text-Editor")
self.filepath = self.filename
def savefile(self):
# print(self.textbox1.get(1.0,END))
if self.activefile == 'active':
self.tosavefile = self.filename
with open(self.tosavefile, 'w') as finalsave:
finalsave.write(self.textbox1.get(1.0, END))
else:
self.savefiledialog = asksaveasfilename()
with open(self.savefiledialog, 'a') as _savefile:
_savefile.write(self.textbox1.get(1.0, END))
self.window.title(basename(self.savefiledialog) + "- Text-Editor")
def saveasfile(self):
self.saveasfilename = asksaveasfilename()
self.savefilename = basename(self.saveasfilename) + "- Text-Editor"
print(self.saveasfilename)
with open(self.saveasfilename, 'a') as savefile:
savefile.write(self.textbox1.get(1.0, END))
self.window.title(self.savefilename)
self.filepath = self.saveasfilename
def new(self):
# if(self.textbox1.get(1)):
self.ans = askyesno("Warning", "Do you want to save your changes ?")
# print(self.ans)
if(self.ans):
self.saveasfile()
else:
self.window.title("Untitled - Text-Editor")
self.textbox1.delete(1.0, END)
def run(self):
self.window.mainloop()
front = Frontend()
front.run()
# functions.run()
# gui1
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create COMSOL compatible color encodings for the CET perceptually uniform
colour maps available at: http://peterkovesi.com/projects/colourmaps/
To install to COMSOL, copy the unzipped tsv files to the
comsol\data\colormap directory
Works at least for COMSOL 4.2a.
Credit for those maps below:
Peter Kovesi. Good Colour Maps: How to Design Them.
arXiv:1509.03700 [cs.GR] 2015
This is a quick one-off script, use as you will.
Joseph E. Weaver, [email protected]
"""
import os.path
import urllib.request
import zipfile
import re
import io
# Location of the csv file we want to convert. COMSOL expects a tab-delimited
# file, with RGB values in the range of 0-1. On my windows box, it also uses
# CR/LF endings.
# File below is the closest thing to what we want, let's download and convert.
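# For example (illustrative values), a CSV line such as
#   0.000000,0.348254,0.348254
# becomes the tab-separated, CR/LF-terminated line COMSOL expects:
#   0.000000<TAB>0.348254<TAB>0.348254<CR><LF>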
csv_0_1_url = 'http://peterkovesi.com/projects/colourmaps/CETperceptual_csv_0_1.zip' # noqa
filename = 'CETperceptual_csv_0_1.zip'
# I can never remember how to do this right, so stackoverflow is my friend
# https://stackoverflow.com/questions/3173372/download-files-from-a-list-if-not-already-downloaded # noqa
# https://stackoverflow.com/questions/45247983/urllib-urlretrieve-with-custom-header # noqa
# set the headers to avoid a 403 forbidden
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0')] # noqa
urllib.request.install_opener(opener)
# actually download the file
if not os.path.isfile(filename):
print('Downloading: ' + filename)
try:
urllib.request.urlretrieve(csv_0_1_url, filename)
except Exception as inst:
print(inst)
print(' Encountered unknown error.')
# read in the downloaded zipfile
zfile = zipfile.ZipFile(filename)
# zipfile we're going to write
out_zip = zipfile.ZipFile(re.sub('csv', 'tsv', filename),
mode='w',
compression=zipfile.ZIP_DEFLATED,
)
# rummage thru the zip file, any time we find a csv, convert it to TSV in
# memory then pop it into the output zip
# this is not pretty nor exception-proof, but it seems to work
for finfo in zfile.infolist():
infile, file_extension = os.path.splitext(finfo.filename)
if(".csv" == file_extension):
ifile = zfile.open(finfo)
# using StringIO to avoid writing/cleaning up temp files.
# These are short, so there's no memory issue
ofile = io.StringIO()
line_list = ifile.readlines()
# the actual substitutions
for line in line_list:
tabbed = re.sub(b',', b'\t', line)
termed = re.sub(b'\n', b'\r\n', tabbed)
ofile.write(termed.decode("ascii"))
# getting a little hacky with the new filename
newname = re.sub('csv', 'tsv', finfo.filename)
out_zip.writestr(newname, ofile.getvalue())
ofile.close()
|
python
|
import ipaddress
import os.path
from unittest import TestCase
from unittest.mock import MagicMock, patch
from parameterized import parameterized
from salt.exceptions import CommandExecutionError
import yaml
import metalk8s_network
from tests.unit import mixins
from tests.unit import utils
YAML_TESTS_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "files", "test_metalk8s_network.yaml"
)
with open(YAML_TESTS_FILE) as fd:
YAML_TESTS_CASES = yaml.safe_load(fd)
class Metalk8sNetworkTestCase(TestCase, mixins.LoaderModuleMockMixin):
"""
TestCase for `metalk8s_network` module
"""
loader_module = metalk8s_network
loader_module_globals = {"__pillar__": {"networks": {"service": "10.0.0.0/8"}}}
def test_virtual(self):
"""
Tests the return of `__virtual__` function
"""
self.assertEqual(metalk8s_network.__virtual__(), "metalk8s_network")
def test_get_kubernetes_service_ip_success(self):
"""
Tests the return of `get_kubernetes_service_ip` function, success
"""
self.assertEqual(metalk8s_network.get_kubernetes_service_ip(), "10.0.0.1")
@parameterized.expand(
[
(None, 'Pillar key "networks:service" must be set.'),
("10.0.0.0/32", "Could not obtain an IP in the network range 10.0.0.0/32"),
]
)
def test_get_kubernetes_service_ip_raise(self, service_ip, error_msg):
"""
Tests the return of `get_kubernetes_service_ip` function, when raising
"""
with patch.dict(
metalk8s_network.__pillar__, {"networks": {"service": service_ip}}
):
self.assertRaisesRegex(
CommandExecutionError,
error_msg,
metalk8s_network.get_kubernetes_service_ip,
)
def test_get_cluster_dns_ip_success(self):
"""
Tests the return of `get_cluster_dns_ip` function, success
"""
self.assertEqual(metalk8s_network.get_cluster_dns_ip(), "10.0.0.10")
@parameterized.expand(
[
(None, 'Pillar key "networks:service" must be set.'),
("10.0.0.0/31", "Could not obtain an IP in the network range 10.0.0.0/31"),
]
)
def test_get_cluster_dns_ip_raise(self, service_ip, error_msg):
"""
Tests the return of `get_cluster_dns_ip` function, when raising
"""
with patch.dict(
metalk8s_network.__pillar__, {"networks": {"service": service_ip}}
):
self.assertRaisesRegex(
CommandExecutionError, error_msg, metalk8s_network.get_cluster_dns_ip
)
def test_get_oidc_service_ip_success(self):
"""
Tests the return of `get_oidc_service_ip` function, success
"""
self.assertEqual(metalk8s_network.get_oidc_service_ip(), "10.0.0.7")
@parameterized.expand(
[
(None, 'Pillar key "networks:service" must be set.'),
("10.0.0.0/32", "Could not obtain an IP in the network range 10.0.0.0/32"),
]
)
def test_get_oidc_service_ip_raise(self, service_ip, error_msg):
"""
Tests the return of `get_oidc_service_ip` function, when raising
"""
with patch.dict(
metalk8s_network.__pillar__, {"networks": {"service": service_ip}}
):
self.assertRaisesRegex(
CommandExecutionError, error_msg, metalk8s_network.get_oidc_service_ip
)
@parameterized.expand(
[
# 1 CIDR, 2 IP, take the first one
(["10.200.0.0/16"], ["10.200.0.1", "10.200.0.42"], "10.200.0.1"),
# 1 CIDR, 2 IP, current_ip set to the second one, take the second one
(
["10.200.0.0/16"],
["10.200.0.1", "10.200.0.42"],
"10.200.0.42",
"10.200.0.42",
),
# 1 CIDR, no IP, errors
(
["10.200.0.0/16"],
[],
"Unable to find an IP on this host in one of this cidr: 10.200.0.0/16",
None,
True,
),
# 2 CIDR, multiple IPs, take the first one of first CIDR
(
["10.200.0.0/16", "10.100.0.0/16"],
{
"10.200.0.0/16": ["10.200.0.1", "10.200.0.42"],
"10.100.0.0/16": ["10.100.0.12", "10.100.0.52"],
},
"10.200.0.1",
),
# 2 CIDR, multiple IPs, with current_ip present
(
["10.200.0.0/16", "10.100.0.0/16"],
{
"10.200.0.0/16": ["10.200.0.1", "10.200.0.42"],
"10.100.0.0/16": ["10.100.0.12", "10.100.0.52"],
},
"10.100.0.52",
"10.100.0.52",
),
# 2 CIDR, multiple IPs, with current_ip absent
(
["10.200.0.0/16", "10.100.0.0/16"],
{
"10.200.0.0/16": ["10.200.0.1", "10.200.0.42"],
"10.100.0.0/16": ["10.100.0.12", "10.100.0.52"],
},
"10.200.0.1",
"10.100.0.87",
),
# 2 CIDR, first CIDR no IP
(
["10.200.0.0/16", "10.100.0.0/16"],
{"10.100.0.0/16": ["10.100.0.12", "10.100.0.52"]},
"10.100.0.12",
),
# 2 CIDR, no IP, with current_ip, errors
(
["10.200.0.0/16", "10.100.0.0/16"],
[],
"Unable to find an IP on this host in one of this cidr: 10.200.0.0/16, 10.100.0.0/16",
"10.200.0.1",
True,
),
]
)
def test_get_ip_from_cidrs(
self, cidrs, ip_addrs, result, current_ip=None, raises=False
):
"""
Tests the return of `get_ip_from_cidrs` function
"""
def _get_ip_addrs(cidr):
if isinstance(ip_addrs, dict):
return ip_addrs.get(cidr)
return ip_addrs
salt_dict = {"network.ip_addrs": MagicMock(side_effect=_get_ip_addrs)}
with patch.dict(metalk8s_network.__salt__, salt_dict):
if raises:
self.assertRaisesRegex(
CommandExecutionError,
result,
metalk8s_network.get_ip_from_cidrs,
cidrs=cidrs,
current_ip=current_ip,
)
else:
self.assertEqual(
result,
metalk8s_network.get_ip_from_cidrs(
cidrs=cidrs, current_ip=current_ip
),
)
@parameterized.expand(
[
# Simple test
(["eth0"], "1500", 1500),
# Multiple ifaces (first one taken)
(["eth1", "eth0"], {"eth0": "1500", "eth1": "1442"}, 1442),
# No iface, error
([], None, 'Unable to get interface for "10.200.0.42"', True),
]
)
def test_get_mtu_from_ip(self, ifaces, read_mtu, result, raises=False):
"""
Tests the return of `get_mtu_from_ip` function
"""
def _read_mtu_file(path):
if isinstance(read_mtu, dict):
# path = "/sys/class/net/<iface>/mtu"
iface = path.split("/")[-2]
return read_mtu.get(iface, "")
return read_mtu
salt_dict = {
"network.ifacestartswith": MagicMock(return_value=ifaces),
"file.read": MagicMock(side_effect=_read_mtu_file),
}
with patch.dict(metalk8s_network.__salt__, salt_dict):
if raises:
self.assertRaisesRegex(
CommandExecutionError,
result,
metalk8s_network.get_mtu_from_ip,
"10.200.0.42",
)
else:
self.assertEqual(
result, metalk8s_network.get_mtu_from_ip("10.200.0.42")
)
@utils.parameterized_from_cases(YAML_TESTS_CASES["get_listening_processes"])
def test_get_listening_processes(
self, result, net_conns_ret=None, process_ret=None
):
"""
Tests the return of `get_listening_processes` function
"""
net_conns_return = []
for net_conn in net_conns_ret or []:
sconn_mock = MagicMock()
sconn_mock.status = net_conn.get("status", "LISTEN")
sconn_mock.laddr = net_conn.get("laddr")
sconn_mock.pid = net_conn.get("pid")
net_conns_return.append(sconn_mock)
process_return = {}
for pid, name in (process_ret or {}).items():
process_return[pid] = MagicMock()
process_return[pid].name.return_value = name
net_conns_mock = MagicMock(return_value=net_conns_return)
process_mock = MagicMock(side_effect=process_return.get)
with patch("psutil.net_connections", net_conns_mock), patch(
"psutil.Process", process_mock
):
self.assertEqual(metalk8s_network.get_listening_processes(), result)
@utils.parameterized_from_cases(YAML_TESTS_CASES["routes"])
def test_routes(self, ip_route_output, result):
"""
Tests the return of `routes` function
"""
def _mock_convert_cidr(cidr):
ret = {"network": None, "netmask": None, "broadcast": None}
network_info = ipaddress.ip_network(cidr, strict=False)
ret["network"] = str(network_info.network_address)
ret["netmask"] = str(network_info.netmask)
ret["broadcast"] = str(network_info.broadcast_address)
return ret
mock_convert_cidr = MagicMock(side_effect=_mock_convert_cidr)
mock_ip_cmd = MagicMock(return_value=ip_route_output)
with patch.dict(
metalk8s_network.__salt__,
{"cmd.run": mock_ip_cmd, "network.convert_cidr": mock_convert_cidr},
):
self.assertEqual(metalk8s_network.routes(), result)
mock_ip_cmd.assert_called_once_with("ip -4 route show table main")
@utils.parameterized_from_cases(YAML_TESTS_CASES["get_control_plane_ingress_ip"])
def test_get_control_plane_ingress_ip(
self,
result,
raises=False,
pillar=None,
opts=None,
grains=None,
mine_ret=None,
mine_runner_ret=None,
):
"""
Tests the return of `get_control_plane_ingress_ip` function
"""
if pillar is None:
pillar = {"networks": {"control_plane": {}}}
if opts is None:
opts = {"__role": "minion"}
if grains is None:
grains = {"id": "my-node"}
salt_dict = {
"metalk8s.minions_by_role": MagicMock(return_value=["bootstrap"]),
"mine.get": MagicMock(return_value=mine_ret),
"saltutil.runner": MagicMock(return_value=mine_runner_ret),
}
with patch.dict(metalk8s_network.__salt__, salt_dict), patch.dict(
metalk8s_network.__pillar__, pillar
), patch.dict(metalk8s_network.__opts__, opts), patch.dict(
metalk8s_network.__grains__, grains
):
if raises:
self.assertRaisesRegex(
CommandExecutionError,
result,
metalk8s_network.get_control_plane_ingress_ip,
)
else:
self.assertEqual(
metalk8s_network.get_control_plane_ingress_ip(), result
)
@utils.parameterized_from_cases(
YAML_TESTS_CASES["get_control_plane_ingress_endpoint"]
)
def test_get_control_plane_ingress_endpoint(
self, result, raises=False, cp_ingress_ip_ret=None
):
"""
Tests the return of `get_control_plane_ingress_endpoint` function
"""
mock_get_cp_ingress_ip = MagicMock(return_value=cp_ingress_ip_ret)
if raises:
mock_get_cp_ingress_ip.side_effect = CommandExecutionError(
cp_ingress_ip_ret
)
with patch.dict(
metalk8s_network.__salt__,
{"metalk8s_network.get_control_plane_ingress_ip": mock_get_cp_ingress_ip},
):
if raises:
self.assertRaisesRegex(
CommandExecutionError,
result,
metalk8s_network.get_control_plane_ingress_endpoint,
)
else:
self.assertEqual(
metalk8s_network.get_control_plane_ingress_endpoint(), result
)
|
python
|
import torch.nn as nn
from .losses import get_loss
from ..architectures import get_backbone
from .wrapper import Wrapper
class SimpleClassifierWrapper(Wrapper):
def __init__(self, wrapper_config):
super().__init__()
self.backbone = None
self.classifier = None
self._init_modules(wrapper_config)
self._init_loss(wrapper_config)
self.batch_info = {}
def _init_modules(self, wrapper_config):
self.backbone, feature_size = get_backbone(wrapper_config.backbone,
pretrained=wrapper_config.pretrained,
get_feature_size=True)
self.classifier = nn.Linear(feature_size, wrapper_config.nclasses)
def _init_loss(self, wrapper_config):
loss_config = None
if hasattr(wrapper_config, 'loss_config'):
loss_config = wrapper_config.loss_config
self.loss = get_loss(wrapper_config.loss, loss_config)
def forward(self, x):
features = self.backbone(x['data'])
output = self.classifier(features)
if isinstance(self.loss, nn.modules.loss.CrossEntropyLoss):
x['target'] = x['target'].squeeze()
output_dict = {'output': output.detach().cpu().numpy(),
'target': x['target'].detach().cpu().numpy()}
loss = self.loss(output, x['target'])
return output_dict, loss
def predict(self, x):
features = self.backbone(x['data'])
output = self.classifier(features)
output_dict = {'output': output.detach().cpu()}
return output_dict
def to_parallel(self, parallel_class):
self.backbone = parallel_class(self.backbone)
self.classifier = parallel_class(self.classifier)
return self
|
python
|
# -*- coding: utf-8 -*-
#
# Project: silx (originally pyFAI)
# https://github.com/silx-kit/silx
#
# Copyright (C) 2012-2017 European Synchrotron Radiation Facility, Grenoble, France
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__authors__ = ["J. Kieffer"]
__license__ = "MIT"
__date__ = "02/08/2016"
import unittest
import numpy
import logging
logger = logging.getLogger(__name__)
from ..bilinear import BilinearImage
class TestBilinear(unittest.TestCase):
"""basic maximum search test"""
N = 1000
def test_max_search_round(self):
"""test maximum search using random points: maximum is at the pixel center"""
a = numpy.arange(100) - 40.
b = numpy.arange(100) - 60.
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = BilinearImage(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40) > 1e-4 or abs(l - 60) > 1e-4:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
ok += 1
logger.debug("Success rate: %.1f", 100. * ok / self.N)
self.assertEqual(ok, self.N, "Maximum is always found")
def test_max_search_half(self):
"""test maximum search using random points: maximum is at a pixel edge"""
a = numpy.arange(100) - 40.5
b = numpy.arange(100) - 60.5
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = BilinearImage(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40.5) > 0.5 or abs(l - 60.5) > 0.5:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
ok += 1
logger.debug("Success rate: %.1f", 100. * ok / self.N)
self.assertEqual(ok, self.N, "Maximum is always found")
def test_map(self):
N = 100
y, x = numpy.ogrid[:N, :N + 10]
img = x + y
b = BilinearImage(img)
x2d = numpy.zeros_like(y) + x
y2d = numpy.zeros_like(x) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img).max(), 0, "images are the same (corners)")
x2d = numpy.zeros_like(y) + (x[:, :-1] + 0.5)
y2d = numpy.zeros_like(x[:, :-1]) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img[:, :-1] - 0.5).max(), 0, "images are the same (middle)")
x2d = numpy.zeros_like(y[:-1, :]) + (x[:, :-1] + 0.5)
y2d = numpy.zeros_like(x[:, :-1]) + (y[:-1, :] + 0.5)
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img[:-1, 1:]).max(), 0, "images are the same (center)")
def test_profile_grad(self):
N = 100
img = numpy.arange(N * N).reshape(N, N)
b = BilinearImage(img)
res1 = b.profile_line((0, 0), (N - 1, N - 1))
l = numpy.ceil(numpy.sqrt(2) * N)
self.assertEqual(len(res1), l, "Profile has correct length")
self.assertLess((res1[:-2] - res1[1:-1]).std(), 1e-3, "profile is linear (excluding last point)")
def test_profile_gaus(self):
N = 100
x = numpy.arange(N) - N // 2.0
g = numpy.exp(-x * x / (N * N))
img = numpy.outer(g, g)
b = BilinearImage(img)
res_hor = b.profile_line((N // 2, 0), (N // 2, N - 1))
res_ver = b.profile_line((0, N // 2), (N - 1, N // 2))
self.assertEqual(len(res_hor), N, "Profile has correct length")
self.assertEqual(len(res_ver), N, "Profile has correct length")
self.assertLess(abs(res_hor - g).max(), 1e-5, "correct horizontal profile")
self.assertLess(abs(res_ver - g).max(), 1e-5, "correct vertical profile")
# Profile with linewidth=3
expected_profile = img[:, N // 2 - 1:N // 2 + 2].mean(axis=1)
res_hor = b.profile_line((N // 2, 0), (N // 2, N - 1), linewidth=3)
res_ver = b.profile_line((0, N // 2), (N - 1, N // 2), linewidth=3)
self.assertEqual(len(res_hor), N, "Profile has correct length")
self.assertEqual(len(res_ver), N, "Profile has correct length")
self.assertLess(abs(res_hor - expected_profile).max(), 1e-5,
"correct horizontal profile")
self.assertLess(abs(res_ver - expected_profile).max(), 1e-5,
"correct vertical profile")
def suite():
testsuite = unittest.TestSuite()
testsuite.addTest(TestBilinear("test_max_search_round"))
testsuite.addTest(TestBilinear("test_max_search_half"))
testsuite.addTest(TestBilinear("test_map"))
testsuite.addTest(TestBilinear("test_profile_grad"))
testsuite.addTest(TestBilinear("test_profile_gaus"))
return testsuite
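# Optional runner sketch (not part of the original module): how the suite above
# would typically be executed once this module is imported from its package.
def run_tests():
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite())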
|
python
|
#!/usr/bin/env python3
import random
def emit_test_one(max_len_list,len_cmd_min,len_cmd_max,no_exception):
header = '''
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "DoublyLinkedList.h"
#include <list>
#include <stdexcept>
using namespace std;
'''
'''
Test cases
1. Max length
2.
'''
ops = {
'empty': {'args': [], 'delta_len': 0, 'returns': False},
'front': {'args': [], 'delta_len': 0, 'returns': True},
'back': {'args': [], 'delta_len': 0, 'returns': True},
'add_front': {'args': [range(0,9999)], 'delta_len': 1, 'returns': False},
'remove_front': {'args': [], 'delta_len': -1, 'returns': False},
'add_back': {'args': [range(0,9999)], 'delta_len': 1, 'returns': False},
'remove_back': {'args': [], 'delta_len': -1, 'returns': False},
'get_size': {'args': [], 'delta_len': 0, 'returns': True}
}
body = ''
body += '''
list<int> ref;
DoublyLinkedList submit;
int ret_ref;
int ret_submit;\n'''
len_list = 0
keys = ops.keys()
ret_period = 3
last_ret = 0
len_cmd = 0
max_len_list_observed = 0
num_require = 0
while True:
while True:
op = random.choice(list(keys))
if last_ret > ret_period and ops[op]['returns'] == False:
continue
if len_list >= max_len_list and ops[op]['delta_len'] > 0:
continue
if ( (no_exception) and len_list == 0 and
(ops[op]['delta_len'] == -1 or op in ['front','back'])):
continue
break
if ops[op]['returns'] == False:
last_ret += 1
else:
last_ret = 0
args = ''
for a in ops[op]['args']:
args += ',' + str(random.choice(a))
args = args[1:]
submit = 'submit.%s(%s)'%(op,args)
op_ref = (
op.replace('add','push').replace('remove','pop').replace('get_size','size'))
ref = 'ref.%s(%s)'%(op_ref,args)
require = ''
if ops[op]['returns']:
submit = 'ret_submit = ' + submit
ref = 'ret_ref = ' + ref
require = 'REQUIRE( ret_ref == ret_submit);'
num_require += 1
len_cmd += 1
if (len_list == 0) and (
ops[op]['delta_len'] == -1 or op in ['front','back']):
len_list = 0
submit = 'CHECK_THROWS_AS( %s , runtime_error );' %(submit)
#ref = 'CHECK_THROWS_AS( %s, runtime_error );' % (ref)
ref = ''
if require != '':
num_require -= 1
require = ''
else:
len_list += ops[op]['delta_len']
submit += ';'
ref += ';'
submit = ' ' + submit
ref = ' ' + ref
if require != '':
require = ' ' + require
require = '//num_require: %d\n'%(num_require) + require
body += '\n//' + '=' * 30 + '\n'
body += (submit + '\n' + ref + '\n' +
require + '\n')
body += '//' + 'len_list: %d'%len_list + '\n'
body += '//' + '=' * 30 + '\n\n'
if len_list > max_len_list_observed:
max_len_list_observed = len_list
        if (len_cmd > len_cmd_max or
                (len_cmd > len_cmd_min and max_len_list_observed >=
                    max_len_list)):
            break
tc_begin = (
'TEST_CASE( "test list with (max_len_list, len_cmd) = (%s,%s)") {\n'
% (max_len_list_observed, len_cmd))
tc_end = '\n}\n'
return header + tc_begin + body + tc_end
if __name__ == '__main__':
max_len_list = random.choice(range(10,500))
min_len_cmd = random.choice(range(3,30))
max_len_cmd = random.choice(range(50,2000))
    no_exception = random.choice([True,False])
    open('.test.cpp','w').write(emit_test_one(max_len_list,min_len_cmd,max_len_cmd,no_exception))
|
python
|
import numpy as np
from tqdm import tqdm
LOOP_SIZE = 10000000
SEED = [0, 1, 0]
def monty_hall(typed):
    doors = list(SEED)  # copy so the module-level SEED list is not shuffled in place
    np.random.shuffle(doors)
opted_3 = np.random.randint(0, 3)
if not typed:
result = doors[opted_3]
return result
else:
for i in range(3):
if i != opted_3 and not doors[i]:
excluded_3 = i
for j in range(3):
if j != excluded_3 and j != opted_3:
result = doors[j]
return result
sum_0 = 0.
sum_1 = 0.
for _ in tqdm(range(LOOP_SIZE)):
sum_0 += monty_hall(typed=0)
sum_1 += monty_hall(typed=1)
print('Hit percentage for those who kept their first door: ', (sum_0/LOOP_SIZE)*100)
print('Hit percentage for those who switched doors: ', (sum_1/LOOP_SIZE)*100)
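# Optional vectorized sketch (an equivalent formulation, not part of the original
# script): simulate every trial at once with numpy instead of a Python loop.
def monty_hall_vectorized(n, switch):
    wins = np.random.randint(0, 3, size=n)    # door hiding the prize
    picks = np.random.randint(0, 3, size=n)   # contestant's first choice
    stay_hits = wins == picks
    # switching wins exactly when the first pick was wrong
    return float((~stay_hits).mean()) if switch else float(stay_hits.mean())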
|
python
|
CARDINALITA = 10
lista_numeri = []
somma = 0.0
contatore = 0
while contatore < CARDINALITA:
    numero = float(input('enter a number '))
    print("iteration", contatore)
    lista_numeri += [numero]
    somma += numero
contatore += 1
media = somma / CARDINALITA
varianza2 = 0.0
contatore = 0
while contatore < CARDINALITA:
varianza2 += ((lista_numeri[contatore] - media)**2)
contatore += 1
varianza2 = varianza2 / (CARDINALITA-1.0)
print "media = ", media
print "varianza2 = ", varianza2
|
python
|
from utils import test_execution
COUNT = 8000
@test_execution('with context manager')
def with_context_manager():
def runner():
for _ in range(COUNT):
with open('test.txt', 'r') as file:
content = file.read()
return runner
@test_execution('without context manager')
def without_context_manager():
def runner():
for _ in range(COUNT):
file = open('test.txt', 'r')
content = file.read()
file.close()
return runner
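# utils.test_execution is not shown in this snippet. The sketch below is a
# hypothetical stand-in (named differently so it does not shadow the import)
# illustrating one way such a timing decorator could be written.
import time as _time
def _example_test_execution(label):
    def decorator(factory):
        def wrapper():
            runner = factory()              # the decorated function builds a runner
            start = _time.perf_counter()
            runner()
            print(label, _time.perf_counter() - start, 'seconds')
        return wrapper
    return decorator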
def main():
with_context_manager()
without_context_manager()
if __name__ == '__main__':
main()
|
python
|
import tensorflow as tf
import cv2
import numpy as np
IMG_SIZE=(224,224)
Model = tf.keras.models.load_model('Ensamble_Model(Ori).h5')
path='C:/Users/mariotiara/Desktop/GUI/Sampel/ffc04fed30e6_(0)_Normal.jpeg'
img=tf.keras.preprocessing.image.load_img(path,target_size=IMG_SIZE)
x = tf.keras.preprocessing.image.img_to_array(img)
x = np.expand_dims(x, axis=0)
Predict=Model.predict(x)
print("Normal:",Predict[0][0]*100)
print("Mild:",Predict[0][1]*100)
print("Moderate:",Predict[0][2]*100)
print("Severe:",Predict[0][3]*100)
|
python
|
#!/usr/bin/env python
__all__=['configstruc', 'loadpdb', 'loadstruc', 'molecule', 'protein', 'ligand', 'lipid']
from loadstruc import (FetchPDB, Loadstruc)
|
python
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Appengine Tasks related to GHOP Task handling.
"""
__authors__ = [
'"Madhusudan.C.S" <[email protected]>'
]
import logging
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from django import http
from django.utils.translation import ugettext
from soc.logic import system
from soc.tasks.helper import error_handler
from soc.views.helper import redirects
from soc.modules.ghop.logic.models import task as ghop_task_logic
DEF_TASK_UPDATE_SUBJECT_FMT = ugettext('[GHOP Task Update] %(title)s')
def getDjangoURLPatterns():
"""Returns the URL patterns for the tasks in this module.
"""
patterns = [
(r'^tasks/ghop/task/update$',
'soc.modules.ghop.tasks.task_update.updateGHOPTask'),
(r'^tasks/ghop/task/mail/create$',
'soc.modules.ghop.tasks.task_update.createNotificationMail'),
(r'^tasks/ghop/task/update/student_status$',
'soc.modules.ghop.tasks.task_update.updateTasksPostStudentSignUp')]
return patterns
def spawnUpdateTask(entity):
"""Spawns a task to update the state of the task.
"""
update_params = {
'ghop_task_key': entity.key().name(),
}
update_url = '/tasks/ghop/task/update'
new_task = taskqueue.Task(eta=entity.deadline,
params=update_params,
url=update_url)
new_task.add('ghop-update')
def updateGHOPTask(request, *args, **kwargs):
"""Method executed by Task Queue API to update a GHOP Task to
relevant state.
Expects the ghop_task_key entry to be present in the POST dict.
Args:
request: the standard Django HTTP request object
"""
post_dict = request.POST
key_name = post_dict.get('ghop_task_key')
if not key_name:
# invalid task data, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid updateGHOPTask data: %s' % post_dict)
entity = ghop_task_logic.logic.getFromKeyNameOr404(key_name)
entity, comment_entity = ghop_task_logic.logic.updateTaskStatus(entity)
if entity:
# TODO(madhusudan): does this really mean an unsuccessful update?
# return OK
return http.HttpResponse()
def spawnCreateNotificationMail(entity):
"""Spawns a task to send mail to the user who has subscribed to the specific
task.
Args:
entity: The Comment entity for which mails must be sent
"""
task_params = {
'comment_key': entity.key().id_or_name(),
'task_key': entity.parent_key().id_or_name(),
}
task_url = '/tasks/ghop/task/mail/create'
new_task = taskqueue.Task(params=task_params, url=task_url)
new_task.add('mail')
def createNotificationMail(request, *args, **kwargs):
"""Appengine task that sends mail to the subscribed users.
Expects the following to be present in the POST dict:
comment_key: Specifies the comment id for which to send the notifications
task_key: Specifies the task key name for which the comment belongs to
Args:
request: Django Request object
"""
from soc.modules.ghop.logic.helper import notifications as ghop_notifications
from soc.modules.ghop.logic.models import comment as ghop_comment_logic
from soc.modules.ghop.logic.models import task_subscription as \
ghop_task_subscription_logic
# set default batch size
batch_size = 10
post_dict = request.POST
comment_key = post_dict.get('comment_key')
task_key = post_dict.get('task_key')
if not (comment_key and task_key):
# invalid task data, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid createNotificationMail data: %s' % post_dict)
comment_key = long(comment_key)
# get the task entity under which the specified comment was made
task_entity = ghop_task_logic.logic.getFromKeyName(task_key)
# get the comment for the given id
comment_entity = ghop_comment_logic.logic.getFromID(
comment_key, task_entity)
if not comment_entity:
# invalid comment specified, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid comment specified: %s/%s' % (comment_key, task_key))
  # check and retrieve the subscriber_start_index that has been done last
  subscriber_start_index = int(post_dict.get('subscriber_start_index', 0))
# get all subscribers to GHOP task
fields = {
'task': task_entity,
}
ts_entity = ghop_task_subscription_logic.logic.getForFields(
fields, unique=True)
subscribers = db.get(ts_entity.subscribers[
subscriber_start_index:subscriber_start_index+batch_size])
task_url = "http://%(host)s%(task)s" % {
'host': system.getHostname(),
'task': redirects.getPublicRedirect(
task_entity, {'url_name': 'ghop/task'}),
}
# create the data for the mail to be sent
message_properties = {
'task_url': task_url,
'redirect_url': "%(task_url)s#c%(cid)d" % {
'task_url': task_url,
'cid': comment_entity.key().id_or_name()
},
'comment_entity': comment_entity,
'task_entity': task_entity,
}
subject = DEF_TASK_UPDATE_SUBJECT_FMT % {
'title': task_entity.title,
}
for subscriber in subscribers:
    ghop_notifications.sendTaskUpdate(subscriber, subject, message_properties)
if len(subscribers) == batch_size:
# spawn task for sending out notifications to next set of subscribers
next_start = subscriber_start_index + batch_size
task_params = {
'comment_key': comment_key,
'task_key': task_key,
'subscriber_start_index': next_start
}
task_url = '/tasks/ghop/task/mail/create'
new_task = taskqueue.Task(params=task_params, url=task_url)
new_task.add('mail')
# return OK
return http.HttpResponse()
def updateTasksPostStudentSignUp(request, *args, **kwargs):
"""Appengine task that updates the GHOP Tasks after the student signs up.
Expects the following to be present in the POST dict:
student_key: Specifies the student key name who registered
Args:
request: Django Request object
"""
from soc.modules.ghop.logic.models import student as ghop_student_logic
post_dict = request.POST
student_key = post_dict.get('student_key')
if not student_key:
# invalid student data, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid Student data: %s' % post_dict)
student_entity = ghop_student_logic.logic.getFromKeyNameOr404(student_key)
# retrieve all tasks currently assigned to the user
task_fields = {
'user': student_entity.user,
}
task_entities = ghop_task_logic.logic.getForFields(task_fields)
# TODO(madhusudan) move this to the Task Logic
# Make sure the tasks store references to the student as well as
# closing all tasks that are AwaitingRegistration.
  # iterate over a copy because AwaitingRegistration tasks are removed from the list below
  for task_entity in task_entities[:]:
task_entity.student = student_entity
if task_entity.status == 'AwaitingRegistration':
task_entities.remove(task_entity)
properties = {'status': 'Closed'}
changes = [ugettext('User-MelangeAutomatic'),
ugettext('Action-Student registered'),
ugettext('Status-%s' % (properties['status']))]
comment_properties = {
'parent': task_entity,
'scope_path': task_entity.key().name(),
'created_by': None,
'changes': changes,
'content': ugettext(
'(The Melange Automated System has detected that the student '
'has signed up for the program and hence has closed this task.'),
}
ghop_task_logic.logic.updateEntityPropertiesWithCWS(
task_entity, properties, comment_properties)
db.put(task_entities)
# return OK
return http.HttpResponse()
|
python
|
import argparse as ag
import ast
import csv
import datetime
from ebooklib import epub
import feedparser
import json
import logging
from termcolor import colored
logging.basicConfig(filename='logs.log', level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - '
'%(message)s')
logging.info('Logs go here!')
logging.error('Something went wrong with logs :(')
logging.debug('Logs debug')
log = logging.getLogger()
class ParserRSS(object):
"""Class for RSS Parser"""
news = {}
def __init__(self, url, lim=1, clr_fg='no', choice_list=None,
printer_flag='small'):
if choice_list is None:
choice_list = []
self.url = url
self.lim = lim
self.clr_fg = clr_fg
self.choice_list = choice_list
self.printer_flag = printer_flag
def custom_printer(self):
"""Prints customized news.
Is started by choicer()"""
log.info('Start custom_printer function')
iterator = 0
key_list = list(self.news.keys())
keyz = key_list[iterator].capitalize()
keyz = ''.join([i for i in keyz if not i.isdigit()])
separator = keyz
while iterator != len(key_list):
if self.clr_fg == 'no':
keyz = key_list[iterator].capitalize()
keyz = ''.join([i for i in keyz if not i.isdigit()])
if keyz == separator:
print('\nNew block:')
print(keyz, ': ', self.news[key_list[iterator]])
iterator += 1
elif self.clr_fg == 'yes':
keyz = key_list[iterator].capitalize()
keyz = ''.join([i for i in keyz if not i.isdigit()])
if keyz == separator:
print(colored('\nNew block:', 'green'))
print(colored(keyz, 'red'), colored(': ', 'red'),
colored(self.news[key_list[iterator]], 'blue'))
iterator += 1
def choicer(self):
"""Allows user to choose what to collect.
Is started by news_collector_custom()
Starts custom_printer()"""
log.info('Start choicer function')
choice = input()
if choice != 'end':
try:
choice = int(choice)
print(choice)
if choice not in self.choice_list:
self.choice_list.append(choice)
self.choice_list.sort()
self.choicer()
else:
print('You have already pointed this!\n')
self.choicer()
except ValueError:
print('You must choose a number!')
self.choicer()
elif choice == 'end':
if not self.choice_list:
print('The program is quit because '
'You have not made a choice!')
else:
print(self.choice_list)
iterator = 0
while iterator != self.lim:
NewsFeed = feedparser.parse(self.url)
entry = NewsFeed.entries[iterator]
i = 0
while i != len(self.choice_list):
var = self.choice_list[i]
lst = list(entry.keys())
self.news[lst[var] + str(iterator)] = entry[lst[var]]
i += 1
iterator += 1
self.cache_news()
self.custom_printer()
def news_collector_custom(self):
"""Collects news for custom_printer().
May be started by --custom command or by news_collector()
Starts choicer()"""
log.info('Start news_collector_custom function')
try:
NewsFeed = feedparser.parse(self.url)
entry = NewsFeed.entries[1]
except Exception:
e = 'No url or url is incorrect!'
return e
self.news = {}
list_of_keys = list(entry.keys())
ch_dict = {i: list_of_keys[i] for i in range(0, len(list_of_keys))}
print('Make your choice:\n'
'Print end if no more choices needed\n', ch_dict)
self.choicer()
def news_collector(self):
"""Collects non-custom news.
Starts from trying collect news for printer().
If fails, then tries to collect news for small_printer()
If fails, then initializes news_collector_custom()"""
log.info('Start news_collector function')
try:
NewsFeed = feedparser.parse(self.url)
entry = NewsFeed.entries[1]
except Exception:
e = 'No url or url is incorrect!'
return e
iterator = 0
try:
# Yahoo&Yahoo-alike News like Tut.by
if 'title' and 'published' and 'link' and 'media_content' \
and 'links' in entry:
self.printer_flag = 'big'
while iterator != self.lim:
entry = NewsFeed.entries[iterator]
self.news['Title' + str(iterator)] = entry.title
self.news['Date' + str(iterator)] = entry.published
self.news['Link' + str(iterator)] = entry.link
self.news['Media' + str(iterator)] = entry.media_content
self.news['Links' + str(iterator)] = entry.links
iterator += 1
self.cache_news()
except AttributeError:
try:
                # Small collector for BBC&BBC-alike News
                if all(key in entry for key in ('title', 'link')):
self.printer_flag = 'small'
self.news = {}
while iterator != self.lim:
entry = NewsFeed.entries[iterator]
self.news['Title' + str(iterator)] = entry.title
self.news['Link' + str(iterator)] = entry.link
iterator += 1
self.cache_news()
except AttributeError:
# Customized news collection
self.news_collector_custom()
except Exception as exc:
print(exc)
def printer(self):
"""Prints news from Yahoo&Yahoo-alike News sites"""
log.info('Start printer function')
self.news_collector()
if self.printer_flag == 'small':
self.small_printer()
elif self.printer_flag == 'big':
iterator = 0
while iterator != len(self.news.keys()):
if self.clr_fg == 'no':
print('\nNew block:',
'\nTitle: ', self.news['Title' + str(iterator)],
'\nLink: ', self.news['Link' + str(iterator)],
'\nDate: ', self.news['Date' + str(iterator)],
'\nMedia Content:'
' ', self.news['Media' + str(iterator)],
'\nLinks:'
' ', self.news['Links' + str(iterator)], '\n')
iterator += 1
elif self.clr_fg == 'yes':
print(colored('\nNew block:', 'red'),
colored('\nTitle: ', 'green'),
colored(self.news['Title' + str(iterator)], 'blue'),
colored('\nLink: ', 'green'),
colored(self.news['Link' + str(iterator)], 'blue'),
colored('\nDate: ', 'green'),
colored(self.news['Date' + str(iterator)], 'blue'),
colored('\nMedia Content: ', 'green'),
colored(self.news['Media' + str(iterator)], 'blue'),
colored('\nLinks: ', 'green'),
colored(self.news['Links' + str(iterator)], 'blue'),
'\n')
iterator += 1
def small_printer(self):
"""Prints news from BBC&BBC-alike News sites."""
log.info('Start small_printer function')
iterator = 0
while iterator != len(self.news.keys()):
if self.clr_fg == 'no':
print('\nNew block:\n',
'Title: ', self.news['Title' + str(iterator)], '\n',
'Link: ', self.news['Link' + str(iterator)], '\n')
iterator += 1
elif self.clr_fg == 'yes':
print(colored('\nNew block:\n', 'red'),
colored('Title: ', 'red'),
colored(self.news['Title' + str(iterator)], 'green'),
'\n', colored('Link: ', 'red'),
colored(self.news['Link' + str(iterator)], 'green'),
'\n')
iterator += 1
def json_converter(self):
"""Converts collected news to json format."""
log.info('Start json_converter function')
self.news_collector()
parsed = json.dumps(self.news, indent=4, sort_keys=False)
print(parsed)
def json_converter_custom(self):
"""Converts collected custom news to json format"""
log.info('Start json_converter_custom function')
self.news_collector_custom()
parsed = json.dumps(self.news, indent=4, sort_keys=False)
print(parsed)
def cache_news(self):
"""Caches news to cache_csv.csv file.
Checks the cache contain to avoid double-caching.
Creates cache_csv.csv if it does not exist"""
log.info('Start cache')
the_date = datetime.datetime.strftime(datetime.datetime.now(),
"%Y.%m.%d %H:%M:%S")
the_date = the_date[0:10]
the_date = the_date.replace('.', '')
keyz = the_date + self.url
field_names = ['date+url', 'news']
key_list = []
try:
with open('cache_csv.csv', 'r') as file:
csvread = csv.DictReader(file)
for row in csvread:
if row['date+url'] != 'date+url':
key_list.append(row['date+url'])
            if keyz not in key_list:
                with open('cache_csv.csv', 'a') as f:
                    w = csv.DictWriter(f, field_names)
                    w.writerow({'date+url': keyz, 'news': self.news})
        except FileNotFoundError:
            with open('cache_csv.csv', 'w') as f:
                w = csv.DictWriter(f, field_names)
                w.writeheader()
                w.writerow({'date+url': keyz, 'news': self.news})
def cache_extractor(self):
"""Extracts cache from cache_csv.csv if it exists
and there is cache for this date and url"""
print('start cache extraction!')
key_list = []
val_list = []
try:
with open('cache_csv.csv', 'r') as f:
csvread = csv.DictReader(f)
for row in csvread:
if row['date+url'] != 'date+url':
key_list.append(row['date+url'])
if row['news'] != 'news':
val_list.append(row['news'])
news_dct = dict.fromkeys(key_list, val_list)
try:
cached_news_list = news_dct[self.url]
cached_news_str = cached_news_list[0]
print(cached_news_str)
self.news = ast.literal_eval(cached_news_str)
print('\nNews cache for this date and url: \n')
self.custom_printer()
except Exception:
print('No cache for this date/url!')
except FileNotFoundError:
print('There is no cache at all!')
    def to_epub(self, path=''):
""" Converts collected news to .epub file"""
self.news_collector()
the_date = datetime.datetime.strftime(datetime.datetime.now(),
"%Y.%m.%d %H:%M:%S")
the_date = the_date[0:10]
the_date = the_date.replace('.', '')
keyz = the_date + self.url
keyz = keyz.replace('/', '')
keyz = keyz.replace('.', '')
keyz = keyz.replace(':', '')
try:
log.info('Convert to epub')
book = epub.EpubBook()
# set metadata
book.set_identifier('id123456')
book.set_title('news')
book.set_language('en')
book.add_author('Andrey Kapitonov')
book.add_author('Andrey Kapitonov', file_as='Epub', role='ill',
uid='coauthor')
chapter_file_counter = 1
ep = ""
c1 = epub.EpubHtml(title='Intro', file_name='{}.xhtml'.format(
chapter_file_counter), lang='hr')
for item in self.news:
ep += '<br>' + str(item) + '<br>'
for element in self.news[item]:
ep += str(element)
chapter_file_counter += 1
c1.content = ep
# add chapter
book.add_item(c1)
# define Table Of Contents
book.toc = (epub.Link('chap_01.xhtml', 'Introduction', 'intro'),
(epub.Section('Simple book'), (c1,)))
# add default NCX and Nav file
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# define CSS style
style = 'BODY {color: white;}'
nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css",
media_type="text/css", content=style)
# add CSS file
book.add_item(nav_css)
# basic spine
book.spine = ['nav', c1]
            # write to the file; the output directory comes from the caller (see main())
            if path:
                file_name = path.rstrip('/') + '/book' + keyz + '.epub'
            else:
                file_name = 'book' + keyz + '.epub'
epub.write_epub(file_name, book, {})
print('Successful', file_name)
except Exception:
raise Exception('Unable to convert to .epub')
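# Minimal programmatic-usage sketch (illustrative only; the feed URL below is an
# assumption, not taken from this module):
def example_usage():
    reader = ParserRSS('https://news.yahoo.com/rss/', lim=3, clr_fg='no')
    reader.printer()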
def main():
"""Gets optional arguments and initializes ParserRSS class"""
try:
parser = ag.ArgumentParser(description='Processes something')
parser.add_argument('--version', action='store_true',
help='Prints version info')
parser.add_argument('--json', action='store_true',
help='Prints result as JSON in stdout')
parser.add_argument('--verbose', action='store_true',
help='Outputs verbose status info')
parser.add_argument('--limit', action='store', dest='limit',
help='Limits news topics if this parameter '
'provided', default=1)
parser.add_argument('--custom', action='store_true',
help='Allows to customize the output')
parser.add_argument('--date', action='store', dest='date',
help='Get cached news by date')
parser.add_argument('--to_epub', action='store_true',
help='Converts to .epub')
parser.add_argument('--output_path', action='store', dest='path',
help='provides a custom path for .epub-file')
parser.add_argument('--colored', action='store_true',
help='Colorizes stdout')
parser.add_argument('string', metavar='Source', type=str)
args = parser.parse_args()
log.info('Start')
custom_flag = 'no'
cache_flag = 'no'
conversation_flag = 'no'
color_flag = 'no'
if args.date:
obj = ParserRSS(str(args.date) + str(args.string))
obj.cache_extractor()
cache_flag = 'yes'
if args.colored:
color_flag = 'yes'
prss = ParserRSS(args.string, int(args.limit), color_flag)
if args.to_epub:
            prss.to_epub(args.path or '')
conversation_flag = 'yes'
if args.version:
print('version 1')
log.info('Printed Version')
if args.custom:
custom_flag = 'yes'
log.info('Changed custom_flag')
if args.json:
if custom_flag == 'no' and cache_flag == 'no' \
and conversation_flag == 'no':
prss.json_converter()
                log.info('Started as standard json-collector')
elif custom_flag == 'yes' and cache_flag == 'no' \
and conversation_flag == 'no':
prss.json_converter_custom()
                log.info('Started as custom json-collector')
if args.verbose:
with open("logs.log", 'r+') as f:
date = f.read()
print(date)
log.info('Logs to stdout')
if args.limit:
if custom_flag == 'no' and cache_flag == 'no' \
and conversation_flag == 'no':
prss.printer()
log.info('Started as standard news-collector')
elif custom_flag == 'yes' and cache_flag == 'no' \
and conversation_flag == 'no':
prss.news_collector_custom()
log.info('Started as custom news-collector')
else:
prss.printer()
log.info('Started with no optional arguments')
prss.cache_news()
except Exception as exc:
print(exc)
if __name__ == '__main__':
print('''usage: rss_reader_kapitonov.py [-h] [--version] [--json]
[--verbose] [--limit LIMIT] [--custom] source
Pure Python command-line RSS reader.
positional arguments:
source RSS URL
optional arguments:
-h, --help show this help message and exit
--version Print version info
--json Print result as JSON in stdout
--verbose Outputs verbose status messages
--limit LIMIT Limit news topics if this parameter provided
--custom Allows to customize the output
--date Get cached news by date
    --to_epub      Converts to .epub
--output_path Provides a custom path for .epub file
--colored Colorized stdout''')
main()
|
python
|
# ----------------------------------------------------------------------------
# meh
# Copyright (c) 2021 TreeCaptcha
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
import pyglet
import random
from stopwatch import Stopwatch
score = 0
window = pyglet.window.Window(width=500, height=400)
breadmans = pyglet.graphics.Batch()
bred1 = pyglet.sprite.Sprite(pyglet.image.load('france.png'),50,50,batch=breadmans)
bred2 = pyglet.sprite.Sprite(pyglet.image.load('france.png'),50,200,batch=breadmans)
bred3 = pyglet.sprite.Sprite(pyglet.image.load('france.png'),50,100,batch=breadmans)
kitten = pyglet.image.load('background.png')
bred1.scale = 0.15
bred2.scale = 0.16
bred3.scale = 0.17
bagete = pyglet.sprite.Sprite(pyglet.image.load('bagg.png'))
bagete.scale = 0.2
window.set_mouse_visible(False)
times = Stopwatch()
game = True
print(times.start())
enabble = True
def enable(dt):
global enabble
enabble = True
@window.event
def on_draw():
global game
global enabble
window.clear()
kitten.blit(0,0)
if game:
breadmans.draw()
bagete.draw()
pyglet.text.Label(text=str(score), font_size=20, x=10, y=370, color=[0,25,25,255]).draw()
pyglet.text.Label(text=str(round(times.duration, 1)), font_size=20, x=10, y=350, color=[0, 25, 25, 255]).draw()
if times.duration >= 30.0:
window.set_mouse_visible(True)
game = False
times.reset()
enabble = False
pyglet.clock.schedule_once(enable, 1.0)
pyglet.text.Label(text='Play Again!', font_size=30, x=150, y=250, color=[0, 25, 25, 255]).draw()
pyglet.text.Label(text=str(score*53682), font_size=20, x=200, y=200, color=[0, 25, 25, 255]).draw()
if not game:
pyglet.text.Label(text='Play Again!', font_size=30, x=150, y=250, color=[0, 25, 25, 255]).draw()
pyglet.text.Label(text=str(score*53682), font_size=20, x=200, y=200, color=[0, 25, 25, 255]).draw()
def update(dt):
global game
global enabble
if times.duration >= 30.0:
window.set_mouse_visible(True)
game = False
times.reset()
enabble = False
pyglet.clock.schedule_once(enable, 1.0)
pyglet.text.Label(text=str(score*53682), font_size=30, x=200, y=200, color=[0, 25, 25, 255]).draw()
pyglet.text.Label(text=str(round(times.duration, 1)), font_size=20, x=10, y=350, color=[0, 25, 25, 255]).draw()
pyglet.clock.schedule_interval(update, 0.1)
@window.event
def on_mouse_press(x, y, button, modifiers):
print(x)
global score
global game
global enabble
if game:
if (x - (bred1.x +35) < 50) and (x - (bred1.x) > 0):
if (y - (bred1.y + 35) < 50) and (y - (bred1.y) > 0):
print('bred1')
bred1.update(random.randint(0,470),random.randint(0,350))
score += 1
if (x - (bred2.x +35) < 50) and (x - (bred2.x) > 0):
if (y - (bred2.y + 35) < 50) and (y - (bred2.y) > 0):
print('bred2')
bred2.update(random.randint(0, 470), random.randint(0, 350))
score += 1
if (x - (bred3.x +35) < 50) and (x - (bred3.x) > 0):
if (y - (bred3.y + 35) < 50) and (y - (bred3.y) > 0):
print('bred3')
bred3.update(random.randint(0, 470), random.randint(0, 350))
score += 1
elif enabble:
if (x < 300) and (x > 100):
if (y < 300) and (y > 250):
print('bred3')
game = True
score = 0
window.set_mouse_visible(False)
times.restart()
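# A possible refactor sketch (not wired into the handlers above): one helper for
# the repeated bread hit-test, using the same 0 < dx < 85, 0 < dy < 85 window.
def _hit(sprite, x, y):
    return 0 < x - sprite.x < 85 and 0 < y - sprite.y < 85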
@window.event
def on_mouse_motion(x, y, dx, dy):
bagete.update(x-30,y-80)
pyglet.app.run()
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from .. import BeautifulSoup as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
# This function is called by the downloader in all adapter_*.py files
# in this dir to register the adapter class. So it needs to be
# updated to reflect the class below it. That, plus getSiteDomain()
# take care of 'Registering'.
def getClass():
return HPFandomNetAdapterAdapter # XXX
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class HPFandomNetAdapterAdapter(BaseSiteAdapter): # XXX
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.decode = ["Windows-1252",
"utf8"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
# XXX Most sites don't have the /eff part. Replace all to remove it usually.
self._setURL('http://' + self.getSiteDomain() + '/eff/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','hpfdm') # XXX
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%Y.%m.%d" # XXX
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'www.hpfandom.net' # XXX
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/eff/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/eff/viewstory.php?sid=")+r"\d+$"
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
url = self.url
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
# use BeautifulSoup HTML parser to make everything easier to find.
soup = bs.BeautifulSoup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','http://'+self.host+'/eff/'+a['href'])
self.story.setMetadata('author',a.string)
## Going to get the rest from the author page.
authdata = self._fetchUrl(self.story.getMetadata('authorUrl'))
# fix a typo in the site HTML so I can find the Characters list.
authdata = authdata.replace('<td width=10%">','<td width="10%">')
# hpfandom.net only seems to indicate adult-only by javascript on the story/chapter links.
if "javascript:if (confirm('Slash/het fiction which incorporates sexual situations to a somewhat graphic degree and some violence. ')) location = 'viewstory.php?sid=%s'"%self.story.getMetadata('storyId') in authdata \
and not (self.is_adult or self.getConfig("is_adult")):
raise exceptions.AdultCheckRequired(self.url)
authsoup = bs.BeautifulSoup(authdata)
reviewsa = authsoup.find('a', href="reviews.php?sid="+self.story.getMetadata('storyId')+"&a=")
# <table><tr><td><p><b><a ...>
metablock = reviewsa.findParent("table")
#print("metablock:%s"%metablock)
## Title
titlea = metablock.find('a', href=re.compile("viewstory.php"))
#print("titlea:%s"%titlea)
if titlea == None:
raise exceptions.FailedToDownload("Story URL (%s) not found on author's page, can't use chapter URLs"%url)
self.story.setMetadata('title',stripHTML(titlea))
# Find the chapters: !!! hpfandom.net differs from every other
# eFiction site--the sid on viewstory for chapters is
# *different* for each chapter
for chapter in soup.findAll('a', {'href':re.compile(r"viewstory.php\?sid=\d+&i=\d+")}):
m = re.match(r'.*?(viewstory.php\?sid=\d+&i=\d+).*?',chapter['href'])
# just in case there's tags, like <i> in chapter titles.
#print("====chapter===%s"%m.group(1))
self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/eff/'+m.group(1)))
if len(self.chapterUrls) == 0:
self.chapterUrls.append((stripHTML(self.story.getMetadata('title')),url))
self.story.setMetadata('numChapters',len(self.chapterUrls))
# eFiction sites don't help us out a lot with their meta data
# formating, so it's a little ugly.
# utility method
def defaultGetattr(d,k):
try:
return d[k]
except:
return ""
summary = metablock.find("td",{"class":"summary"})
summary.name='span'
self.setDescription(url,summary)
# words & completed in first row of metablock.
firstrow = stripHTML(metablock.find('tr'))
# A Mother's Love xx Going Grey 1 (G+) by Kiristeen | Reviews - 18 | Words: 27468 | Completed: Yes
m = re.match(r".*?\((?P<rating>[^)]+)\).*?Words: (?P<words>\d+).*?Completed: (?P<status>Yes|No)",firstrow)
if m != None:
if m.group('rating') != None:
self.story.setMetadata('rating', m.group('rating'))
if m.group('words') != None:
self.story.setMetadata('numWords', m.group('words'))
if m.group('status') != None:
if 'Yes' in m.group('status'):
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
# <tr><td width="10%" valign="top">Chapters:</td><td width="40%" valign="top">4</td>
# <td width="10%" valign="top">Published:</td><td width="40%" valign="top">2010.09.29</td></tr>
# <tr><td width="10%" valign="top">Completed:</td><td width="40%" valign="top">Yes</td><td width="10%" valign="top">Updated:</td><td width="40%" valign="top">2010.10.03</td></tr>
labels = metablock.findAll('td',{'width':'10%'})
for td in labels:
label = td.string
value = td.nextSibling.string
#print("\nlabel:%s\nvalue:%s\n"%(label,value))
if 'Category' in label and value:
cats = td.parent.findAll('a',href=re.compile(r'categories.php'))
catstext = [cat.string for cat in cats]
for cat in catstext:
                    self.story.addToList('category',cat)
if 'Characters' in label and value: # this site can have Character label with no
# values, apparently. Others as a precaution.
for char in value.split(','):
self.story.addToList('characters',char.strip())
if 'Genre' in label and value:
for genre in value.split(','):
self.story.addToList('genre',genre.strip())
if 'Warnings' in label and value:
for warning in value.split(','):
if warning.strip() != 'none':
self.story.addToList('warnings',warning.strip())
if 'Published' in label:
self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
if 'Updated' in label:
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
data = self._fetchUrl(url)
# There's no good wrapper around the chapter text. :-/
# There are, however, tables with width=100% just above and below the real text.
data = re.sub(r'<table width="100%">.*?</table>','<div name="storybody">',
data,count=1,flags=re.DOTALL)
data = re.sub(r'<table width="100%">.*?</table>','</div>',
data,count=1,flags=re.DOTALL)
soup = bs.BeautifulStoneSoup(data,selfClosingTags=('br','hr')) # otherwise soup eats the br/hr tags.
div = soup.find("div",{'name':'storybody'})
#print("\n\ndiv:%s\n\n"%div)
if None == div:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,div)
|
python
|
from collections import namedtuple
import csv
import gzip
from os.path import abspath, join
import subprocess
from growser.app import db, log
from growser.db import from_sqlalchemy_table
from growser.models import Recommendation
from growser.cmdr import DomainEvent, Handles
from growser.commands.recommendations import (
ExecuteMahoutRecommender,
ExportRatingsToCSV
)
RecModel = namedtuple('RecModel', ['id', 'source', 'destination', 'sql'])
SQL_PATH = "deploy/etl/sql/recs"
RATINGS_PATH = "data/ratings"
EXPORT_PATH = "data/recs"
MODELS = {
1: RecModel(1, 'all.csv', 'mahout.all.csv.gz', 'ratings.all.sql'),
2: RecModel(2, 'year.csv', 'mahout.year.csv.gz', 'ratings.year.sql'),
3: RecModel(3, '120.csv', 'mahout.120.csv.gz', 'ratings.120.sql')
}
class RatingsExported(DomainEvent):
def __init__(self, model):
self.model = model
class RecommendationsUpdated(DomainEvent):
def __init__(self, model: int, num_results: int):
self.model = model
self.num_results = num_results
class ExportRatingsToCSVHandler(Handles[ExportRatingsToCSV]):
def handle(self, cmd: ExportRatingsToCSV):
model = MODELS.get(cmd.model)
sql = open(join(SQL_PATH, model.sql)).read()
db.engine.execute(sql)
return RatingsExported(cmd.model)
class ExecuteMahoutRecommenderHandler(Handles[ExecuteMahoutRecommender]):
def handle(self, cmd: ExecuteMahoutRecommender):
model = MODELS.get(cmd.model)
source = abspath(join(RATINGS_PATH, model.source))
destination = abspath(join(EXPORT_PATH, model.destination))
log.info('Running Mahout')
run = ["mvn", "exec:java", "-DbatchSize=100",
"-DmodelID={}".format(model.id),
"-Dsrc=" + source,
"-Dout=" + destination]
subprocess.call(run, cwd="../growser-mahout/")
Recommendation.query.filter(
Recommendation.model_id == model.id).delete()
columns = ['model_id', 'repo_id', 'recommended_repo_id', 'score']
batch = from_sqlalchemy_table(
Recommendation.__table__, from_csv(destination), columns)
        total_rows = 0
        for rows in batch.batch_execute(db.engine.raw_connection):
            log.info("Batch complete: {}".format(rows))
            total_rows += rows
        return RecommendationsUpdated(model.id, total_rows)
def from_csv(path):
file = gzip.open(path, 'rt') if path.endswith('gz') else open(path, 'rt')
return csv.reader(file)
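# Illustrative note: from_csv transparently handles plain and gzip-compressed
# exports, e.g. from_csv('data/recs/mahout.all.csv.gz') (a sample path built from
# the MODELS table above).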
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.test import TestCase
from cm.unit_tests import utils
plausible_action_variants = {
'unlimited': {
'state_available': 'any',
'state_unavailable': [],
'multi_state_available': 'any',
'multi_state_unavailable': [],
},
'limited_by_available_state': {
'state_available': ['bimbo'],
'state_unavailable': [],
'multi_state_available': 'any',
'multi_state_unavailable': [],
},
'limited_by_unavailable_state': {
'state_available': 'any',
'state_unavailable': ['bimbo'],
'multi_state_available': 'any',
'multi_state_unavailable': [],
},
'limited_by_available_multi_state': {
'state_available': 'any',
'state_unavailable': [],
'multi_state_available': ['bimbo'],
'multi_state_unavailable': [],
},
'limited_by_unavailable_multi_state': {
'state_available': 'any',
'state_unavailable': [],
'multi_state_available': 'any',
'multi_state_unavailable': ['bimbo'],
},
'limited_by_available': {
'state_available': ['bimbo'],
'state_unavailable': [],
'multi_state_available': ['bimbo'],
'multi_state_unavailable': [],
},
'limited_by_unavailable': {
'state_available': 'any',
'state_unavailable': ['bimbo'],
'multi_state_available': 'any',
'multi_state_unavailable': ['bimbo'],
},
'hidden_by_unavalable_state': {
'state_available': 'any',
'state_unavailable': 'any',
'multi_state_available': 'any',
'multi_state_unavailable': [],
},
'hidden_by_unavalable_multi_state': {
'state_available': 'any',
'state_unavailable': [],
'multi_state_available': 'any',
'multi_state_unavailable': 'any',
},
}
cluster_variants = {
'unknown-unknown': {'state': 'unknown', '_multi_state': ['unknown']},
'bimbo-unknown': {'state': 'bimbo', '_multi_state': ['unknown']},
'unknown-bimbo': {'state': 'unknown', '_multi_state': ['bimbo']},
'bimbo-bimbo': {'state': 'bimbo', '_multi_state': ['bimbo']},
}
expected_results = {
'unknown-unknown': {
'unlimited': True,
'limited_by_available_state': False,
'limited_by_unavailable_state': True,
'limited_by_available_multi_state': False,
'limited_by_unavailable_multi_state': True,
'limited_by_available': False,
'limited_by_unavailable': True,
'hidden_by_unavalable_state': False,
'hidden_by_unavalable_multi_state': False,
},
'bimbo-unknown': {
'unlimited': True,
'limited_by_available_state': True,
'limited_by_unavailable_state': False,
'limited_by_available_multi_state': False,
'limited_by_unavailable_multi_state': True,
'limited_by_available': False,
'limited_by_unavailable': False,
'hidden_by_unavalable_state': False,
'hidden_by_unavalable_multi_state': False,
},
'unknown-bimbo': {
'unlimited': True,
'limited_by_available_state': False,
'limited_by_unavailable_state': True,
'limited_by_available_multi_state': True,
'limited_by_unavailable_multi_state': False,
'limited_by_available': False,
'limited_by_unavailable': False,
'hidden_by_unavalable_state': False,
'hidden_by_unavalable_multi_state': False,
},
'bimbo-bimbo': {
'unlimited': True,
'limited_by_available_state': True,
'limited_by_unavailable_state': False,
'limited_by_available_multi_state': True,
'limited_by_unavailable_multi_state': False,
'limited_by_available': True,
'limited_by_unavailable': False,
'hidden_by_unavalable_state': False,
'hidden_by_unavalable_multi_state': False,
},
}
class ActionAllowTest(TestCase):
"""Tests for `cm.models.Action.allowed` method"""
def test_variants(self):
bundle = utils.gen_bundle()
prototype = utils.gen_prototype(bundle, 'cluster')
cluster = utils.gen_cluster(bundle=bundle, prototype=prototype)
action = utils.gen_action(bundle=bundle, prototype=prototype)
for state_name, cluster_states in cluster_variants.items():
for cl_attr, cl_value in cluster_states.items():
setattr(cluster, cl_attr, cl_value)
cluster.save()
for req_name, req_states in plausible_action_variants.items():
for act_attr, act_value in req_states.items():
setattr(action, act_attr, act_value)
action.save()
self.assertIs(action.allowed(cluster), expected_results[state_name][req_name])
|
python
|
from json import decoder, loads
from os import getenv
from re import findall, search
from nfu.expand_bus.network import http_get
from nfu.expand_bus.ticket import get_alipay_url
from nfu.nfu_error import NFUError
def get_bus_schedule(route_id: int, date: list) -> dict:
"""
若日期在车票预售期内,获取班车时刻表。
- 字段说明
- route_id 路线id:南苑 -> 河堤公园:21,
河堤公园 -> 南苑:22,
南苑 -> 中大南校区:13,
中大南校区 -> 南苑:14
- time 乘车日期
:param route_id: 校车路线
:param date: 乘车日期
:return:
"""
url = 'http://nfuedu.zftcloud.com/campusbus_index/ticket/show_schedule.html'
params = {
'route_id': route_id,
'time': date
}
response = http_get(url, params)
try:
data = loads(search(r'var msg = .+', response.text).group()[10:-1])
except (AttributeError, decoder.JSONDecodeError):
raise NFUError('学校车票系统错误,请稍后再试')
else:
return data
def get_passenger_data() -> list:
"""
获取乘车人数据
:return:
"""
url = 'http://nfuedu.zftcloud.com/campusbus_index/my/passenger_puls.html'
response = http_get(url)
try:
passenger = loads(search(r'var passenger = .+', response.text).group()[16:-1])
except (AttributeError, decoder.JSONDecodeError):
raise NFUError('学校车票系统错误,请稍后再试')
else:
return passenger
def get_pay_order(order_id: int) -> dict:
    """
    Fetch the payment data for an order.
    :param order_id:
    :return:
    """
url = f'http://nfuedu.zftcloud.com/campusbus_index/order/notpay_order/order_id/{order_id}.html'
response = http_get(url)
route, date = get_route(response.text)
try:
names = findall(r'<span class="title_name title_w">\D+</span>', response.text)
phones = findall(r'<span class="title_iphone">\d+</span>', response.text)
trade_no = search(r'var tradeNo = .+', response.text).group()[15:-2]
price = search(r'¥<span>\d+</span>', response.text).group()[7:-7]
except AttributeError:
raise NFUError('学校车票系统错误,请稍后再试')
    # collect the passenger info into a list
passengers = []
for i, name in enumerate(names):
passengers.append({
'name': name[33:-7],
'phone': phones[i][27:-7]
})
return {
'route': route,
'date': date,
'passengers': passengers,
'price': price,
'alipayUrl': get_alipay_url(trade_no),
'alipayQrUrl': f"{getenv('API_URL')}/school-bus-pro/alipay/qrcode?tradeNo={trade_no}"
}
def get_ticket_ids(order_id: int) -> dict:
"""
因为一个订单里面可能有多张车票,所以我们爬取一下车票号
:param order_id: 订单id
:return:
"""
url = 'http://nfuedu.zftcloud.com/campusbus_index/order/refund_ticket.html'
params = {'order_id': order_id}
response = http_get(url, params)
route, date = get_route(response.text)
ticket_list = []
ticket_data = findall(r'<span class="title_name title_w">.+\n.+\n.+\n.+\n.+', response.text)
try:
price = search(r'<span class="fare">\d+</span>', response.text).group()[19:-7]
except AttributeError:
raise NFUError('学校车票系统错误,请稍后再试')
for ticket in ticket_data:
try:
name = search(r'w">.+<s', ticket).group()[3:-9]
phone = search(r'<span class="title_iphone">\d+</span>', ticket).group()[27:-7]
except AttributeError:
raise NFUError('学校车票系统错误,请稍后再试')
try:
ticket_id = search(r', \d+', ticket).group()[2:]
except AttributeError:
ticket_list.append({
'state': '已退票',
'name': name,
'phone': phone
})
else:
ticket_list.append({
'state': f'{price}¥',
'name': name,
'phone': phone,
'ticketId': ticket_id
})
return {
'route': route,
'date': date,
'passengerList': ticket_list
}
def get_route(response):
"""
从html中获取路径数据
:param response:
:return:
"""
try:
route = '{} -> {}'.format(
search(r'<span class="site_from">.+</span>', response).group()[24:-7],
search(r'<span class="site_to">.+</span>', response).group()[22:-7]
)
date = '{} {}'.format(
search(r'<span class="time_go">\S+</span>', response).group()[22:-7],
search(r'<span class="time_day">\S+</span>', response).group()[23:-7]
)
except AttributeError:
raise NFUError('学校车票系统错误,请稍后再试')
return route, date
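# Illustrative usage sketch (the arguments below are placeholders, not real values):
# schedule = get_bus_schedule(21, ['2020-01-01'])
# order = get_pay_order(123456)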
|
python
|
import os
import sys
import argparse
import requests
import flask
import json
import re
import yaml
import shutil
import mmh3
from munch import Munch, munchify
from flask import render_template, redirect, url_for, send_from_directory
from markupsafe import escape
def log(*args, **kwargs):
print(*args, **kwargs, flush=True)
def myhash_combine(curr, value):
return curr ^ (value + 0x9e3779b9 + (curr<<6) + (curr>>2))
def optionals(obj, *attrs):
ret = []
for attr in attrs:
if not hasattr(obj, attr):
log("attr not present:", attr)
continue
ret.append(getattr(obj, attr))
return ret
def myhash(*args):
h = 137597
for a in args:
if isinstance(a, str):
if a == "":
continue
b = bytes(a, "utf8")
else:
b = bytes(a)
hb = mmh3.hash(b, signed=False)
h = myhash_combine(h, hb)
s = hex(h)
return s[2:min(10, len(s))]
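# Illustrative note: myhash combines the murmur3 hashes of its arguments into a
# short, stable hex id, e.g. myhash("Release", "gcc") (sample arguments only).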
def copy_file_to_dir(file, dir):
dir = os.path.abspath(dir)
src = os.path.abspath(file)
dst = f"{dir}/{os.path.basename(src)}"
if not os.path.exists(dir):
os.makedirs(dir)
if os.path.exists(dst):
os.remove(dst)
log("copy:", src, "-->", dst)
shutil.copy(src, dst)
return dst
def chk(f):
log(f"looking for file:", f)
assert os.path.exists(f), f
return f
def load_yml_file(filename):
if not os.path.exists(filename):
raise Exception(f"not found: {filename}")
with open(filename) as f:
return load_yml(f.read())
def dump_yml(data, filename):
with open(filename, "w") as f:
yaml.safe_dump(data, f)
def load_yml(yml):
return munchify(yaml.safe_load(yml))
def dump_json(data, filename):
with open(filename, "w") as f:
f.write(json.dumps(data, indent=2, sort_keys=True))
def main():
#
parser = argparse.ArgumentParser(description="Browse benchmark results", prog="bm")
parser.add_argument("--debug", action="store_true", help="enable debug mode")
subparsers = parser.add_subparsers()
#
sp = subparsers.add_parser("create", help="create benchmark collection")
sp.set_defaults(func=BenchmarkCollection.create_new)
sp.add_argument("--debug", action="store_true", help="enable debug mode")
sp.add_argument("filename", type=str, help="the YAML file with the benchmark specs")
sp.add_argument("target", type=str, help="the directory to store the results")
#
sp = subparsers.add_parser("meta", help="get the required meta-information: cpu info, commit data")
sp.set_defaults(func=add_meta)
sp.add_argument("--debug", action="store_true", help="enable debug mode")
sp.add_argument("results", type=str, help="the directory with the results")
sp.add_argument("cmakecache", type=str, help="the path to the CMakeCache.txt file used to build the benchmark binaries")
sp.add_argument("build_type", type=str, help="the build type, eg Release Debug MinSizeRel RelWithDebInfo")
#
sp = subparsers.add_parser("add", help="add benchmark results")
sp.set_defaults(func=add_results)
sp.add_argument("--debug", action="store_true", help="enable debug mode")
sp.add_argument("results", type=str, help="the directory with the results")
sp.add_argument("target", type=str, help="the directory to store the results")
#
sp = subparsers.add_parser("serve", help="serve benchmark results")
sp.set_defaults(func=serve)
sp.add_argument("--debug", action="store_true", help="enable debug mode")
sp.add_argument("bmdir", type=os.path.abspath, default=os.getcwd(), help="the directory with the results. default=.")
sp.add_argument("-H", "--host", type=str, default="localhost", help="host. default=%(default)s")
sp.add_argument("-p", "--port", type=int, default=8000, help="port. default=%(default)s")
#
sp = subparsers.add_parser("export", help="export static html")
sp.set_defaults(func=freeze)
sp.add_argument("--debug", action="store_true", help="enable debug mode")
sp.add_argument("bmdir", type=os.path.abspath, default=os.getcwd(), help="the directory with the results. default=.")
#
sp = subparsers.add_parser("deps", help="install server dependencies")
sp.set_defaults(func=lambda _: download_deps())
sp.add_argument("--debug", action="store_true", help="enable debug mode")
#
args = parser.parse_args(sys.argv[1:] if len(sys.argv) > 1 else ["serve"])
if args.debug:
log(args)
args.func(args)
def get_manifest(args):
bmdir = os.path.abspath(args.bmdir)
manif_yml = os.path.join(bmdir, "manifest.yml")
manif_json = os.path.join(bmdir, "manifest.json")
manif = load_yml_file(manif_yml)
dump_json(manif, manif_json)
return manif
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
app = flask.Flask(__name__,
template_folder='template')
def _setup_app(args):
def _s(prop, val):
assert not hasattr(app, prop), prop
setattr(app, prop, val)
_s('args', args)
_s('manifest', get_manifest(args))
if args.debug:
app.config["DEBUG"] = True
def freeze(args):
"https://pythonhosted.org/Frozen-Flask/"
from flask_frozen import Freezer
_setup_app(args)
freezer = Freezer(app)
freezer.freeze(debug=args.debug)
def serve(args):
_setup_app(args)
app.run(host=args.host, port=args.port, debug=args.debug)
@app.route("/")
def home():
log("requested home")
return render_template("index.html")
@app.route("/<path>")
def other_(path):
path = escape(path)
d = app.args.bmdir
log("requested other path:", path, "---", os.path.join(d, path))
return send_from_directory(d, path)
@app.route("/static/<path>")
def static_(path):
path = escape(path)
d = os.path.join(app.args.bmdir, "static")
log("requested static path:", path, "---", os.path.join(d, path))
return send_from_directory(d, path, cache_timeout=1) # timeout in seconds
@app.route("/bm/<commit>/<run>/<resultjson>")
def bm_(commit, run, resultjson):
commit = escape(commit)
run = escape(run)
resultjson = escape(resultjson)
d = os.path.join(app.args.bmdir, "runs", commit, run)
log("requested result:", os.path.join(d, resultjson))
return send_from_directory(d, resultjson, cache_timeout=1) # timeout in seconds
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def download_deps():
deps = [
"https://code.jquery.com/jquery-3.3.1.js",
"https://code.jquery.com/jquery-3.3.1.js",
"https://code.jquery.com/ui/1.12.1/jquery-ui.js",
"https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js",
"https://cdn.datatables.net/1.10.20/js/jquery.dataTables.min.js",
"https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css",
"https://cdn.datatables.net/1.10.20/css/jquery.dataTables.min.css",
"https://www.chartjs.org/dist/2.9.1/Chart.min.js",
#("https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.3.2/styles/github.css", "highlight.github.css"),
("https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.3.2/styles/github.min.css", "highlight.github.min.css"),
#"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.3.2/highlight.js",
"https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.3.2/highlight.min.js",
]
for src in deps:
if type(src) == str:
base = os.path.basename(src)
else:
src, base = src
dst = f"{os.getcwd()}/static/{base}"
download_url(src, dst)
def download_url(url, dst):
log("download url:", url, "--->", dst)
req = requests.get(url, stream=True)
if req.status_code == 200:
sz = 0
with open(dst, 'wb') as f:
for chunk in req:
f.write(chunk)
sz += len(chunk)
log(f"........ finished: {sz}B")
else:
log(f" error:", req.status_code, url)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
class BenchmarkCollection:
@staticmethod
def create_new(args):
dir = args.target
filename = os.path.join(dir, "bm.yml")
manifest = os.path.join(dir, "manifest.yml")
if not os.path.exists(dir):
os.makedirs(dir)
shutil.copyfile(args.filename, filename)
dump_yml(load_yml("""{runs: {}, bm: {}}"""), manifest)
return __class__(dir)
def __init__(self, dir):
if not os.path.exists(dir):
raise Exception(f"not found: {dir}")
self.dir = os.path.abspath(dir)
self.runs_dir = os.path.join(self.dir, "runs")
self.manifest = os.path.join(self.dir, "manifest.yml")
self.filename = os.path.join(self.dir, "bm.yml")
self.specs = munchify(load_yml_file(self.filename))
self.manif = munchify(load_yml_file(self.manifest))
def add(self, results_dir):
results_dir = os.path.abspath(results_dir)
dst_dir, meta = self._read_run(results_dir)
self._add_run(results_dir, dst_dir, meta)
dump_yml(self.manif, self.manifest)
def _read_run(self, results_dir):
log("adding run...")
id = f"{len(self.manif.runs.keys()):05d}"
log(f"adding run: id={id}")
meta = ResultMeta.load(results_dir)
dst_dir = os.path.join(self.runs_dir, meta.name)
return dst_dir, meta
def _add_run(self, results_dir, dst_dir, meta):
cats = self._add_meta_categories(meta)
for filename in ("meta.yml",
"CMakeCCompiler.cmake",
"CMakeCXXCompiler.cmake",
"CMakeSystem.cmake",
"compile_commands.json"):
filename = os.path.join(results_dir, filename)
if os.path.exists(filename):
copy_file_to_dir(filename, dst_dir)
else:
if not filename.endswith("compile_commands.json"):
raise Exception(f"wtf???? {filename}")
for name, specs in self.specs.bm.items():
if not hasattr(specs, 'variants'):
filename = chk(f"{results_dir}/{name}.json")
dst = copy_file_to_dir(filename, dst_dir)
self._add_bm_run(name, specs, meta)
else:
for t in specs.variants:
tname = f"{name}-{t}"
filename = chk(f"{results_dir}/{tname}.json")
dst = copy_file_to_dir(filename, dst_dir)
self._add_bm_run(tname, specs, meta)
def _add_bm_run(self, name, specs, meta):
if name not in self.manif.bm.keys():
self.manif.bm[name] = Munch(specs=specs, entries=[])
entry = self.manif.bm[name]
entry.specs = specs
if meta.name not in entry.entries:
entry.entries.append(meta.name)
def _add_meta_categories(self, meta):
run = Munch()
for catname in ('commit', 'cpu', 'system', 'build'):
meta_item = getattr(meta, catname)
self._add_item_to_category(meta.name, catname, meta_item)
run[catname] = meta_item.storage_id
# build specs are too verbose; remove them
self.manif.build[meta.build.storage_id].specs = Munch()
self.manif.runs[meta.name] = run
def _add_item_to_category(self, run, category_name, item):
if not hasattr(self.manif, category_name):
setattr(self.manif, category_name, Munch())
category = getattr(self.manif, category_name)
if item.storage_id not in category.keys():
category[item.storage_id] = Munch(specs=item, entries=[])
entry = category[item.storage_id]
entry.specs = item
if run not in entry.entries:
entry.entries.append(run)
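# (Added comment) Manifest layout as built by the methods above:
#   runs:   {run_name: {commit: id, cpu: id, system: id, build: id}}
#   bm:     {bm_name:  {specs: ..., entries: [run_name, ...]}}
#   commit/cpu/system/build: {storage_id: {specs: ..., entries: [run_name, ...]}}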
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class ResultMeta(Munch):
def __init__(self, results_dir, cmakecache, build_type):
super().__init__(self)
self.date = __class__.get_date()
self.commit = __class__.get_commit(results_dir)
self.cpu = __class__.get_cpu_info()
self.system = __class__.get_sys_info()
self.build = __class__.get_build_info(cmakecache, build_type)
self.name = self._get_name()
@staticmethod
def load(results_dir):
results_dir = os.path.join(os.path.abspath(results_dir), "meta.yml")
data = load_yml_file(results_dir)
return munchify(data)
def save(self, results_dir):
out = os.path.join(results_dir, "meta.yml")
log("saving meta:", out)
dump_yml(self, out)
self.build.save(results_dir)
@staticmethod
def get_date():
import datetime
now = datetime.datetime.now()
return now.strftime("%Y%m%d-%H%M%S")
def _get_name(self):
commit = self.commit.storage_name
cpu = self.cpu.storage_name
sys = self.system.storage_name
build = self.build.storage_name
name = f"{commit}/{cpu}-{sys}-{build}"
return name
@staticmethod
def get_commit(results_dir):
import git
repo = git.Repo(results_dir, search_parent_directories=True)
commit = repo.head.commit
commit = {p: str(getattr(commit, p))
for p in ('message', 'summary', 'name_rev',
'author',
'authored_datetime',
'committer',
'committed_datetime',)}
commit = Munch(commit)
commit.message = commit.message.strip()
commit.sha1 = commit.name_rev[:7]
spl = commit.authored_datetime.split(" ")
date = re.sub(r'-', '', spl[0])
time = re.sub(r'(\d+):(\d+):(\d+).*', r'\1\2\3', spl[1])
commit.storage_id = commit.sha1
commit.storage_name = f"git{date}_{time}-{commit.sha1}"
return commit
@staticmethod
def get_cpu_info():
import cpuinfo
nfo = cpuinfo.get_cpu_info()
nfo = Munch(nfo)
for a in ('cpu_version', 'cpu_version_string', 'python_version'):
if hasattr(nfo, a):
delattr(nfo, a)
for a in ('arch_string_raw', 'brand_raw', 'hardware_raw', 'vendor_id_raw'):
if not hasattr(nfo, a):
setattr(nfo, a, '')
nfo.storage_id = myhash(
nfo.arch_string_raw, nfo.brand_raw, nfo.hardware_raw, nfo.vendor_id_raw,
nfo.arch, nfo.bits, nfo.count, nfo.family, nfo.model, nfo.stepping,
",".join(nfo.flags), nfo.hz_advertised_friendly,
nfo.l2_cache_associativity,
nfo.l2_cache_line_size,
nfo.l2_cache_size,
nfo.l3_cache_size,
*optionals('l1_data_cache_size', 'l1_instruction_cache_size')
)
nfo.storage_name = f"{nfo.arch.lower()}_{nfo.storage_id}"
return nfo
@staticmethod
def get_sys_info():
import platform
uname = platform.uname()
nfo = Munch(
sys_platform=sys.platform,
sys=platform.system(),
uname=Munch(
machine=uname.machine,
node=uname.node,
release=uname.release,
system=uname.system,
version=uname.version,
)
)
nfo.storage_id = myhash(
nfo.sys_platform,
nfo.uname.machine,
)
nfo.storage_name = f"{nfo.sys_platform}_{nfo.storage_id}"
return nfo
@staticmethod
def get_build_info(cmakecache_txt, buildtype):
nfo = CMakeCache(cmakecache_txt)
def _btflags(name):
return (getattr(nfo, name), getattr(nfo, f"{name}_{buildtype.upper()}"))
nfo.storage_id = myhash(
buildtype,
nfo.CMAKE_CXX_COMPILER_ID,
nfo.CMAKE_CXX_COMPILER_VERSION,
nfo.CMAKE_CXX_COMPILER_VERSION_INTERNAL,
nfo.CMAKE_CXX_COMPILER_ABI,
nfo.CMAKE_CXX_SIZEOF_DATA_PTR,
nfo.CMAKE_C_COMPILER_ID,
nfo.CMAKE_C_COMPILER_VERSION,
nfo.CMAKE_C_COMPILER_VERSION_INTERNAL,
nfo.CMAKE_C_COMPILER_ABI,
nfo.CMAKE_C_SIZEOF_DATA_PTR,
*_btflags("CMAKE_CXX_FLAGS"),
*_btflags("CMAKE_C_FLAGS"),
*_btflags("CMAKE_STATIC_LINKER_FLAGS"),
*_btflags("CMAKE_SHARED_LINKER_FLAGS"),
)
#
ccname = nfo.CMAKE_CXX_COMPILER_ID.lower()
if ccname == "gnu":
ccname = "gcc"
ccname += nfo.CMAKE_CXX_COMPILER_VERSION.lower()
#
if nfo.CMAKE_C_SIZEOF_DATA_PTR == "4":
bits = "32bit"
elif nfo.CMAKE_C_SIZEOF_DATA_PTR == "8":
bits = "64bit"
else:
raise Exception("unknown architecture")
#
nfo.storage_name = f"{bits}_{buildtype}_{ccname}_{nfo.storage_id}"
return nfo
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class CMakeCache(Munch):
def __init__(self, cmakecache_txt):
import glob
for line in iter_cmake_lines(cmakecache_txt):
spl = line.split("=")
if len(spl) < 2:
continue
k, ty = spl[0].split(":")
v = "=".join(spl[1:]).strip()
setattr(self, k, v)
bdir = os.path.dirname(os.path.abspath(cmakecache_txt))
self._c_compiler_file = sorted(glob.glob(f"{bdir}/CMakeFiles/*/CMakeCCompiler.cmake"))[-1] # get the last
self._cxx_compiler_file = sorted(glob.glob(f"{bdir}/CMakeFiles/*/CMakeCXXCompiler.cmake"))[-1] # get the last
self._system_file = sorted(glob.glob(f"{bdir}/CMakeFiles/*/CMakeSystem.cmake"))[-1] # get the last
self._load_cmake_file(self._c_compiler_file)
self._load_cmake_file(self._cxx_compiler_file)
ccomfile = f"{bdir}/compile_commands.json"
self._compile_commands_file = ccomfile if os.path.exists(ccomfile) else None
def _load_cmake_file(self, filename):
for line in iter_cmake_lines(filename):
if not line.startswith("set("):
continue
k = re.sub(r"set\((.*)\ +(.*)\)", r"\1", line)
v = re.sub(r"set\((.*)\ +(.*)\)", r"\2", line)
v = v.strip('"').strip("'").strip()
setattr(self, k, v)
def save(self, results_dir):
copy_file_to_dir(self._c_compiler_file, results_dir)
copy_file_to_dir(self._cxx_compiler_file, results_dir)
copy_file_to_dir(self._system_file, results_dir)
if self._compile_commands_file is not None:
copy_file_to_dir(self._compile_commands_file, results_dir)
def iter_cmake_lines(filename):
with open(filename) as f:
for line in f.readlines():
line = line.strip()
if line.startswith("#") or line.startswith("//") or len(line) == 0:
continue
yield line
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def add_results(args):
log("adding results:", args.results)
col = BenchmarkCollection(args.target)
col.add(args.results)
def add_meta(args):
log("adding bm run metadata to results dir:", args.results)
meta = ResultMeta(results_dir=args.results,
cmakecache=args.cmakecache,
build_type=args.build_type)
meta.save(args.results)
log("adding bm run metadata to results dir: success!")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
if __name__ == '__main__':
main()
|
python
|
from .models import Electricity
from django.http import HttpResponse
from django.utils import timezone
from django.views.generic import View
import datetime
import json
class get_json(View):
""" Returns list of timestamp-consumption pairs. """
def get(self, request, *args, **kwargs):
time_start = timezone.now() - datetime.timedelta(days=83)
items = []
for item in Electricity.objects.filter(date__gte=time_start):
d = {"timestamp": item.get_timestamp(), "value": {
"W": float(item.usage)}}
items.append(d)
return HttpResponse(json.dumps(items), content_type="application/json")
class get_barchart_json(View):
""" Returns zero-filled list of timestamp-consumption pairs. """
def get(self, request, *args, **kwargs):
time_start = timezone.now() - datetime.timedelta(days=83)
items = []
current_date = None
consumption = 0
db_items = Electricity.objects.filter(date__gte=time_start)
start_date = db_items[0].date
for item in db_items:
if current_date is None:
current_date = item.date
else:
if current_date != item.date:
# Day changed
items.append(
{"date": current_date.isoformat(), "consumption": consumption})
consumption = 0
if item.date != current_date + datetime.timedelta(days=1):
# Skipping days
fill_day = current_date + datetime.timedelta(days=1)
while item.date != fill_day:
items.append(
{"date": fill_day.isoformat(), "consumption": 0})
fill_day += datetime.timedelta(days=1)
current_date = item.date
consumption += float(item.usage)
if current_date and consumption:
items.append({"date": current_date.isoformat(),
"consumption": consumption})
return HttpResponse(json.dumps(items), content_type="application/json")
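# Example of the JSON payload emitted by get_barchart_json (added for
# illustration; the dates are hypothetical):
# [{"date": "2020-01-01", "consumption": 12.5},
#  {"date": "2020-01-02", "consumption": 0}, ...]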
|
python
|
# utility packages
import os
from tqdm import tqdm
import csv
# math and support packages
from scipy import ndimage
import numpy as np
import pandas as pd
# image processing packages
import cv2
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from random import shuffle
img_data = []
IMG_PATH = "IMG"
# All images are stored in a directory called IMG living along side model.py path
def normalize(img):
# image data should be normalized so that the data has mean zero and equal variance.
return (img - 128.) / 128.
def grayscale(img):
    # remove color channels; the resulting grayscale image shape is (160, 320)
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def blur(img):
return cv2.blur(img, (6,6))
def crop_img(img):
start_of_landscape = 60
length_of_windshield = 70
cropped_y = start_of_landscape + length_of_windshield
cropped_img = img[start_of_landscape:cropped_y, 0:320]
return cropped_img
def preprocess(img):
# this is our main preprocess pipeline
# img1 = grayscale(img)
# img1.reshape((160,320,1))
img1 = img
# grayscale has to come first for cv2
# error: (-215) depth == CV_8U || depth == CV_16U || depth == CV_32F
# in function cvtColor
# img2 = normalize(img1)
# TODO: Cut image sizes, add filter polygons
# img3 = filter_images(img2)
img3 = blur(img1)
# img4 = crop_img(img3)
return img3
def _get_img_path(orig_path):
jpg_file_path = 'IMG/' + orig_path.split('IMG')[1]
center_image_fp = os.path.join(os.getcwd(), jpg_file_path)
return center_image_fp
def load_images_generator(batch_size=32):
image_directory = os.path.join(os.getcwd(), IMG_PATH)
i = 0
measurements = []
images = []
with open('driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if i == batch_size:
images = np.array(images)
measurements = np.array(measurements)
images, measurements = sklearn.utils.shuffle(images, measurements)
yield (images, measurements)
images = []
measurements = []
i = 0
center_image_fp = _get_img_path(row[0])
center_image = cv2.imread(center_image_fp)
center_image_fp = _get_img_path(row[1])
left_image = cv2.imread(center_image_fp)
center_image_fp = _get_img_path(row[2])
right_image = cv2.imread(center_image_fp)
# each of these should contain an nd array
data = dict()
data['X'] = {'center_image': center_image,
'left_image': left_image,
'right_image': right_image}
data['y'] = {'steering_angle': row[3],
'throttle': row[4],
'brake': row[5],
'speed': row[6]}
# image_data.append(data)
center_img_data = _retrieve_center_image(data=data)
measurement = _retrieve_steering_angle(data=data)
center_img_data = process_pipeline(center_img_data)
images.append(center_img_data)
            measurements.append(measurement)
            i += 1
# image_flipped = np.fliplr(center_img_data)
# measurement_flipped = -measurement
#
# # yield (np.array(center_img_data), np.array(measurement))
#
# images.append(image_flipped)
# measurements.append(measurement_flipped)
def load_images():
image_directory = os.path.join(os.getcwd(), IMG_PATH)
lines = list()
i = 0
with open('driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
# lines.append(line)
row = line
# for row in tqdm(lines):
# center_image = cv2.imread(row['center_camera_fp'])
# left_image = cv2.imread(row['left_camera_fp'])
# right_image = ndimage.imread(row['right_camera_fp'])
center_image_fp = _get_img_path(row[0])
center_image = cv2.imread(center_image_fp)
center_image_fp = _get_img_path(row[1])
left_image = cv2.imread(center_image_fp)
center_image_fp = _get_img_path(row[2])
right_image = cv2.imread(center_image_fp)
# each of these should contain an nd array
data = dict()
data['X'] = {'center_image': center_image,
'left_image': left_image,
'right_image': right_image}
data['y'] = {'steering_angle': row[3],
'throttle': row[4],
'brake': row[5],
'speed': row[6]}
# image_data.append(data)
yield data
# return image_data
# returns a list of dicts, each dict containing pointers to the ndarrays
# for each of the 3 camera images, and labels for steering, braking
def process_pipeline(img_data):
# load images as X
# for data in tqdm(img_data, desc='process_pipeline', unit='images'):
# data['X']['center_image'] = preprocess(data['X']['center_image'])
# data['X']['left_image'] = preprocess(data['X']['left_image'])
# data['X']['right_image'] = preprocess(data['X']['right_image'])
return preprocess(img_data)
def model_basic(input_shape=None):
# define models
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(70,320,3)))
model.add(Flatten())
model.add(Dense(1))
return model
def model_lenet(input_shape=None):
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
model = Sequential()
model.add(Convolution2D(6,5,5, activation='relu', input_shape=input_shape))
model.add(MaxPooling2D())
model.add(Convolution2D(6,5,5, activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
return model
def model_nvidia(input_shape=None):
from keras.models import Sequential
from keras.layers import Flatten, Dense, Convolution2D, MaxPooling2D, Dropout, BatchNormalization, Activation
from keras.layers import Flatten, Dense, Lambda, Cropping2D
# activation = "relu"
# model = Sequential()
#
# # Normalize
# # model.add(BatchNormalization(input_shape=input_shape, axis=1))
#
# model.add(Convolution2D(24, 5, 5, activation=activation, input_shape=input_shape, name="convolution0"))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#
# model.add(Convolution2D(36, 5, 5, activation=activation, name="convolution1"))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#
# model.add(Convolution2D(48, 5, 5, activation=activation, name="convolution2"))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#
# model.add(Convolution2D(64, 3, 3, activation=activation, name="convolution3"))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#
# model.add(Convolution2D(64, 3, 3, activation=activation, name="convolution4"))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
#
#
# model.add(Flatten())
# # model.add(Dropout(0.2))
# # model.add(Activation(activation=activation))
#
# model.add(Dense(1164, activation=activation, name="dense1"))
# # model.add(Dropout(0.5))
# # model.add(Activation(activation=activation))
#
# model.add(Dense(100, activation=activation, name="dense2"))
# # model.add(Dropout(0.5))
# # model.add(Activation(activation=activation))
#
# model.add(Dense(50, activation=activation, name="dense3"))
# # model.add(Dropout(0.5))
# # model.add(Activation(activation=activation))
#
# model.add(Dense(10, activation=activation, name="dense4"))
# # model.add(Dropout(0.5))
# # model.add(Activation(activation=activation))
# model.add(Dense(1, name="output"))
activation = 'relu'
model = Sequential()
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((50,20), (0,0))))
model.add(Convolution2D(24, 5, 5, activation=activation, input_shape=input_shape))
model.add(MaxPooling2D())
model.add(Convolution2D(36, 5, 5, activation=activation))
model.add(MaxPooling2D())
model.add(Convolution2D(48, 5, 5, activation=activation))
model.add(MaxPooling2D())
model.add(Convolution2D(64, 3,3, activation=activation))
model.add(MaxPooling2D())
model.add(Convolution2D(64, 1, 1, activation=activation))
model.add(MaxPooling2D())
model.add(Flatten())
# model.add(Dense(1164))
# model.add(Dropout(0.5))
model.add(Dense(100))
# model.add(Dropout(0.5))
model.add(Dense(50))
# model.add(Dropout(0.5))
model.add(Dense(10))
# model.add(Dropout(0.5))
model.add(Dense(1))
return model
def _retrieve_center_image(data):
return data['X']['center_image']
def retrieve_images_and_labels(batch_size=32):
images = []
measurements = []
i = 0
for img in load_images():
if i == batch_size:
images = np.array(images)
measurements = np.array(measurements)
images, measurements = sklearn.utils.shuffle(images, measurements)
yield (images, measurements)
images = []
measurements = []
i = 0
center_img_data = _retrieve_center_image(data=img)
measurement = _retrieve_steering_angle(data=img)
center_img_data = process_pipeline(center_img_data)
images.append(center_img_data)
measurements.append(measurement)
image_flipped = np.fliplr(center_img_data)
measurement_flipped = -measurement
# yield (np.array(center_img_data), np.array(measurement))
images.append(image_flipped)
measurements.append(measurement_flipped)
# yield (np.array(image_flipped), np.array(measurement_flipped))
i += 1
# return images, measurements
def _retrieve_steering_angle(data):
return float(data['y']['steering_angle'])
def get_number_of_samples():
data_dir_path = os.path.join( os.getcwd(), IMG_PATH)
return len([name for name in os.listdir(data_dir_path)])
# img_data = process_pipeline(img_data)
# We can't do any kind of grayscale preprocessing since the simulator won't
# feed those images into the model
# images, measurements = retrieve_images_and_labels()
#
# X = np.array(images)
# y = np.array(measurements)
#
# X = np.array([images[0]])
# y = np.array([measurements[0]])
#
# for image in images:
# image = np.array([image])
# X = np.concatenate( (X, image) )
#
# for measurement in measurements:
# measurement = np.array([measurement])
# y = np.concatenate( (y, measurement) )
# model = model_basic()
def get_samples():
samples = []
with open('./driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
return samples
samples = get_samples()
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
name = './IMG/'+batch_sample[0].split('/')[-1]
center_image = cv2.imread(name)
center_angle = float(batch_sample[3])
center_image = preprocess(center_image)
images.append(center_image)
angles.append(center_angle)
name = './IMG/'+batch_sample[1].split('/')[-1]
left_image = cv2.imread(name)
left_angle = float(batch_sample[3]) + 0.2
left_image = preprocess(left_image)
images.append(left_image)
angles.append(left_angle)
name = './IMG/'+batch_sample[2].split('/')[-1]
right_image = cv2.imread(name)
right_angle = float(batch_sample[3]) - 0.2
right_image = preprocess(right_image)
images.append(right_image)
angles.append(right_angle)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
model = model_nvidia(input_shape=(160,320,3))
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator, samples_per_epoch=len(train_samples)*3,
validation_data=validation_generator,
nb_val_samples=len(validation_samples)*3, nb_epoch=4)
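# Note (added comment): samples_per_epoch and nb_val_samples are multiplied by 3
# because the generator above yields three images (center, left, right) per CSV row.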
# model.compile(loss='mse', optimizer='adam')
# model.fit_generator(generator=load_images_generator(), samples_per_epoch=number_of_samples,
# # validation_split=0.2, shuffle=True,
# nb_epoch=2)
model.save('model.h5')
# save model to h5
|
python
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from cdpcli.compat import json
import dateutil.parser
class ResponseParserFactory(object):
def create_parser(self):
return ResponseParser()
class ResponseParser(object):
DEFAULT_ENCODING = 'utf-8'
def parse(self, response, shape):
if response['status_code'] >= 301:
return self._parse_error(response)
elif shape is None:
return {}
else:
return self._parse_shape(shape, self._decode_body(response))
def _decode_body(self, response):
body = response['body']
if not body:
return {}
return json.loads(body.decode(self.DEFAULT_ENCODING))
def _parse_error(self, response):
try:
body = self._decode_body(response)
except Exception:
# In the case that we hit a completely dead endpoint, etc, the
# response might not be valid JSON, but we still want to return
# a structured error to the caller.
message = response['body'].decode(self.DEFAULT_ENCODING).strip()
body = dict(code='UNKNOWN_ERROR', message=message)
error = {}
error['code'] = body.get('code', '')
if 'message' in body:
error['message'] = body.get('message', '')
elif 'error' in body:
error['message'] = body.get('error', '')
else:
error['message'] = ' '.join(body.get('errorMessages', []))
return {'error': error}
def _parse_shape(self, shape, value):
handle_method_name = '_handle_%s' % shape.type_name
method = getattr(self, handle_method_name, self._handle_default)
return method(shape, value)
def _handle_array(self, shape, value):
parsed = []
for item in value:
parsed.append(self._parse_shape(shape.member, item))
return parsed
def _handle_object(self, shape, value):
parsed = {}
for member_name in shape.members:
member_shape = shape.members[member_name]
member_value = value.get(member_shape.name)
if member_value is not None:
parsed[member_name] = self._parse_shape(
shape.members[member_name], member_value)
return parsed
def _handle_map(self, shape, values):
parsed = {}
for key, value in values.items():
actual_key = self._parse_shape(shape.key, key)
actual_value = self._parse_shape(shape.value, value)
parsed[actual_key] = actual_value
return parsed
# Handles both ISO8601 format and RFC822 format.
# However, server can only parse ISO8601 format as of now.
def _handle_datetime(self, shape, value):
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def _handle_default(self, shape, value):
return value
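# Added sketch (not part of the original module): a minimal illustration of how
# ResponseParser.parse() behaves on hand-built response dicts. The 'status_code'
# and 'body' keys mirror what parse()/_parse_error() read above.
def _example_parse():
    parser = ResponseParserFactory().create_parser()
    ok = {'status_code': 200, 'body': b'{}'}
    assert parser.parse(ok, None) == {}  # no shape -> empty dict
    err = {'status_code': 404,
           'body': b'{"code": "NOT_FOUND", "message": "no such resource"}'}
    assert parser.parse(err, None) == {
        'error': {'code': 'NOT_FOUND', 'message': 'no such resource'}}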
|
python
|
import CollectionsManager
from UserController.UserCommand import UserCommandClass
# ==================================== UserCommandAddNewDuck Class =================================#
class UserCommandAddNewDuck(UserCommandClass):
component = None
component_index = 0
component_selected = None
component_parent = None
def __init__(self):
        print('Initialized a new user command: UserCommandAddNewDuck')
def execute(self, *args):
self.component_selected = args[0][1]
self.component = CollectionsManager.createNewDuck(args[0][0])
if self.component_selected is None or self.component_selected.addDuckComponent(self.component) == False:
self.component_parent = args[0][2]
self.component_parent.append(self.component)
def undo(self, *args):
self.component_index = -1
if self.component_selected is not None:
self.component_index = self.component_selected.removeDuckComponent(self.component)
if self.component_index < 0:
self.component_parent.remove(self.component)
def redo(self, *args):
if self.component_selected is None or self.component_selected.addDuckComponent(self.component, self.component_index) == False:
self.component_parent.append(self.component)
# ----------------------------------------------
|
python
|
import numpy as np
import ctypes as C
import cv2
libmog = C.cdll.LoadLibrary('libmog2.so')
def getfg(img):
(rows, cols) = (img.shape[0], img.shape[1])
res = np.zeros(dtype=np.uint8, shape=(rows, cols))
libmog.getfg(img.shape[0], img.shape[1],
img.ctypes.data_as(C.POINTER(C.c_ubyte)),
res.ctypes.data_as(C.POINTER(C.c_ubyte)))
return res
def getbg(img):
(rows, cols) = (img.shape[0], img.shape[1])
res = np.zeros(dtype=np.uint8, shape=(rows, cols, 3))
libmog.getbg(rows, cols, res.ctypes.data_as(C.POINTER(C.c_ubyte)))
return res
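# Note (added comment): judging from the ctypes calls above, libmog2.so is
# expected to export C functions roughly shaped like
#   void getfg(int rows, int cols, unsigned char *img, unsigned char *fg);
#   void getbg(int rows, int cols, unsigned char *bg);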
if __name__ == '__main__':
c = cv2.VideoCapture('../datasets/highway/input/in00%04d.jpg')
counter = 0
while 1:
_, f = c.read()
if f is not None:
cv2.imshow('f', f)
cv2.imshow('fg', getfg(f))
cv2.imshow('bg', getbg(f))
if cv2.waitKey(1) == 27:
exit(0)
counter += 1
else:
            print(counter)
break
|
python
|
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class CommentDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'user_id': 'str',
'task_id': 'str',
'time': 'datetime',
'message': 'str',
'removal_time': 'datetime',
'root_process_instance_id': 'str',
'links': 'list[AtomLink]'
}
attribute_map = {
'id': 'id',
'user_id': 'userId',
'task_id': 'taskId',
'time': 'time',
'message': 'message',
'removal_time': 'removalTime',
'root_process_instance_id': 'rootProcessInstanceId',
'links': 'links'
}
def __init__(self, id=None, user_id=None, task_id=None, time=None, message=None, removal_time=None, root_process_instance_id=None, links=None, local_vars_configuration=None): # noqa: E501
"""CommentDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._user_id = None
self._task_id = None
self._time = None
self._message = None
self._removal_time = None
self._root_process_instance_id = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if user_id is not None:
self.user_id = user_id
if task_id is not None:
self.task_id = task_id
if time is not None:
self.time = time
if message is not None:
self.message = message
self.removal_time = removal_time
if root_process_instance_id is not None:
self.root_process_instance_id = root_process_instance_id
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this CommentDto. # noqa: E501
The id of the task comment. # noqa: E501
:return: The id of this CommentDto. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CommentDto.
The id of the task comment. # noqa: E501
:param id: The id of this CommentDto. # noqa: E501
:type: str
"""
self._id = id
@property
def user_id(self):
"""Gets the user_id of this CommentDto. # noqa: E501
The id of the user who created the comment. # noqa: E501
:return: The user_id of this CommentDto. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this CommentDto.
The id of the user who created the comment. # noqa: E501
:param user_id: The user_id of this CommentDto. # noqa: E501
:type: str
"""
self._user_id = user_id
@property
def task_id(self):
"""Gets the task_id of this CommentDto. # noqa: E501
The id of the task to which the comment belongs. # noqa: E501
:return: The task_id of this CommentDto. # noqa: E501
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this CommentDto.
The id of the task to which the comment belongs. # noqa: E501
:param task_id: The task_id of this CommentDto. # noqa: E501
:type: str
"""
self._task_id = task_id
@property
def time(self):
"""Gets the time of this CommentDto. # noqa: E501
The time when the comment was created. [Default format]($(docsUrl)/reference/rest/overview/date-format/) `yyyy-MM-dd'T'HH:mm:ss.SSSZ`. # noqa: E501
:return: The time of this CommentDto. # noqa: E501
:rtype: datetime
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this CommentDto.
The time when the comment was created. [Default format]($(docsUrl)/reference/rest/overview/date-format/) `yyyy-MM-dd'T'HH:mm:ss.SSSZ`. # noqa: E501
:param time: The time of this CommentDto. # noqa: E501
:type: datetime
"""
self._time = time
@property
def message(self):
"""Gets the message of this CommentDto. # noqa: E501
The content of the comment. # noqa: E501
:return: The message of this CommentDto. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this CommentDto.
The content of the comment. # noqa: E501
:param message: The message of this CommentDto. # noqa: E501
:type: str
"""
self._message = message
@property
def removal_time(self):
"""Gets the removal_time of this CommentDto. # noqa: E501
The time after which the comment should be removed by the History Cleanup job. [Default format]($(docsUrl)/reference/rest/overview/date-format/) `yyyy-MM-dd'T'HH:mm:ss.SSSZ`. # noqa: E501
:return: The removal_time of this CommentDto. # noqa: E501
:rtype: datetime
"""
return self._removal_time
@removal_time.setter
def removal_time(self, removal_time):
"""Sets the removal_time of this CommentDto.
The time after which the comment should be removed by the History Cleanup job. [Default format]($(docsUrl)/reference/rest/overview/date-format/) `yyyy-MM-dd'T'HH:mm:ss.SSSZ`. # noqa: E501
:param removal_time: The removal_time of this CommentDto. # noqa: E501
:type: datetime
"""
self._removal_time = removal_time
@property
def root_process_instance_id(self):
"""Gets the root_process_instance_id of this CommentDto. # noqa: E501
The process instance id of the root process instance that initiated the process containing the task. # noqa: E501
:return: The root_process_instance_id of this CommentDto. # noqa: E501
:rtype: str
"""
return self._root_process_instance_id
@root_process_instance_id.setter
def root_process_instance_id(self, root_process_instance_id):
"""Sets the root_process_instance_id of this CommentDto.
The process instance id of the root process instance that initiated the process containing the task. # noqa: E501
:param root_process_instance_id: The root_process_instance_id of this CommentDto. # noqa: E501
:type: str
"""
self._root_process_instance_id = root_process_instance_id
@property
def links(self):
"""Gets the links of this CommentDto. # noqa: E501
The links associated to this resource, with `method`, `href` and `rel`. # noqa: E501
:return: The links of this CommentDto. # noqa: E501
:rtype: list[AtomLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this CommentDto.
The links associated to this resource, with `method`, `href` and `rel`. # noqa: E501
:param links: The links of this CommentDto. # noqa: E501
:type: list[AtomLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CommentDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CommentDto):
return True
return self.to_dict() != other.to_dict()
|
python
|
import multiprocessing
import time
from functools import wraps
def logging_decorator(f):
@wraps(f)
def wrapper(*args, **kwds):
print('Entering decorator')
result=f(*args, **kwds)
print('Exiting decorator')
return result
return wrapper
@logging_decorator
def compute_squares(number):
return number ** 2
if __name__ == '__main__':
pool = multiprocessing.Pool(20)
result = pool.map(compute_squares, range(1000))
print(result)
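    # Note (added comment): pool.map() ships compute_squares to the workers by
    # pickling it by reference (module + name); functools.wraps keeps that name
    # pointing at the decorated wrapper, so the 'Entering/Exiting decorator'
    # prints happen in the worker processes rather than the parent.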
|
python
|
from setuptools import setup
setup(
name='nlzss',
author='magical',
license='MIT',
packages=['nlzss']
)
|
python
|
#!/usr/bin/python
# coding: utf-8
import sys, getopt, time
import operator
import pickle
import numpy as np #for euclidean distance
import pandas as pd # to read the actual dataset
from numpy import zeros, sum as np_sum, add as np_add, concatenate, repeat as np_repeat, array, float32 as REAL, empty, ones, memmap as np_memmap, sqrt, newaxis, ndarray, dot, vstack, dtype, divide as np_divide, integer
import scipy.sparse
import scipy.linalg
#from gensim import utils, matutils
import logging,os
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
blas = lambda name, ndarray: scipy.linalg.get_blas_funcs((name,), (ndarray,))[0]
blas_nrm2 = blas('nrm2', np.array([], dtype=float))
blas_scal = blas('scal', np.array([], dtype=float))
def unitvec(vec, norm='l2'):
"""
Scale a vector to unit length. The only exception is the zero vector, which
is returned back unchanged.
Output will be in the same format as input
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm. Currently supported norms are 'l1' and 'l2'." % norm)
if scipy.sparse.issparse(vec):
vec = vec.tocsr()
if norm == 'l1':
veclen = np.sum(np.abs(vec.data))
if norm == 'l2':
veclen = np.sqrt(np.sum(vec.data ** 2))
if veclen > 0.0:
return vec / veclen
else:
return vec
if isinstance(vec, np.ndarray):
vec = np.asarray(vec, dtype=float)
if norm == 'l1':
veclen = np.sum(np.abs(vec))
if norm == 'l2':
veclen = blas_nrm2(vec)
if veclen > 0.0:
return blas_scal(1.0 / veclen, vec)
else:
return vec
try:
first = next(iter(vec)) # is there at least one element?
except:
return vec
if isinstance(first, (tuple, list)) and len(first) == 2: # gensim sparse format
if norm == 'l1':
length = float(sum(abs(val) for _, val in vec))
if norm == 'l2':
            length = 1.0 * np.sqrt(sum(val ** 2 for _, val in vec))
assert length > 0.0, "sparse documents must not contain any explicit zero entries"
return ret_normalized_vec(vec, length)
else:
raise ValueError("unknown input type")
def similarity(d1, d2):
"""
    Compute cosine similarity between two vectors. Both are assumed to be
    unit-normalised already (see unitvec above), so this is just their dot product.
"""
return dot(d1, d2)
def main(argv):
input_file_name = ''
output_file_name = ''
try:
        opts, args = getopt.getopt(argv, "hi:t:o:k:", ["ifile=", "tfile=", "ofile=", "kval="])
except getopt.GetoptError:
        print('top_K_sim_songs_extractor.py -i <train_file_name> -t <test_file_name> -o <output_file_name> -k <value_of_k>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print('top_K_sim_songs_extractor.py -i <train_file_name> -t <test_file_name> -o <output_file_name> -k <value_of_k>')
sys.exit()
elif opt in ("-i", "--ifile"):
train_file_name = arg
            print('Train file is ', train_file_name)
elif opt in ("-t", "--tfile"):
test_file_name = arg
            print('Test file is ', test_file_name)
elif opt in ("-o", "--ofile"):
output_file_name = arg
print('Output file is ', output_file_name)
elif opt in ("-k", "--kval"):
k_val = int(arg)
print('k is ', k_val)
df_train = pd.read_csv(train_file_name)
song_ids_train = df_train['id'].values.tolist()
#df_train.replace('?', -99999, inplace = True)
df_train.drop(['id'], 1, inplace = True)
np_data_train = np.array(df_train.astype(float).values.tolist())
#print(np_data_train)
df_test = pd.read_csv(test_file_name)
song_ids_test = df_test['id'].values.tolist()
#df_train.replace('?', -99999, inplace = True)
df_test.drop(['id'], 1, inplace = True)
np_data_test = np.array(df_test.astype(float).values.tolist())
#print(np_data_train)
for i in range(len(np_data_test)):
np_data_test[i] = unitvec(np_data_test[i])
for i in range(len(np_data_train)):
np_data_train[i] = unitvec(np_data_train[i])
song_sim_dict = {}
for song_id in song_ids_test:
song_sim_dict[song_id] = []
train_rec_count = len(song_ids_train)
test_rec_count = len(song_ids_test)
total_pair_count = train_rec_count*test_rec_count
print("Need to process:", total_pair_count,"pairs")
print("Percenatg completed: ", end='')
sorted_song_sim_pair = []
pair_count = 0
    t1 = time.perf_counter()
f = open('top_'+str(k_val)+'_similar_songs_'+output_file_name+'.txt', 'w')
for i in range(0, test_rec_count):
if i % 10 == 0:
            t2 = time.perf_counter()
            print("Time taken:", t2 - t1,
                  "Estimated time remaining:",
                  (t2 - t1) * (total_pair_count - pair_count) / max(pair_count, 1), "\n")
            logger.info("Processed %d/%d test records", i, test_rec_count)
#break
sim_list = []
for j in range(0, train_rec_count):
#print(song_ids[i], "&&&", song_ids[j],"==>", similarity(np_data[i], np_data[j]))
#song_sim_dict[song_ids_test[i]].append((song_ids_train[j], similarity(np_data_test[i], np_data_train[j])))
sim_list.append((song_ids_train[j], similarity(np_data_test[i], np_data_train[j])))
pair_count += 1
#if pair_count % 1000 == 0:
#t2 = time.clock
#print("Time remaining: "+str((1-(pair_count/total_pair_count))*(t2-t1)/60.0))
# time.sleep(1)
# sys.stdout.write("\r%f%%" % (pair_count/total_pair_count))
# sys.stdout.flush()
#break
sorted_song_sim_pair = (song_ids_test[i], sorted(sim_list, key=operator.itemgetter(1), reverse=True))
#print(sorted_song_sim_pair[0], sorted_song_sim_pair[1][:K])
f.write(str(sorted_song_sim_pair[0])+'=>')
for (song_id, sim_val) in sorted_song_sim_pair[1][:k_val]:
f.write(str(song_id)+':'+str(sim_val)+',')
f.write('\n')
#pickle.dump(song_sim_dict, open('song_sim_dict.pickle','wb'))
#pickle.dump(sorted_song_sim_pair, open('sorted_song_sim_pair.pickle','wb'))
print('here')
f.close()
# sorted_song_sim_list = []
# i = 0
# for key in song_sim_dict:
# sorted_song_sim_list.append((key, sorted(song_sim_dict[key], key=operator.itemgetter(1), reverse=True)))
# #print(sorted_song_sim_list[0][0], sorted_song_sim_list[0][1][:K])
# f.write(str(sorted_song_sim_list[i][0])+'=>')
# for (song_id, sim_val) in sorted_song_sim_list[i][1][:k_val]:
# f.write(str(song_id)+':'+str(sim_val)+',')
# f.write('\n')
# i += 1
# f.close()
if __name__ == "__main__":
#print(sys.argv[1:])
main(sys.argv[1:])
|
python
|
#
# PySNMP MIB module ELTEX-MES-eltMacNotification-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ELTEX-MES-eltMacNotification-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:47:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
VlanIndex, = mibBuilder.importSymbols("CISCO-VTP-MIB", "VlanIndex")
eltMesMng, = mibBuilder.importSymbols("ELTEX-MES", "eltMesMng")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
IpAddress, TimeTicks, iso, Counter32, ObjectIdentity, Unsigned32, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, MibIdentifier, ModuleIdentity, Bits, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "TimeTicks", "iso", "Counter32", "ObjectIdentity", "Unsigned32", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "MibIdentifier", "ModuleIdentity", "Bits", "NotificationType")
TextualConvention, DisplayString, TruthValue, TimeStamp, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "TruthValue", "TimeStamp", "MacAddress")
eltMesMacNotificationMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7))
eltMesMacNotificationMIB.setRevisions(('2015-11-05 00:00', '2015-11-19 00:00',))
if mibBuilder.loadTexts: eltMesMacNotificationMIB.setLastUpdated('201511190000Z')
if mibBuilder.loadTexts: eltMesMacNotificationMIB.setOrganization('Eltex Enterprise Co, Ltd.')
eltMesMacNotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1))
eltMesMnFlappingObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1))
eltMnFlappingFeatureEnabled = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eltMnFlappingFeatureEnabled.setStatus('deprecated')
eltMnFlappingNotificationsEnabled = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 2), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eltMnFlappingNotificationsEnabled.setStatus('deprecated')
eltMnFlappingAddress = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltMnFlappingAddress.setStatus('deprecated')
eltMnFlappingVlanNumber = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 4), VlanIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltMnFlappingVlanNumber.setStatus('deprecated')
eltMnFlappingFirstPortId = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltMnFlappingFirstPortId.setStatus('deprecated')
eltMnFlappingSecondPortId = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltMnFlappingSecondPortId.setStatus('deprecated')
eltMnFlappingTime = MibScalar((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 1, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltMnFlappingTime.setStatus('deprecated')
eltMesMnNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 2))
eltMesMnNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 2, 0))
eltMnFlappingNotification = NotificationType((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 7, 2, 0, 1)).setObjects(("ELTEX-MES-eltMacNotification-MIB", "eltMnFlappingAddress"), ("ELTEX-MES-eltMacNotification-MIB", "eltMnFlappingVlanNumber"), ("ELTEX-MES-eltMacNotification-MIB", "eltMnFlappingFirstPortId"), ("ELTEX-MES-eltMacNotification-MIB", "eltMnFlappingSecondPortId"), ("ELTEX-MES-eltMacNotification-MIB", "eltMnFlappingTime"))
if mibBuilder.loadTexts: eltMnFlappingNotification.setStatus('deprecated')
mibBuilder.exportSymbols("ELTEX-MES-eltMacNotification-MIB", eltMesMacNotificationMIB=eltMesMacNotificationMIB, eltMnFlappingVlanNumber=eltMnFlappingVlanNumber, eltMesMnFlappingObjects=eltMesMnFlappingObjects, eltMnFlappingFeatureEnabled=eltMnFlappingFeatureEnabled, PYSNMP_MODULE_ID=eltMesMacNotificationMIB, eltMnFlappingTime=eltMnFlappingTime, eltMnFlappingSecondPortId=eltMnFlappingSecondPortId, eltMnFlappingNotification=eltMnFlappingNotification, eltMesMnNotificationPrefix=eltMesMnNotificationPrefix, eltMesMnNotifications=eltMesMnNotifications, eltMesMacNotificationObjects=eltMesMacNotificationObjects, eltMnFlappingFirstPortId=eltMnFlappingFirstPortId, eltMnFlappingNotificationsEnabled=eltMnFlappingNotificationsEnabled, eltMnFlappingAddress=eltMnFlappingAddress)
|
python
|
from abc import ABC
class AbstractDiscriminantStrategy(ABC):
def calculate_discriminant(self, a, b, c):
pass
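# Hypothetical concrete strategy (added for illustration, not part of the
# original module): the classic quadratic discriminant b^2 - 4*a*c.
class OrdinaryDiscriminantStrategy(AbstractDiscriminantStrategy):
    def calculate_discriminant(self, a, b, c):
        return b * b - 4 * a * c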
|
python
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron_lib.agent import topics
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import rpc as n_rpc
from neutron_lib.utils import runtime
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_service import service
from oslo_utils import timeutils
from neutron._i18n import _
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.conf.agent import common as config
from neutron.conf.services import metering_agent
from neutron import manager
from neutron import service as neutron_service
from neutron.services.metering.drivers import abstract_driver as driver
from neutron.services.metering.drivers import utils as driverutils
LOG = logging.getLogger(__name__)
class MeteringPluginRpc(object):
def __init__(self, host):
# NOTE(yamamoto): super.__init__() call here is not only for
# aesthetics. Because of multiple inheritances in MeteringAgent,
# it's actually necessary to initialize parent classes of
# manager.Manager correctly.
super(MeteringPluginRpc, self).__init__(host)
target = oslo_messaging.Target(topic=topics.METERING_PLUGIN,
version='1.0')
self.client = n_rpc.get_client(target)
def _get_sync_data_metering(self, context):
try:
cctxt = self.client.prepare()
return cctxt.call(context, 'get_sync_data_metering',
host=self.host)
except Exception:
LOG.exception("Failed synchronizing routers")
class MeteringAgent(MeteringPluginRpc, manager.Manager):
def __init__(self, host, conf=None):
self.conf = conf or cfg.CONF
self._load_drivers()
self.context = context.get_admin_context_without_session()
self.metering_loop = loopingcall.FixedIntervalLoopingCall(
self._metering_loop
)
measure_interval = self.conf.measure_interval
self.last_report = 0
self.metering_loop.start(interval=measure_interval)
self.host = host
self.label_project_id = {}
self.routers = {}
self.metering_infos = {}
self.metering_labels = {}
super(MeteringAgent, self).__init__(host=host)
def _load_drivers(self):
"""Loads plugin-driver from configuration."""
LOG.info("Loading Metering driver %s", self.conf.driver)
if not self.conf.driver:
raise SystemExit(_('A metering driver must be specified'))
self.metering_driver = driverutils.load_metering_driver(self,
self.conf)
def _metering_notification(self):
for key, info in self.metering_infos.items():
data = self.create_notification_message_data(info, key)
traffic_meter_event = 'l3.meter'
granularity = info.get('traffic-counter-granularity')
if granularity:
traffic_meter_event = 'l3.meter.%s' % granularity
LOG.debug("Send metering report [%s] via event [%s].",
data, traffic_meter_event)
notifier = n_rpc.get_notifier('metering')
notifier.info(self.context, traffic_meter_event, data)
info['pkts'] = 0
info['bytes'] = 0
info['time'] = 0
def create_notification_message_data(self, info, key):
data = {'pkts': info['pkts'],
'bytes': info['bytes'],
'time': info['time'],
'first_update': info['first_update'],
'last_update': info['last_update'],
'host': self.host}
if self.conf.granular_traffic_data:
data['resource_id'] = key
self.set_project_id_for_granular_traffic_data(data, key)
else:
data['label_id'] = key
data['project_id'] = self.label_project_id.get(key)
LOG.debug("Metering notification created [%s] with info data [%s], "
"key[%s], and metering_labels configured [%s]. ", data, info,
key, self.metering_labels)
return data
def set_project_id_for_granular_traffic_data(self, data, key):
if driver.BASE_LABEL_TRAFFIC_COUNTER_KEY in key:
other_ids, actual_label_id = key.split(
driver.BASE_LABEL_TRAFFIC_COUNTER_KEY)
is_label_shared = None
label_name = actual_label_id
metering_label = self.metering_labels.get(actual_label_id)
if metering_label:
is_label_shared = metering_label['shared']
label_name = metering_label['name']
data['label_id'] = actual_label_id
data['label_name'] = label_name
data['label_shared'] = is_label_shared
if is_label_shared:
self.configure_project_id_shared_labels(data, other_ids[:-1])
else:
data['project_id'] = self.label_project_id.get(actual_label_id)
elif driver.BASE_PROJECT_TRAFFIC_COUNTER_KEY in key:
data['project_id'] = key.split(
driver.BASE_PROJECT_TRAFFIC_COUNTER_KEY)[1]
elif driver.BASE_ROUTER_TRAFFIC_COUNTER_KEY in key:
router_id = key.split(driver.BASE_ROUTER_TRAFFIC_COUNTER_KEY)[1]
data['router_id'] = router_id
self.configure_project_id_based_on_router(data, router_id)
else:
raise Exception(_("Unexpected key [%s] format.") % key)
def configure_project_id_shared_labels(self, data, key):
if driver.BASE_PROJECT_TRAFFIC_COUNTER_KEY in key:
project_id = key.split(driver.BASE_PROJECT_TRAFFIC_COUNTER_KEY)[1]
data['project_id'] = project_id
elif driver.BASE_ROUTER_TRAFFIC_COUNTER_KEY in key:
router_id = key.split(driver.BASE_ROUTER_TRAFFIC_COUNTER_KEY)[1]
data['router_id'] = router_id
self.configure_project_id_based_on_router(data, router_id)
else:
data['project_id'] = 'all'
def configure_project_id_based_on_router(self, data, router_id):
if router_id in self.routers:
router = self.routers[router_id]
data['project_id'] = router['project_id']
else:
LOG.warning("Could not find router with ID [%s].", router_id)
def _purge_metering_info(self):
deadline_timestamp = timeutils.utcnow_ts() - self.conf.report_interval
expired_metering_info_key = [
key for key, info in self.metering_infos.items()
if info['last_update'] < deadline_timestamp]
for key in expired_metering_info_key:
del self.metering_infos[key]
def _add_metering_info(self, key, traffic_counter):
granularity = traffic_counter.get('traffic-counter-granularity')
ts = timeutils.utcnow_ts()
info = self.metering_infos.get(
key, {'bytes': 0, 'traffic-counter-granularity': granularity,
'pkts': 0, 'time': 0, 'first_update': ts, 'last_update': ts})
info['bytes'] += traffic_counter['bytes']
info['pkts'] += traffic_counter['pkts']
info['time'] += ts - info['last_update']
info['last_update'] = ts
self.metering_infos[key] = info
return info
def _add_metering_infos(self):
self.label_project_id = {}
for router in self.routers.values():
project_id = router['project_id']
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
self.label_project_id[label_id] = project_id
LOG.debug("Retrieving traffic counters for routers [%s].",
self.routers)
traffic_counters = self._get_traffic_counters(self.context,
self.routers.values())
LOG.debug("Traffic counters [%s] retrieved for routers [%s].",
traffic_counters, self.routers)
if not traffic_counters:
return
for key, traffic_counter in traffic_counters.items():
self._add_metering_info(key, traffic_counter)
def _metering_loop(self):
self._sync_router_namespaces(self.context, self.routers.values())
self._add_metering_infos()
ts = timeutils.utcnow_ts()
delta = ts - self.last_report
report_interval = self.conf.report_interval
if delta >= report_interval:
self._metering_notification()
self._purge_metering_info()
self.last_report = ts
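    # (Added comment) _metering_loop runs every measure_interval seconds (see
    # __init__); notifications are only emitted, and stale counters purged, once
    # report_interval seconds have passed since the last report.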
@runtime.synchronized('metering-agent')
def _invoke_driver(self, context, meterings, func_name):
try:
return getattr(self.metering_driver, func_name)(context, meterings)
except AttributeError:
LOG.exception("Driver %(driver)s does not implement %(func)s",
{'driver': self.conf.driver,
'func': func_name})
except RuntimeError:
LOG.exception("Driver %(driver)s:%(func)s runtime error",
{'driver': self.conf.driver,
'func': func_name})
@periodic_task.periodic_task(run_immediately=True)
def _sync_routers_task(self, context):
routers = self._get_sync_data_metering(self.context)
routers_on_agent = set(self.routers.keys())
routers_on_server = set(
[router['id'] for router in routers] if routers else [])
for router_id in routers_on_agent - routers_on_server:
del self.routers[router_id]
self._invoke_driver(context, router_id, 'remove_router')
if not routers:
return
self._update_routers(context, routers)
def router_deleted(self, context, router_id):
self._add_metering_infos()
if router_id in self.routers:
del self.routers[router_id]
return self._invoke_driver(context, router_id,
'remove_router')
def routers_updated(self, context, routers=None):
if not routers:
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def _update_routers(self, context, routers):
for router in routers:
self.routers[router['id']] = router
self.store_metering_labels(router)
return self._invoke_driver(context, routers,
'update_routers')
def _get_traffic_counters(self, context, routers):
LOG.debug("Get router traffic counters")
return self._invoke_driver(context, routers, 'get_traffic_counters')
def _sync_router_namespaces(self, context, routers):
LOG.debug("Sync router namespaces")
return self._invoke_driver(context, routers, 'sync_router_namespaces')
def add_metering_label_rule(self, context, routers):
return self._invoke_driver(context, routers,
'add_metering_label_rule')
def remove_metering_label_rule(self, context, routers):
return self._invoke_driver(context, routers,
'remove_metering_label_rule')
def update_metering_label_rules(self, context, routers):
LOG.debug("Update metering rules from agent")
return self._invoke_driver(context, routers,
'update_metering_label_rules')
def add_metering_label(self, context, routers):
LOG.debug("Creating a metering label from agent with parameters ["
"%s].", routers)
for router in routers:
self.store_metering_labels(router)
return self._invoke_driver(context, routers,
'add_metering_label')
def store_metering_labels(self, router):
labels = router[constants.METERING_LABEL_KEY]
for label in labels:
self.metering_labels[label['id']] = label
def remove_metering_label(self, context, routers):
self._add_metering_infos()
LOG.debug("Delete a metering label from agent with parameters ["
"%s].", routers)
for router in routers:
labels = router[constants.METERING_LABEL_KEY]
for label in labels:
if label['id'] in self.metering_labels.keys():
del self.metering_labels[label['id']]
return self._invoke_driver(context, routers,
'remove_metering_label')
class MeteringAgentWithStateReport(MeteringAgent):
def __init__(self, host, conf=None):
super(MeteringAgentWithStateReport, self).__init__(host=host,
conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
self.failed_report_state = False
self.agent_state = {
'binary': constants.AGENT_PROCESS_METERING,
'host': host,
'topic': topics.METERING_AGENT,
'configurations': {
'metering_driver': self.conf.driver,
'measure_interval':
self.conf.measure_interval,
'report_interval': self.conf.report_interval
},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_METERING}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warning("Neutron server does not support state report. "
"State report for this agent will be disabled.")
self.heartbeat.stop()
except Exception:
self.failed_report_state = True
LOG.exception("Failed reporting state!")
return
if self.failed_report_state:
self.failed_report_state = False
LOG.info("Successfully reported state after a previous failure.")
def agent_updated(self, context, payload):
LOG.info("agent_updated by server side %s!", payload)
def main():
conf = cfg.CONF
common_config.register_common_config_options()
metering_agent.register_metering_agent_opts()
config.register_agent_state_opts_helper(conf)
common_config.init(sys.argv[1:])
config.setup_logging()
config.setup_privsep()
server = neutron_service.Service.create(
binary=constants.AGENT_PROCESS_METERING,
topic=topics.METERING_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.services.metering.agents.'
'metering_agent.MeteringAgentWithStateReport')
service.launch(cfg.CONF, server, restart_method='mutate').wait()
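# A minimal configuration sketch (hedged, not part of this module): the option
# names below are the ones this agent reads (driver, measure_interval,
# report_interval); the driver path and the values shown are illustrative of a
# typical deployment rather than prescriptive.
#
#   [DEFAULT]
#   driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
#   measure_interval = 30
#   report_interval = 300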
|
python
|
import pandas as pd
import numpy as np
UTIL_SCORING = {'player':
{
'assists': 2,
'kills': 3,
'deaths': -1,
'total cs': 0.02,
# 'games_not_played': 20
},
'team':
{
'barons': 3,
'dragons': 2,
'firstblood': 2,
'towers': 1,
'result': 2,
'win_under_30': 2,
# 'games_not_played': 15
}
}
def compute_fanduel_scores(elixir_df):
'''
Compute fantasy scores for each player using the FanDuel formula.
CURRENTLY DOES NOT HANDLE BEST OF 3/5. NEED TO ADD.
    :param elixir_df: Historical match data DataFrame (player rows and team rows per game)
    :return: DataFrame with an 'FD_points' column added to both player and team rows
'''
gg = elixir_df.groupby(['gameid'])
player_scoring = pd.Series(UTIL_SCORING['player'])
team_scoring = pd.Series(UTIL_SCORING['team'])
rez = []
for gameid, group in gg:
        # Player rows carry a player name; team rows have NaN in 'player',
        # so split the game on that column.
        players = group[group['player'].notna()].copy()
        teams = group[group['player'].isna()].copy()
        teams['game_under_30'] = (teams['gamelength'] / 60.).apply(lambda x: 1 if x < 30 else 0)
        teams['win_under_30'] = teams['result'] * teams['game_under_30']
        players['FD_points'] = players.reindex(columns=player_scoring.index).dot(player_scoring)
        teams['FD_points'] = teams.reindex(columns=team_scoring.index).dot(team_scoring)
rez.append(players)
rez.append(teams)
return pd.concat(rez)
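# A minimal usage sketch (hedged): it assumes an Oracle's Elixir-style export
# (inferred from the elixir_df parameter name) where team rows have NaN in the
# 'player' column; the file name is an illustrative placeholder.
if __name__ == '__main__':
    match_df = pd.read_csv('match_data.csv')  # hypothetical input file
    scored = compute_fanduel_scores(match_df)
    player_rows = scored[scored['player'].notna()]
    print(player_rows.nlargest(10, 'FD_points')[['player', 'FD_points']])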
|
python
|
"""
Business logic for the orchestration operations.
"""
from http import HTTPStatus
import bson
import requests
from shipyard_cli.errors import StatusError
class CraneService():
"""Service for orchestration operations."""
def __init__(self, url: str, port: str):
self.base_url = url + ':' + port
def __check_key(self, key: str, resource: str) -> str:
"""
Check if the given key is a valid `ObjectId`.
        If it is not, this method treats it as the name of a resource of the
        given type and retrieves its ID from the server.
Returns the resource's ID as a string.
"""
if bson.ObjectId.is_valid(key):
return key
response = requests.get(self.base_url + f'/{resource}s?name=' + key)
if response.status_code == HTTPStatus.OK:
return response.json()['_id']
else:
raise StatusError(response.json()['error'])
def deploy_task(self, node_key: str, task_key: str):
"""
Deploy a task to a node.
The given keys can be either the resource names or their IDs.
"""
node_key = self.__check_key(node_key, 'node')
task_key = self.__check_key(task_key, 'task')
response = requests.post(
self.base_url + '/nodes/' + node_key + '/tasks?task_id=' + task_key
)
if response.status_code == HTTPStatus.OK:
return
else:
raise StatusError(response.json()['error'])
def remove_task(self, node_key: str, task_key: str):
"""
Remove a task from a node.
The given keys can be either the resource names or their IDs.
"""
node_key = self.__check_key(node_key, 'node')
task_key = self.__check_key(task_key, 'task')
response = requests.delete(
self.base_url + '/nodes/' + node_key + '/tasks/' + task_key
)
if response.status_code == HTTPStatus.OK:
return
else:
raise StatusError(response.json()['error'])
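# A minimal usage sketch (hedged): the URL, port, and the 'edge-01'/'telemetry'
# names are illustrative assumptions; they must correspond to a reachable
# shipyard server and to resources that already exist on it.
if __name__ == '__main__':
    service = CraneService('http://localhost', '8000')
    try:
        service.deploy_task('edge-01', 'telemetry')
        print('Task deployed.')
    except StatusError as error:
        print(f'Deployment failed: {error}')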
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 21 16:20:46 2018
@author: Arvinder Shinh
"""
import tensorflow as tf
from tensorflow import saved_model as sm
import DevnagriLipiDataGenerator
import DevnagriLipiDataVerifier
SerializedImgContainer, Labels = DevnagriLipiDataGenerator.DevnagriData()
Classfy_Inputs=tf.placeholder(dtype=tf.string, name='Classfy_Inputs')
Feature_trans={'image': tf.FixedLenFeature(shape=(784), dtype=tf.float32)}
data=tf.parse_example(Classfy_Inputs, Feature_trans)
Predict_Inputs=tf.reshape(data['image'], shape=(-1,28,28), name='Predict_Inputs')
Train_Outputs=tf.placeholder(shape=(None,10),dtype=tf.float32, name='Labels')
with tf.Session() as sess:
imageBatch, labelBatch = sess.run(( Predict_Inputs, Train_Outputs), feed_dict={Classfy_Inputs: SerializedImgContainer, Train_Outputs: Labels})
print(imageBatch.shape, labelBatch.shape)
for i in range(imageBatch.shape[0]):
DevnagriLipiDataVerifier.DevnagriDataVerifier(imageBatch[i], labelBatch[i])
|
python
|
from . import settings
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
if hasattr(settings, 'SALES_MODEL_FROM') and hasattr(settings, 'SALES_MODEL_IMPORT'):
User = getattr(__import__(settings.SALES_MODEL_FROM, fromlist=[settings.SALES_MODEL_IMPORT]), settings.SALES_MODEL_IMPORT)
else:
from django.contrib.auth.models import User
def get_sales(salesID):
try:
salesUser = User.objects.get(id=salesID)
return salesUser
except (ValueError, ObjectDoesNotExist):
return get_sales(settings.SALES_DEFAULT_ID)
def sales(request):
salesID = request.GET.get(settings.SALES_LINK_PARAMETER, '') or request.COOKIES.get(settings.SALES_COOKIE_NAME, '')
salesUser = get_sales(salesID)
return {"sales": salesUser}
|
python
|
from datetime import datetime
from unittest import TestCase
import jsons
from jsons import DeserializationError
class TestPrimitive(TestCase):
def test_dump_str(self):
self.assertEqual('some string', jsons.dump('some string'))
def test_dump_int(self):
self.assertEqual(123, jsons.dump(123))
def test_dump_float(self):
self.assertEqual(123.456, jsons.dump(123.456))
def test_dump_bool(self):
self.assertEqual(True, jsons.dump(True))
def test_dump_none(self):
self.assertEqual(None, jsons.dump(None))
def test_load_str(self):
self.assertEqual('some string', jsons.load('some string'))
def test_load_int(self):
self.assertEqual(123, jsons.load(123))
def test_load_float(self):
self.assertEqual(123.456, jsons.load(123.456))
def test_load_bool(self):
self.assertEqual(True, jsons.load(True))
def test_load_none(self):
self.assertEqual(None, jsons.load(None))
self.assertEqual(None, jsons.load(None, datetime))
with self.assertRaises(DeserializationError):
jsons.load(None, datetime, strict=True)
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 Tong LI <[email protected]>
#
# Distributed under terms of the BSD-3 license.
"""
"""
import fire
from zarr import consolidate_metadata
from pathlib import Path
import zarr
def main(file_in):
stem = Path(file_in).stem
print(stem)
consolidated = consolidate_metadata(file_in)
print(consolidated.info)
if __name__ == "__main__":
fire.Fire(main)
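# Usage sketch (hedged): python-fire exposes main()'s parameter on the command
# line, so the store path can be passed positionally or as --file_in; the
# script name and store path below are illustrative only.
#
#   python consolidate.py /data/experiment.zarr
#   python consolidate.py --file_in /data/experiment.zarr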
|
python
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Open images datasets.
https://storage.googleapis.com/openimages/web/index.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = '''\
Open Images is a dataset of ~9M images that have been annotated with image-level
labels and object bounding boxes.
The training set of V4 contains 14.6M bounding boxes for 600 object classes on
1.74M images, making it the largest existing dataset with object location
annotations. The boxes have been largely manually drawn by professional
annotators to ensure accuracy and consistency. The images are very diverse and
often contain complex scenes with several objects (8.4 per image on average).
Moreover, the dataset is annotated with image-level labels spanning thousands of
classes.
'''
_CITATION = '''\
@article{OpenImages,
author = {Alina Kuznetsova and
Hassan Rom and
Neil Alldrin and
Jasper Uijlings and
Ivan Krasin and
Jordi Pont-Tuset and
Shahab Kamali and
Stefan Popov and
Matteo Malloci and
Tom Duerig and
Vittorio Ferrari},
title = {The Open Images Dataset V4: Unified image classification,
object detection, and visual relationship detection at scale},
year = {2018},
journal = {arXiv:1811.00982}
}
@article{OpenImages2,
author = {Krasin, Ivan and
Duerig, Tom and
Alldrin, Neil and
Ferrari, Vittorio
and Abu-El-Haija, Sami and
Kuznetsova, Alina and
Rom, Hassan and
Uijlings, Jasper and
Popov, Stefan and
Kamali, Shahab and
Malloci, Matteo and
Pont-Tuset, Jordi and
Veit, Andreas and
Belongie, Serge and
Gomes, Victor and
Gupta, Abhinav and
Sun, Chen and
Chechik, Gal and
Cai, David and
Feng, Zheyun and
Narayanan, Dhyanesh and
Murphy, Kevin},
title = {OpenImages: A public dataset for large-scale multi-label and
multi-class image classification.},
journal = {Dataset available from
https://storage.googleapis.com/openimages/web/index.html},
year={2017}
}
'''
# Reading from .tar.gz is slower than extracting the gz and then reading from
# tar. We still read from the tar because it's faster to read fewer files on
# many network based FS.
# pylint: disable=line-too-long
_URLS = {
'train_images': [
tfds.download.Resource(
url='http://open-images-dataset.s3.amazonaws.com/tar/train_%s.tar.gz' % i,
extract_method=tfds.download.ExtractMethod.GZIP)
for i in '0123456789abcdef'],
'test_images': tfds.download.Resource(
url='http://open-images-dataset.s3.amazonaws.com/tar/test.tar.gz',
extract_method=tfds.download.ExtractMethod.GZIP),
'validation_images': tfds.download.Resource(
url='http://open-images-dataset.s3.amazonaws.com/tar/validation.tar.gz',
extract_method=tfds.download.ExtractMethod.GZIP),
'train_human_labels': 'https://storage.googleapis.com/openimages/2018_04/train/train-annotations-human-imagelabels.csv',
'train_machine_labels': 'https://storage.googleapis.com/openimages/2018_04/train/train-annotations-machine-imagelabels.csv',
'test_human_labels': 'https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels.csv',
'test_machine_labels': 'https://storage.googleapis.com/openimages/2018_04/test/test-annotations-machine-imagelabels.csv',
'validation_human_labels': 'https://storage.googleapis.com/openimages/2018_04/validation/validation-annotations-human-imagelabels.csv',
'validation_machine_labels': 'https://storage.googleapis.com/openimages/2018_04/validation/validation-annotations-machine-imagelabels.csv',
'class_descriptions': 'https://storage.googleapis.com/openimages/2018_04/class-descriptions.csv',
}
# pylint: enable=line-too-long
class OpenImagesV4(tfds.core.GeneratorBasedBuilder):
"""Open Images v4."""
VERSION = tfds.core.Version('0.0.1')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'image/filename': tfds.features.Text(), # eg '226f0a1873b9bf8e.jpg'
'objects': tfds.features.SequenceDict({
'label': tfds.features.ClassLabel(),
# Original data is 0, .1, ..., 1. We use 0, 1, 2, ..., 10.
'confidence': tf.int32,
'source': tfds.features.ClassLabel(names=[
'verification', 'crowdsource-verification', # human labels
'machine',
]),
}),
}),
urls=['https://storage.googleapis.com/openimages/web/index.html'],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
paths = dl_manager.download_and_extract(_URLS)
source_str2int = self.info.features['objects']['source'].str2int
# Set the labels' names:
with tf.gfile.Open(paths['class_descriptions']) as classes_f:
classes = [l.split(',')[0]
for l in classes_f.read().split('\n') if l]
logging.info('Number of loaded classes: %s', len(classes))
self.info.features['objects']['label'].names = classes
label_str2int = self.info.features['objects']['label'].str2int
# Load labels from CSVs:
def load(paths_):
return _load_objects([paths[p] for p in paths_],
source_str2int, label_str2int)
train_objects = load(['train_human_labels', 'train_machine_labels'])
test_objects = load(['test_human_labels', 'test_machine_labels'])
validation_objects = load(['validation_human_labels',
'validation_machine_labels'])
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=512,
gen_kwargs=dict(archive_paths=paths['train_images'],
objects=train_objects),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
num_shards=36,
gen_kwargs=dict(archive_paths=[paths['test_images']],
objects=test_objects),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
num_shards=12,
gen_kwargs=dict(archive_paths=[paths['validation_images']],
objects=validation_objects),
),
]
def _generate_examples(self, archive_paths, objects):
for archive_path in archive_paths:
logging.info('Opening archive %s ...', archive_path)
archive = tfds.download.iter_archive(archive_path,
tfds.download.ExtractMethod.TAR_GZ)
for fpath, fobj in archive:
fname = os.path.basename(fpath)
image_id, _ = os.path.splitext(fname)
objs = objects[int(image_id, 16)]
image_objects = [
dict(label=label, confidence=confidence, source=source)
for label, confidence, source in objs]
yield {
'image': fobj,
'image/filename': fname,
'objects': image_objects,
}
def _load_objects(csv_paths, source_str2int, label_str2int):
"""Returns objects listed within given CSV files."""
logging.info('Loading CSVs %s', csv_paths)
objects = []
current_image_id = None
current_objects = []
for labels_path in csv_paths:
with tf.gfile.Open(labels_path) as csv_f:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
image_id = int(image_id, 16)
source = source_str2int(source)
label = label_str2int(label)
if image_id != current_image_id:
if current_image_id:
objects.append((current_image_id, current_objects))
current_image_id = image_id
current_objects = []
current_objects.append((label, int(float(confidence) * 10), source))
# the last line of file:
objects.append((current_image_id, current_objects))
return dict(objects)
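# A minimal loading sketch (hedged): it assumes this builder is registered
# under the usual snake_case TFDS name 'open_images_v4' and that the very
# large source archives can be downloaded to the local dataset directory.
#
#   import tensorflow_datasets as tfds
#   dataset = tfds.load('open_images_v4', split=tfds.Split.VALIDATION)
#   for example in dataset.take(1):
#       print(example['image/filename'], example['objects']['label'])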
|
python
|
# coding: utf-8
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
This module contains the generic parser definition.
"""
# System import
import os
import json
import glob
import pickle
import datetime
# Third party import
import pandas as pd
# Package import
from caravel.io import load
class ParserBase(object):
""" Object to retrieve data from a BIDS directory or a CubicWeb instance.
"""
AVAILABLE_LAYOUTS = ("sourcedata", "rawdata", "derivatives", "phenotype")
def __init__(self, project, layoutdir):
""" Initialize the Caravel class.
Parameters
----------
project: str
the name of the project you are working on.
layoutdir: str
the location of the pre-generated parsing representations. If None
switch to managers mode.
"""
self.project = project
self.layouts = {}
_conf = ParserBase._get_conf()
if project not in _conf:
raise ValueError(
"Unkown configuration for project '{0}'. Available projects "
"are: {1}.".format(project, _conf.keys()))
self.conf = _conf[project]
if layoutdir is not None:
_repr = self._get_repr(layoutdir)
if project not in _repr:
raise ValueError(
"Unkown representation for project '{0}'. Available "
"projects are: {1}.".format(project, _repr.keys()))
self.representation = _repr[project]
else:
self.representation = {"manager": [{"path": "to_be_created.pkl"}]}
self.connection = None
def can_load(self):
""" A method checking the dataset type.
Returns
-------
out: bool
True if the dataset can be loaded, False otherwise.
"""
checks = [elem[-1]["path"] for elem in self.representation.values()]
if len(checks) == 0:
return False
return all([elem.endswith(self.EXT) for elem in checks])
def _check_layout(self, name):
""" Check if the layout name is supported.
"""
if name not in self.AVAILABLE_LAYOUTS:
raise ValueError(
"Layout '{0}' is not yet supported. Available layouts are: "
"{1}.".format(name, self.AVAILABLE_LAYOUTS))
@classmethod
def _get_conf(cls):
""" List all the configurations available and sort them by project.
"""
confdir = os.path.join(
os.path.abspath(os.path.dirname(__file__)), os.pardir, "conf")
conf = {}
for path in glob.glob(os.path.join(confdir, "*.conf")):
basename = os.path.basename(path).replace(".conf", "")
project, name = basename.split("_")
if project not in conf:
conf[project] = {}
conf[project][name] = path
return conf
def _get_repr(self, layoutdir):
""" List all the layout representation available and sort them by
dates.
"""
representations = {}
layout_files = glob.glob(os.path.join(layoutdir, "*.pkl"))
layout_files += glob.glob(os.path.join(layoutdir, "*.cw"))
for path in layout_files:
basename = os.path.basename(path).replace(".pkl", "")
basename = basename.replace(".cw", "")
project, name, timestamp = basename.split("_")
if project not in representations:
representations[project] = {}
representations[project].setdefault(name, []).append(
{"date": timestamp, "path": path})
for project, project_data in representations.items():
for name, name_data in project_data.items():
name_data.sort(key=lambda x: datetime.datetime.strptime(
x["date"], "%Y-%m-%d"))
return representations
def _check_conf(self, name):
""" Check if configuration is declared for the layout.
"""
if name not in self.conf:
raise ValueError(
"No configuration available for layout '{0}'. Please contact "
"the module developpers to add the support for your project.")
def _load_layout(self, name):
""" Load a layout from its pre-generated representation.
"""
if name not in self.layouts:
if name not in self.representation:
raise ValueError(
"A pre-generated '{0}' layout for your project '{1}' is "
"expected in user mode. Please contact the developpers "
"of the module.".format(name, self.project))
path = self.representation[name][-1]["path"]
with open(path, "rb") as open_file:
self.layouts[name] = pickle.load(open_file)
return self.layouts[name]
def _load_conf(self, name):
""" Load the configuration associated to a layout.
"""
if not isinstance(self.conf[name], dict):
print(self.conf[name])
with open(self.conf[name], "rt") as open_file:
self.conf[name] = json.load(open_file)
def export_layout(self, name):
""" Export a layout as a pandas DataFrame.
Parameters
----------
name: str
the name of the layout.
Returns
-------
df: pandas DataFrame
the converted layout.
"""
raise NotImplementedError("This function has to be defined in child "
"child class.")
def list_keys(self, name):
""" List all the filtering keys available in the layout.
Parameters
----------
name: str
the name of the layout.
Returns
-------
keys: list
the layout keys.
"""
raise NotImplementedError("This function has to be defined in child "
"child class.")
def list_values(self, name, key):
""" List all the filtering key values available in the layout.
Parameters
----------
name: str
the name of the layout.
key: str
the name of key in the layout.
Returns
-------
values: list
            the key's associated values in the layout.
"""
raise NotImplementedError("This function has to be defined in child "
"child class.")
def filter_layout(self, name, extensions=None, **kwargs):
""" Filter the layout by using a combination of key-values rules.
Parameters
----------
name: str
the name of the layout.
extensions: str or list of str
a filtering rule on the file extension.
kwargs: dict
the filtering options.
Returns
-------
df: pandas DataFrame
the filtered layout.
"""
raise NotImplementedError("This function has to be defined in child "
"child class.")
def load_data(self, name, df, replace=None):
""" Load the data contained in the filename column of a pandas
DataFrame.
Note:
Only a couple of file extensions are supported. If no loader has been
found None is returned.
Parameters
----------
name: str
the name of the layout.
df: pandas DataFrame
a table with one 'filename' column.
replace: 2-uplet, default None
in the case of a CubicWeb resource, the data are downloaded in a
custom folder. Use this parameter to replace the server location
by your own location.
Returns
-------
data: dict
            a dictionary containing the loaded data.
"""
if "filename" not in df:
raise ValueError("One 'filename' column expected in your table.")
data = {}
for index, path in enumerate(df["filename"]):
if isinstance(path, dict):
_data = pd.DataFrame.from_records([path])
path = ["{0}-{1}".format(key, val)
for key, val in zip(df.columns, df.values[index])
if key != "filename"]
path = "_".join(path)
else:
if replace is not None:
path = path.replace(replace[0], replace[1])
try:
_data = load(path)
                except Exception:
_data = None
if isinstance(_data, pd.DataFrame):
layout = self._load_layout(name)
file_obj = layout.files[path]
for ent_name, ent_val in file_obj.entities.items():
if ent_name in self.BASE_ENTITIES:
_data[ent_name] = ent_val
_data["dtype"] = name
if "participant_id" in _data:
_data["participant_id"] = _data[
"participant_id"].str.replace("sub-", "")
data[path] = _data
return data
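# A minimal usage sketch (hedged): 'MyParser', the project name and the layout
# directory are illustrative; a concrete child class of ParserBase must
# implement export_layout, list_keys, list_values and filter_layout before
# this works.
#
#   parser = MyParser(project='myproject', layoutdir='/path/to/layouts')
#   table = parser.filter_layout('rawdata', extensions='.tsv', sub='01')
#   data = parser.load_data('rawdata', table)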
|
python
|
import codecs
import numpy as np
import copy
import time
import random
import json
import multiprocessing
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import os
import joblib
entities2id = {}
relations2id = {}
relation_tph = {}
relation_hpt = {}
def dataloader(file1, file2, file3, file4):
print("load file...")
entity = []
relation = []
with open(file2, 'r') as f1, open(file3, 'r') as f2:
lines1 = f1.readlines()
lines2 = f2.readlines()
for line in lines1:
line = line.strip().split('\t')
if len(line) != 2:
continue
entities2id[line[0]] = line[1]
entity.append(int(line[1]))
for line in lines2:
line = line.strip().split('\t')
if len(line) != 2:
continue
relations2id[line[0]] = line[1]
relation.append(int(line[1]))
triple_list = []
relation_head = {}
relation_tail = {}
with codecs.open(file1, 'r') as f:
content = f.readlines()
for line in content:
triple = line.strip().split("\t")
if len(triple) != 3:
continue
h_ = int(entities2id[triple[0]])
r_ = int(relations2id[triple[1]])
t_ = int(entities2id[triple[2]])
triple_list.append([h_, r_, t_])
if r_ in relation_head:
if h_ in relation_head[r_]:
relation_head[r_][h_] += 1
else:
relation_head[r_][h_] = 1
else:
relation_head[r_] = {}
relation_head[r_][h_] = 1
if r_ in relation_tail:
if t_ in relation_tail[r_]:
relation_tail[r_][t_] += 1
else:
relation_tail[r_][t_] = 1
else:
relation_tail[r_] = {}
relation_tail[r_][t_] = 1
for r_ in relation_head:
sum1, sum2 = 0, 0
for head in relation_head[r_]:
sum1 += 1
sum2 += relation_head[r_][head]
tph = sum2 / sum1
relation_tph[r_] = tph
for r_ in relation_tail:
sum1, sum2 = 0, 0
for tail in relation_tail[r_]:
sum1 += 1
sum2 += relation_tail[r_][tail]
hpt = sum2 / sum1
relation_hpt[r_] = hpt
valid_triple_list = []
with codecs.open(file4, 'r') as f:
content = f.readlines()
for line in content:
triple = line.strip().split("\t")
if len(triple) != 3:
continue
h_ = int(entities2id[triple[0]])
r_ = int(relations2id[triple[1]])
t_ = int(entities2id[triple[2]])
valid_triple_list.append([h_, r_, t_])
print("Complete load. entity : %d , relation : %d , train triple : %d, , valid triple : %d" % (
len(entity), len(relation), len(triple_list), len(valid_triple_list)))
return entity, relation, triple_list, valid_triple_list
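# A hedged aside (not part of the original pipeline): relation_tph ("tails per
# head") and relation_hpt ("heads per tail") computed above are the statistics
# usually used for Bernoulli negative sampling, where the head of a triple is
# corrupted with probability tph / (tph + hpt) and the tail otherwise, e.g.:
#
#   p_head = relation_tph[r_] / (relation_tph[r_] + relation_hpt[r_])
#   if np.random.random() < p_head:
#       corrupted[0] = random.choice(entity)   # replace the head
#   else:
#       corrupted[2] = random.choice(entity)   # replace the tail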
def norm_l1(h, r, t):
return np.sum(np.fabs(h + r - t))
def norm_l2(h, r, t):
return np.sum(np.square(h + r - t))
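# A tiny worked example (illustrative numbers only): under the TransE scoring
# used above, the relation acts as a translation, so h + r should land close
# to t for a true triple.
#
#   h = np.array([0.25, 0.5]); r = np.array([0.25, -0.25]); t = np.array([0.5, 0.25])
#   norm_l1(h, r, t)  # -> 0.0, a perfect translation
#   norm_l2(h, r, t)  # -> 0.0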
class Entity_Measure:
def __init__(self, entity2id_1, entity2id_2, entity_emb_1, entity_emb_2):
self.entity2_id_1 = entity2id_1
self.entity2_id_2 = entity2id_2
self.entity_emb_1 = entity_emb_1
self.entity_emb_2 = entity_emb_2
self.entitydic_1 = {}
self.entitydic_2 = {}
self.entity_vec_1 = {}
self.entity_vec_2 = {}
self.test_sample_count = 0
self.hit_1_count = 0
self.hit_10_count = 0
self.mean_rank = []
self.hit_1 = 0
self.hit_10 = 0
def load_dic(self):
print('load dic ...')
with codecs.open(self.entity2_id_1, 'r', encoding='UTF-8') as f1, codecs.open(self.entity2_id_2, 'r',
encoding='UTF-8') as f2:
lines1 = f1.readlines()
lines2 = f2.readlines()
for line in lines1:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.entitydic_1[line[0]] = line[1]
for line in lines2:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.entitydic_2[line[0]] = line[1]
print('load dic done!')
def load_vec(self):
print('load vec ...')
with codecs.open(self.entity_emb_1, 'r') as f1, codecs.open(self.entity_emb_2, 'r') as f2:
lines1 = f1.readlines()
lines2 = f2.readlines()
for line in lines1:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.entity_vec_1[int(line[0])] = json.loads(line[1])
for line in lines2:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.entity_vec_2[int(line[0])] = json.loads(line[1])
print('load vec done!')
def calculate_single_pair(self, shiti1, shiti2):
query = self.entity_vec_1[eval(self.entitydic_1[shiti1])]
answer = eval(self.entitydic_2[shiti2])
temporary_dic = copy.deepcopy(self.entity_vec_2)
for index, value in enumerate(temporary_dic.values()):
temporary_dic[index] = np.linalg.norm(np.array(value) - np.array(query))
temporary_list = sorted(temporary_dic.items(), key=lambda x: x[1], reverse=False)
hit_10_list = [temporary_list[x][0] for x in range(10)]
for index, all_answer in enumerate(hit_10_list):
if answer == hit_10_list[index]:
self.hit_10_count += 1
if index == 0:
self.hit_1_count += 1
        # compute the mean rank (1-based position of the correct answer)
for index, value in enumerate(temporary_list):
if value[0] == answer:
self.mean_rank.append(index + 1)
def calculate_all(self, entity_test_way, outputfile):
start = time.time()
print('start calculate ...')
with codecs.open(entity_test_way, 'r', encoding='UTF-8') as f:
lines = f.readlines()
self.test_sample_count = len(lines)
print(self.test_sample_count, 'samples')
for line in lines:
line = line.strip().split()
self.calculate_single_pair(line[0], line[1])
self.hit_1 = self.hit_1_count / self.test_sample_count
self.hit_10 = self.hit_10_count / self.test_sample_count
self.mean_rank = np.array(self.mean_rank).mean()
end = time.time()
with codecs.open(f'{outputfile}test_result.txt', 'w', encoding='UTF-8') as f:
f.write(f'consuming {end - start} s')
f.write('\n')
f.write(f'hit_1 is {self.hit_1} hit_10 is {self.hit_10} mean_rank is {self.mean_rank}')
print('calculate done! consuming', end - start, 's')
print('hit_1 is', self.hit_1, 'hit_10 is', self.hit_10, 'mean_rank is', self.mean_rank)
def calculate_single_pair_multi(self, line, i):
hit1 = 0
hit10 = 0
mean_rank = 0
line = line.strip().split()
shiti1, shiti2 = line[1], line[0]
query = self.entity_vec_1[eval(self.entitydic_1[shiti1])]
answer = eval(self.entitydic_2[shiti2])
temporary_dic = copy.deepcopy(self.entity_vec_2)
for index, value in enumerate(temporary_dic.values()):
temporary_dic[index] = np.linalg.norm(np.array(value) - np.array(query))
temporary_list = sorted(temporary_dic.items(), key=lambda x: x[1], reverse=False)
hit_10_list = [temporary_list[x][0] for x in range(10)]
for index, all_answer in enumerate(hit_10_list):
if answer == hit_10_list[index]:
hit10 = 1
if index == 0:
hit1 = 1
for index, value in enumerate(temporary_list):
if value[0] == answer:
mean_rank = index
print(i, 'done!', hit1, hit10, mean_rank)
def calculate_all_multi(self, entity_test_way, outputfile):
p = multiprocessing.Pool(10)
print('start calculate ...')
with codecs.open(entity_test_way, 'r', encoding='UTF-8') as f:
lines = f.readlines()
self.test_sample_count = len(lines)
for index, line in enumerate(lines):
p.apply_async(self.calculate_single_pair_multi, (line, index))
p.close()
p.join()
class Multi_Model(nn.Module):
def __init__(self, entity_num_1, relation_num_1, entity_num_2, relation_num_2, dim, margin, norm, C):
super(Multi_Model, self).__init__()
self.entity_num_1 = entity_num_1
self.entities_1 = [x for x in range(self.entity_num_1)]
self.relation_num_1 = relation_num_1
self.entity_num_2 = entity_num_2
self.entities_2 = [x for x in range(self.entity_num_2)]
self.relation_num_2 = relation_num_2
self.dim = dim
self.margin = margin
self.norm = norm
self.C = C
self.entity_dic_1 = {}
self.entity_dic_2 = {}
self.relation_dic_1 = {}
self.relation_dic_2 = {}
self.ent_embedding_1 = torch.nn.Embedding(num_embeddings=self.entity_num_1,
embedding_dim=self.dim).cuda()
self.rel_embedding_1 = torch.nn.Embedding(num_embeddings=self.relation_num_1,
embedding_dim=self.dim).cuda()
self.ent_embedding_2 = torch.nn.Embedding(num_embeddings=self.entity_num_2,
embedding_dim=self.dim).cuda()
self.rel_embedding_2 = torch.nn.Embedding(num_embeddings=self.relation_num_2,
embedding_dim=self.dim).cuda()
self.loss_F = nn.MarginRankingLoss(self.margin, reduction="mean")
self.loss_mse = nn.MSELoss()
self.__data_init()
def __data_init(self):
nn.init.xavier_uniform_(self.ent_embedding_1.weight)
nn.init.xavier_uniform_(self.ent_embedding_2.weight)
nn.init.xavier_uniform_(self.rel_embedding_1.weight)
nn.init.xavier_uniform_(self.rel_embedding_2.weight)
self.normalization_rel_embedding()
self.normalization_ent_embedding()
def normalization_ent_embedding(self):
norm1 = self.ent_embedding_1.weight.detach().cpu().numpy()
norm1 = norm1 / np.sqrt(np.sum(np.square(norm1), axis=1, keepdims=True))
self.ent_embedding_1.weight.data.copy_(torch.from_numpy(norm1))
norm2 = self.ent_embedding_2.weight.detach().cpu().numpy()
norm2 = norm2 / np.sqrt(np.sum(np.square(norm2), axis=1, keepdims=True))
self.ent_embedding_2.weight.data.copy_(torch.from_numpy(norm2))
def normalization_rel_embedding(self):
norm1 = self.rel_embedding_1.weight.detach().cpu().numpy()
norm1 = norm1 / np.sqrt(np.sum(np.square(norm1), axis=1, keepdims=True))
self.rel_embedding_1.weight.data.copy_(torch.from_numpy(norm1))
norm2 = self.rel_embedding_2.weight.detach().cpu().numpy()
norm2 = norm2 / np.sqrt(np.sum(np.square(norm2), axis=1, keepdims=True))
self.rel_embedding_2.weight.data.copy_(torch.from_numpy(norm2))
def prepare_data(self, entity1_way, entity2_way, relation1_way, relation2_way):
print('Prepare data...')
with codecs.open(entity1_way, 'r', encoding='UTF-8') as f1, codecs.open(entity2_way, 'r',
encoding='UTF-8') as f2:
lines1 = f1.readlines()
lines2 = f2.readlines()
for line in lines1:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.entity_dic_1[line[0]] = line[1]
for line in lines2:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.entity_dic_2[line[0]] = line[1]
f1.close()
f2.close()
with codecs.open(relation1_way, 'r', encoding='UTF-8') as f1, codecs.open(relation2_way, 'r',
encoding='UTF-8') as f2:
lines1 = f1.readlines()
lines2 = f2.readlines()
for line in lines1:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.relation_dic_1[line[0]] = line[1]
for line in lines2:
line = line.strip().split('\t')
if len(line) != 2:
continue
self.relation_dic_2[line[0]] = line[1]
f1.close()
f2.close()
print('Prepare data done!')
def entity_train_data(self, entity_train_way):
entity_train_data = []
with codecs.open(entity_train_way, 'r', encoding='UTF-8') as f:
lines = f.readlines()
for line in lines:
line = line.strip().split()
entity_train_data.append([int(self.entity_dic_2[line[0]]), int(self.entity_dic_1[line[1]])])
f.close()
return entity_train_data
def get_triples(self, file1, file2):
triple_list_1 = []
triple_list_2 = []
with codecs.open(file1, 'r', encoding='UTF-8') as f:
content = f.readlines()
for line in content:
triple = line.strip().split("\t")
if len(triple) != 3:
continue
h_ = int(self.entity_dic_1[triple[0]])
r_ = int(self.relation_dic_1[triple[1]])
t_ = int(self.entity_dic_1[triple[2]])
triple_list_1.append([h_, r_, t_])
f.close()
with codecs.open(file2, 'r', encoding='UTF-8') as f:
content = f.readlines()
for line in content:
triple = line.strip().split("\t")
if len(triple) != 3:
continue
h_ = int(self.entity_dic_2[triple[0]])
r_ = int(self.relation_dic_2[triple[1]])
t_ = int(self.entity_dic_2[triple[2]])
triple_list_2.append([h_, r_, t_])
return triple_list_1, triple_list_2
def distance(self, h, r, t, entity):
if entity == 'entity_1':
head = self.ent_embedding_1(h)
rel = self.rel_embedding_1(r)
tail = self.ent_embedding_1(t)
distance = head + rel - tail
score = torch.norm(distance, p=self.norm, dim=1)
else:
head = self.ent_embedding_2(h)
rel = self.rel_embedding_2(r)
tail = self.ent_embedding_2(t)
distance = head + rel - tail
score = torch.norm(distance, p=self.norm, dim=1)
return score
def train_relation(self, ent_rel_1, ent_rel_2):
ent_tel_1_list = []
ent_tel_2_list = []
        # ent_rel_* is ([entity names], [relation names]); embed entities with the
        # entity table and relations with the relation table of the matching KG.
        for value in ent_rel_1[0]:
            ent_tel_1_list.append(
                self.ent_embedding_1(torch.tensor(eval(self.entity_dic_1[value])).long().cuda()).unsqueeze(0))
        for value in ent_rel_1[1]:
            ent_tel_1_list.append(
                self.rel_embedding_1(torch.tensor(eval(self.relation_dic_1[value])).long().cuda()).unsqueeze(0))
        for value in ent_rel_2[0]:
            ent_tel_2_list.append(
                self.ent_embedding_2(torch.tensor(eval(self.entity_dic_2[value])).long().cuda()).unsqueeze(0))
        for value in ent_rel_2[1]:
            ent_tel_2_list.append(
                self.rel_embedding_2(torch.tensor(eval(self.relation_dic_2[value])).long().cuda()).unsqueeze(0))
ent_tel_1_list = torch.cat(ent_tel_1_list, dim=0)
ent_tel_2_list = torch.cat(ent_tel_2_list, dim=0)
ent_tel_1_mean = torch.mean(ent_tel_1_list, dim=0)
ent_tel_2_mean = torch.mean(ent_tel_2_list, dim=0)
loss = self.loss_mse(ent_tel_1_mean, ent_tel_2_mean)
return loss
def train_entity_align(self, entity_1, entity_2):
ent_1_vec = self.ent_embedding_2(torch.tensor(eval(self.entity_dic_2[entity_1])).long().cuda())
ent_2_vec = self.ent_embedding_1(torch.tensor(eval(self.entity_dic_1[entity_2])).long().cuda())
loss = self.loss_mse(ent_1_vec, ent_2_vec)
return loss
def scale_loss(self, embedding):
return torch.sum(
torch.max(
torch.sum(
embedding ** 2, dim=1, keepdim=True
) - torch.autograd.Variable(torch.FloatTensor([1.0]).cuda()),
torch.autograd.Variable(torch.FloatTensor([0.0]).cuda())
))
def forward(self, current_triples, corrupted_triples, train_type):
        # current_triples and corrupted_triples arrive as tensors of shape (batch_size, 3)
        if train_type == 'entity_1':  # train the TransE margin objective on the first KG
h, r, t = torch.chunk(current_triples, 3, dim=1)
h_c, r_c, t_c = torch.chunk(corrupted_triples, 3, dim=1)
h = torch.squeeze(h, dim=1).cuda()
r = torch.squeeze(r, dim=1).cuda()
t = torch.squeeze(t, dim=1).cuda()
h_c = torch.squeeze(h_c, dim=1).cuda()
r_c = torch.squeeze(r_c, dim=1).cuda()
t_c = torch.squeeze(t_c, dim=1).cuda()
            # torch.nn.Embedding.forward only accepts LongTensor indices
pos = self.distance(h, r, t, 'entity_1')
neg = self.distance(h_c, r_c, t_c, 'entity_1')
entity_embedding = self.ent_embedding_1(torch.cat([h, t, h_c, t_c]).cuda())
relation_embedding = self.rel_embedding_1(torch.cat([r, r_c]).cuda())
# loss_F = max(0, -y*(x1-x2) + margin)
# loss1 = torch.sum(torch.relu(pos - neg + self.margin))
y = Variable(torch.Tensor([-1])).cuda()
loss = self.loss_F(pos, neg, y)
ent_scale_loss = self.scale_loss(entity_embedding)
rel_scale_loss = self.scale_loss(relation_embedding)
return loss # + self.C * (ent_scale_loss / len(entity_embedding) + rel_scale_loss / len(relation_embedding))
        elif train_type == 'entity_2':  # train the TransE margin objective on the second KG
h, r, t = torch.chunk(current_triples, 3, dim=1)
h_c, r_c, t_c = torch.chunk(corrupted_triples, 3, dim=1)
h = torch.squeeze(h, dim=1).cuda()
r = torch.squeeze(r, dim=1).cuda()
t = torch.squeeze(t, dim=1).cuda()
h_c = torch.squeeze(h_c, dim=1).cuda()
r_c = torch.squeeze(r_c, dim=1).cuda()
t_c = torch.squeeze(t_c, dim=1).cuda()
            # torch.nn.Embedding.forward only accepts LongTensor indices
pos = self.distance(h, r, t, 'entity_2')
neg = self.distance(h_c, r_c, t_c, 'entity_2')
entity_embedding = self.ent_embedding_2(torch.cat([h, t, h_c, t_c]).cuda())
relation_embedding = self.rel_embedding_2(torch.cat([r, r_c]).cuda())
# loss_F = max(0, -y*(x1-x2) + margin)
# loss1 = torch.sum(torch.relu(pos - neg + self.margin))
y = Variable(torch.Tensor([-1])).cuda()
loss = self.loss_F(pos, neg, y)
ent_scale_loss = self.scale_loss(entity_embedding)
rel_scale_loss = self.scale_loss(relation_embedding)
return loss # + self.C * (ent_scale_loss / len(entity_embedding) + rel_scale_loss / len(relation_embedding))
        elif train_type == 'relation':
            # Relation (path) alignment training. Here corrupted_triples is not a
            # batch of negatives but a path code such as 'p', 'pn' or 'npp': one
            # character per relation of the second KG on the aligned path, where
            # 'p' keeps a relation as-is and 'n' uses its inverse (negated
            # embedding). current_triples has shape (batch_size, 5 + len(code))
            # laid out as (h1, r1, t1, h2, r2[, r22[, r222]], t2).
            path_code = corrupted_triples
            parts = torch.chunk(current_triples, 5 + len(path_code), dim=1)
            parts = [torch.squeeze(part, dim=1).cuda() for part in parts]
            h1, r1, t1, h2 = parts[0], parts[1], parts[2], parts[3]
            path_relations = parts[4:-1]
            t2 = parts[-1]
            dbp1 = self.ent_embedding_1(h1)
            dwy1 = self.ent_embedding_2(h2)
            dbp2 = self.rel_embedding_1(r1)
            # compose the aligned path of the second KG as a signed sum of its
            # relation embeddings
            dwy2 = sum((1 if sign == 'p' else -1) * self.rel_embedding_2(rel)
                       for sign, rel in zip(path_code, path_relations))
            dbp3 = self.ent_embedding_1(t1)
            dwy3 = self.ent_embedding_2(t2)
            loss = 5 * (self.loss_mse(dbp1, dwy1) + self.loss_mse(dbp2, dwy2)
                        + self.loss_mse(dbp3, dwy3))
            return loss
elif train_type == 'entity_align':
            '''
            Direct ("hard") entity alignment: pull the embeddings of each
            pre-aligned entity pair together with an MSE loss.
            '''
dwy, dbpedia = torch.chunk(current_triples, 2, dim=1)
dwy = torch.squeeze(dwy, dim=1).cuda()
dbpedia = torch.squeeze(dbpedia, dim=1).cuda()
dwy_emb = self.ent_embedding_2(dwy)
dbpedia_emb = self.ent_embedding_1(dbpedia)
loss = 5 * self.loss_mse(dwy_emb, dbpedia_emb)
return loss
class Train:
def __init__(self):
self.loss = 0
def init_model(self):
        print('If the data change, remember to update the hard-coded sizes here!')
self.model = Multi_Model(100000, 302, 100000, 31, 100, 2.0, 1, 0.25)
self.optimizer = optim.Adam(self.model.parameters(), lr=0.005)
def get_triples(self, file1, file2):
print('get triples ...')
self.triples_1, self.triples_2 = self.model.get_triples(file1, file2)
print('get triples done!')
def train_relation(self, ent_rel_1, ent_rel_2):
        # it is enough to pass in arguments like [['a', 'b'], 'c'] and [['a', 'b'], 'd']
self.optimizer.zero_grad()
loss = self.model(ent_rel_1, ent_rel_2, 'relation')
self.loss += loss.item()
loss.backward()
self.optimizer.step()
def train_entity(self, correct_sample, corrupted_sample, entity_1_or_2):
self.optimizer.zero_grad()
if entity_1_or_2 == 'entity_1':
loss = self.model(correct_sample, corrupted_sample, 'entity_1')
else:
loss = self.model(correct_sample, corrupted_sample, 'entity_2')
self.loss += loss.item()
loss.backward()
self.optimizer.step()
def train_entity_align(self, entity_1, entity_2):
self.optimizer.zero_grad()
loss = self.model(entity_1, entity_2, 'entity_align')
self.loss += loss.item()
loss.backward()
self.optimizer.step()
def get_relation_train(self, relation_train_way):
# /root/dxh/ccw_workplace/relation_align
print('loading relation_train data...')
list_dir = os.listdir(relation_train_way)
self.relation_train_data = []
for file in range(len(list_dir)):
chunk = []
with codecs.open(f'{relation_train_way}/{list_dir[file]}', 'r', encoding='UTF-8') as f:
lines = f.readlines()
for line in lines:
if line != '\n':
line = line.strip().split()
chunk.append(line)
else:
if chunk:
db_ent = [chunk[0][0], chunk[0][2]]
db_rel = [chunk[0][1]]
dwy_ent = []
dwy_rel = []
for dwy in chunk[1:]:
dwy_ent.append(dwy[0])
dwy_ent.append((dwy[2]))
dwy_rel.append((dwy[1]))
dwy_ent = list(set(dwy_ent))
dwy_rel = list(set(dwy_rel))
self.relation_train_data.append([[db_ent, db_rel], [dwy_ent, dwy_rel]])
chunk = []
print('loading relation_train data done!')
def get_entity_train(self, entity_train_way):
        # line[0] is the DWY entity, line[1] is the DBpedia entity
print('get entity train ...')
# self.entity_train_data = []
# with codecs.open(entity_train_way, 'r', encoding='UTF-8') as f:
# lines = f.readlines()
# for line in lines:
# line = line.strip().split()
# self.entity_train_data.append([line[0], line[1]])
#
# f.close()
self.entity_train_data = self.model.entity_train_data(entity_train_way)
print('get entity train done!')
return self.entity_train_data
def save_weight(self, save_location):
with codecs.open(save_location + "MTransE_ent_1", "w") as f1:
for i, e in enumerate(self.model.ent_embedding_1.weight):
f1.write(str(i) + "\t")
f1.write(str(e.cpu().detach().numpy().tolist()))
f1.write("\n")
with codecs.open(save_location + "MTransE_ent_2", "w") as f1:
for i, e in enumerate(self.model.ent_embedding_2.weight):
f1.write(str(i) + "\t")
f1.write(str(e.cpu().detach().numpy().tolist()))
f1.write("\n")
with codecs.open(save_location + "MTransE_rel_1", "w") as f1:
for i, e in enumerate(self.model.rel_embedding_1.weight):
f1.write(str(i) + "\t")
f1.write(str(e.cpu().detach().numpy().tolist()))
f1.write("\n")
with codecs.open(save_location + "MTransE_rel_2", "w") as f1:
for i, e in enumerate(self.model.rel_embedding_2.weight):
f1.write(str(i) + "\t")
f1.write(str(e.cpu().detach().numpy().tolist()))
f1.write("\n")
def get_aligned_triple1_p(self):
'''
这里准备加路径
'''
aligned_triple1_p = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple1_p.txt')
self.aligned_triple1_p = []
for single in aligned_triple1_p:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[-1] -= 70000
self.aligned_triple1_p.append(tuple(tmp))
self.aligned_triple1_p = [list(x) for x in list(set(self.aligned_triple1_p))]
aligned_triple1_p = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple1_n.txt')
self.aligned_triple1_n = []
for single in aligned_triple1_p:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[-1] -= 70000
self.aligned_triple1_n.append(tuple(tmp))
self.aligned_triple1_n = [list(x) for x in list(set(self.aligned_triple1_n))]
aligned_triple2_pp = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple2_pp.txt')
self.aligned_triple2_pp = []
for single in aligned_triple2_pp:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[-1] -= 70000
self.aligned_triple2_pp.append(tuple(tmp))
self.aligned_triple2_pp = [list(x) for x in list(set(self.aligned_triple2_pp))]
aligned_triple2_pn = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple2_pn.txt')
self.aligned_triple2_pn = []
for single in aligned_triple2_pn:
if single[4] == single[5]:
continue
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[-1] -= 70000
self.aligned_triple2_pn.append(tuple(tmp))
self.aligned_triple2_pn = [list(x) for x in list(set(self.aligned_triple2_pn))]
aligned_triple2_np = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple2_np.txt')
self.aligned_triple2_np = []
for single in aligned_triple2_np:
if single[4] == single[5]:
continue
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[-1] -= 70000
self.aligned_triple2_np.append(tuple(tmp))
self.aligned_triple2_np = [list(x) for x in list(set(self.aligned_triple2_np))]
aligned_triple2_nn = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple2_nn.txt')
self.aligned_triple2_nn = []
for single in aligned_triple2_nn:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[-1] -= 70000
self.aligned_triple2_nn.append(tuple(tmp))
self.aligned_triple2_nn = [list(x) for x in list(set(self.aligned_triple2_nn))]
aligned_triple3_nnn = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_nnn.txt')
self.aligned_triple3_nnn = []
for single in aligned_triple3_nnn:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_nnn.append(tuple(tmp))
self.aligned_triple3_nnn = [list(x) for x in list(set(self.aligned_triple3_nnn))]
# 这个是0
aligned_triple3_nnp = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_nnp.txt')
self.aligned_triple3_nnp = []
for single in aligned_triple3_nnp:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_nnp.append(tuple(tmp))
self.aligned_triple3_nnp = [list(x) for x in list(set(self.aligned_triple3_nnp))]
aligned_triple3_npn = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_npn.txt')
self.aligned_triple3_npn = []
for single in aligned_triple3_npn:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_npn.append(tuple(tmp))
self.aligned_triple3_npn = [list(x) for x in list(set(self.aligned_triple3_npn))]
aligned_triple3_npp = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_npp.txt')
self.aligned_triple3_npp = []
for single in aligned_triple3_npp:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_npp.append(tuple(tmp))
self.aligned_triple3_npp = [list(x) for x in list(set(self.aligned_triple3_npp))]
aligned_triple3_pnn = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_pnn.txt')
self.aligned_triple3_pnn = []
for single in aligned_triple3_pnn:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_pnn.append(tuple(tmp))
self.aligned_triple3_pnn = [list(x) for x in list(set(self.aligned_triple3_pnn))]
        # this one is 0 (empty)
aligned_triple3_pnp = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_pnp.txt')
self.aligned_triple3_pnp = []
for single in aligned_triple3_pnp:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_pnp.append(tuple(tmp))
self.aligned_triple3_pnp = [list(x) for x in list(set(self.aligned_triple3_pnp))]
aligned_triple3_ppn = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_ppn.txt')
self.aligned_triple3_ppn = []
for single in aligned_triple3_ppn:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_ppn.append(tuple(tmp))
self.aligned_triple3_ppn = [list(x) for x in list(set(self.aligned_triple3_ppn))]
aligned_triple3_ppp = joblib.load('/root/dxh/ccw_workplace/Archive/aligned_triple3_ppp.txt')
self.aligned_triple3_ppp = []
for single in aligned_triple3_ppp:
tmp = [eval(x) for x in single]
tmp[0] -= 70000
tmp[2] -= 70000
tmp[3] -= 70000
tmp[4] -= 302
tmp[5] -= 302
tmp[6] -= 302
tmp[-1] -= 70000
self.aligned_triple3_ppp.append(tuple(tmp))
self.aligned_triple3_ppp = [list(x) for x in list(set(self.aligned_triple3_ppp))]
    def training_run_relation_MtransE(self, ent1_way, ent2_way, relation_train_way, entity_train_way,
epochs=300,
model_type='MtransE_relation',
batch_size=400,
out_file_title='/root/dxh/ccw_workplace/save_weights/'):
print('begin training...')
        self.get_triples(ent1_way, ent2_way)
if model_type == 'MtransE' or model_type == 'MtransE_relation':
self.get_entity_train(entity_train_way)
if model_type == 'MtransE_relation':
self.get_relation_train(relation_train_way)
'''
        TODO: this part is to be revised.
'''
self.get_aligned_triple1_p()
for epoch_ in range(epochs):
self.loss = 0
start = time.time()
print(f"epoch {epoch_} begins ...")
batch_size_1 = 6000
n_batches_1 = int(len(self.triples_1) / batch_size_1)
epoch_11 = 10
print("the number of batches_1: ", n_batches_1)
for epoch in range(epoch_11):
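                # NOTE: only one randomly-sampled batch is drawn per inner pass here,
                # even though n_batches_1 is computed and printed above.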
for batch in range(1):
batch_samples = random.sample(self.triples_1, batch_size_1)
current = []
corrupted = []
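                    # TransE-style negative sampling: with probability 0.5 corrupt the
                    # head entity, otherwise the tail, resampling until it differs.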
for sample in batch_samples:
corrupted_sample = copy.deepcopy(sample)
pr = np.random.random(1)[0]
if pr < 0.5:
corrupted_sample[0] = random.sample(self.model.entities_1, 1)[0]
while corrupted_sample[0] == sample[0]:
corrupted_sample[0] = random.sample(self.model.entities_1, 1)[0]
else:
corrupted_sample[2] = random.sample(self.model.entities_1, 1)[0]
while corrupted_sample[2] == sample[2]:
corrupted_sample[2] = random.sample(self.model.entities_1, 1)[0]
current.append(sample)
corrupted.append(corrupted_sample)
current = torch.from_numpy(np.array(current)).long()
corrupted = torch.from_numpy(np.array(corrupted)).long()
self.train_entity(current, corrupted, 'entity_1')
print('train kg_1', 'done!')
batch_size_2 = 6000
n_batches_2 = int(len(self.triples_2) / batch_size_2)
epoch_12 = 10
print("the number of batches_2: ", n_batches_2)
for epoch in range(epoch_12):
for batch in range(n_batches_2):
batch_samples = random.sample(self.triples_2, batch_size_2)
current = []
corrupted = []
for sample in batch_samples:
corrupted_sample = copy.deepcopy(sample)
pr = np.random.random(1)[0]
if pr < 0.5:
corrupted_sample[0] = random.sample(self.model.entities_2, 1)[0]
while corrupted_sample[0] == sample[0]:
corrupted_sample[0] = random.sample(self.model.entities_2, 1)[0]
else:
corrupted_sample[2] = random.sample(self.model.entities_2, 1)[0]
while corrupted_sample[2] == sample[2]:
corrupted_sample[2] = random.sample(self.model.entities_2, 1)[0]
current.append(sample)
corrupted.append(corrupted_sample)
current = torch.from_numpy(np.array(current)).long()
corrupted = torch.from_numpy(np.array(corrupted)).long()
self.train_entity(current, corrupted, 'entity_2')
print('train kg_2', 'done!')
if model_type == 'MtransE' or model_type == 'MtransE_relation':
                batch_size_3 = 1500
                n_batches_3 = int(len(self.entity_train_data) / batch_size_3)
                print("the number of n_batches_3: ", n_batches_3)
                epochs_1 = 10
                for epoch in range(epochs_1 * n_batches_3):
                    training_entity_sample = random.sample(self.entity_train_data, batch_size_3)
training_entity_sample = torch.from_numpy(np.array(training_entity_sample)).long()
self.train_entity_align(training_entity_sample, 'whatever')
print('train entity seed', 'done!')
if model_type == 'MtransE_relation':
# train_relation
epochs_2 = 50
for epoch in range(epochs_2):
b_size = 5000
n_b_size = int(len(self.aligned_triple1_p) / b_size)
for i in range(n_b_size):
training_entity_sample = random.sample(self.aligned_triple1_p, b_size)
training_entity_sample = torch.from_numpy(np.array(training_entity_sample)).long()
self.train_relation(training_entity_sample, 'p')
b_size_1 = 1000
n_b_size_1 = int(len(self.aligned_triple1_n) / b_size_1)
for i in range(n_b_size_1):
training_entity_sample = random.sample(self.aligned_triple1_n, b_size_1)
training_entity_sample = torch.from_numpy(np.array(training_entity_sample)).long()
self.train_relation(training_entity_sample, 'n')
tmp = torch.from_numpy(np.array(self.aligned_triple2_nn)).long()
self.train_relation(tmp, 'nn')
tmp = torch.from_numpy(np.array(self.aligned_triple2_np)).long()
self.train_relation(tmp, 'np')
tmp = torch.from_numpy(np.array(self.aligned_triple2_pn)).long()
self.train_relation(tmp, 'pn')
b_size_1 = 1000
n_b_size_1 = int(len(self.aligned_triple2_pp) / b_size_1)
for i in range(n_b_size_1):
training_entity_sample = random.sample(self.aligned_triple2_pp, b_size_1)
training_entity_sample = torch.from_numpy(np.array(training_entity_sample)).long()
self.train_relation(training_entity_sample, 'pp')
tmp = torch.from_numpy(np.array(self.aligned_triple3_nnn)).long()
self.train_relation(tmp, 'nnn')
tmp = torch.from_numpy(np.array(self.aligned_triple3_npn)).long()
self.train_relation(tmp, 'npn')
tmp = torch.from_numpy(np.array(self.aligned_triple3_npp)).long()
self.train_relation(tmp, 'npp')
tmp = torch.from_numpy(np.array(self.aligned_triple3_pnn)).long()
self.train_relation(tmp, 'pnn')
tmp = torch.from_numpy(np.array(self.aligned_triple3_ppn)).long()
self.train_relation(tmp, 'ppn')
b_size_1 = 1000
n_b_size_1 = int(len(self.aligned_triple3_ppp) / b_size_1)
for i in range(n_b_size_1):
training_entity_sample = random.sample(self.aligned_triple3_ppp, b_size_1)
training_entity_sample = torch.from_numpy(np.array(training_entity_sample)).long()
self.train_relation(training_entity_sample, 'ppp')
print('train relation seed', 'done!')
end = time.time()
print(f"epoch {epoch_} consuming {end - start} s and the loss is {self.loss}")
end = time.time()
# writing log
with open(out_file_title + 'training_log.txt', 'a') as f:
f.write(f"epoch {epoch_} consuming {end - start}s loss is {self.loss}")
f.write('\n')
print('saving weight to ', out_file_title, '...')
self.save_weight(out_file_title)
print('saving weight done!')
print('all training done!')
if __name__ == '__main__':
file1 = "/root/dxh/ccw_workplace/final/dbpedia_entity2id.txt"
file2 = "/root/dxh/ccw_workplace/final/dwy_entity2id.txt"
file3 = "/root/dxh/ccw_workplace/final/dbpedia_relation2id.txt"
file4 = "/root/dxh/ccw_workplace/final/dwy_relation2id.txt"
TRANSE = Train()
TRANSE.init_model()
TRANSE.model.prepare_data(file1, file2, file3, file4)
TRANSE.training_run_relation_MtransE("/root/dxh/ccw_workplace/final/dbpedia_train.txt", "/root/dxh/ccw_workplace/final/dwy_train.txt",
'/root/dxh/ccw_workplace/final/result', '/root/dxh/ccw_workplace/final/entity_align_train.txt',
model_type='MtransE_relation',
out_file_title='/root/dxh/ccw_workplace/final/new_300/')
a = Entity_Measure("/root/dxh/ccw_workplace/final/dbpedia_entity2id.txt",
"/root/dxh/ccw_workplace/final/dwy_entity2id.txt",
"/root/dxh/ccw_workplace/final/new_300/MTransE_ent_1",
"/root/dxh/ccw_workplace/final/new_300/MTransE_ent_2")
a.load_dic()
a.load_vec()
a.calculate_all_multi('/root/dxh/ccw_workplace/final/entity_align_test.txt',
outputfile='/root/dxh/ccw_workplace/final/test_result_norelation/')
|
python
|
# -*- coding: utf-8 -*-
# encoding: utf-8
# /run.py
"""
Tic-Tac-Toe API
------------------------------------------------------------------------
App for production
------------------------------------------------------------------------
"""
import os
from dotenv import load_dotenv, find_dotenv
from src.app import create_app
load_dotenv(find_dotenv(filename='.env'))
app = create_app(os.getenv('FLASK_ENV'))
|
python
|
from Tkinter import *
root = Tk()
root.title("kaki")
root.resizable(0,0)
points = []
tag = "line"
c = Canvas(root, bg="white", width=1280, height= 720)
def make_dot(x, y, xmax, ymax):
dot = c.create_oval(x, y, xmax, ymax, fill="black")
def point(event):
x = event.x - 2
y = event.y - 2
o = 4
dot = make_dot(x, y, x+o, y+o)
points.append(event.x)
points.append(event.y)
def graph(event):
global points
x = event.x - 2
y = event.y - 2
o = 4
dot = make_dot(x, y, x+o, y+o)
points.append(event.x)
points.append(event.y)
line = c.create_line(points, tags=tag, fill="black", width=4)
points = []
def clear_all(event=None):
global points
points = []
c.delete(ALL)
def move(event):
points.append(event.x)
points.append(event.y)
x = event.x - 2
y = event.y - 2
o = 4
dot = make_dot(x, y, x+o, y+o)
return points
# Bind space to clear_all just in case
root.bind("<space>", clear_all)
c.pack()
c.bind("<ButtonRelease-1>", graph)
c.bind("<B1-Motion>", move)
b = Button(root, text = "Clear", command=clear_all, height=5, width=10, bg="#0AA62A")
b.place(relx=1.0, rely=1.0, x=-2, y=-2,anchor="se")
root.mainloop()
|
python
|
#
# Catalog creation history:
#
# catalog creation requires one to set the RESULTS_VERS environment variable to the correct reduction
#
# DR11 catalog created using 'python make_rcsample.py -o /work/bovy/data/bovy/apogee/apogee-rc-DR11.fits --addl-logg-cut --rmdups'
# rmdups was added after the fact because this option changed
#
# DR12 catalog created using 'python make_rcsample.py -o /work/bovy/data/bovy/apogee/apogee-rc-DR12.fits --addl-logg-cut --rmdups'
#
# DR13 catalog created using 'python make_rcsample.py -o ~/tmp/apogee-rc-DR13.fits --rmdups --tyc2'
#
# DR14 catalog created using 'python make_rcsample.py -o ~/tmp/apogee-rc-DR14.fits --rmdups --addl-logg-cut --nostat'
#
# DR16 catalog created using 'python make_rcsample.py -o ~/tmp/apogee-rc-DR16.fits --rmdups --addl-logg-cut --nostat'
#
# current catalog created using 'python make_rcsample.py -o /work/bovy/data/bovy/apogee/apogee-rc-current.fits --addl-logg-cut --rmdups --nostat --nopm'
#
import os, os.path
from optparse import OptionParser
import csv
import tempfile
import subprocess
import numpy
try:
import fitsio
fitsread = fitsio.read
fitswrite = fitsio.write
except ImportError:
import astropy.io.fits as pyfits
fitsread = pyfits.getdata
fitswrite = pyfits.writeto
import esutil
from galpy.util import bovy_coords
import isodist
import apogee.tools.read as apread
import apogee.tools.path as appath
import apogee.select.apogeeSelect
import apogee.samples.rc as rcmodel
from apogee.tools import paramIndx
_ADDHAYDENDIST= False
_ERASESTR= " "
def make_rcsample(parser):
options,args= parser.parse_args()
savefilename= options.savefilename
if savefilename is None:
#Create savefilename if not given
savefilename= os.path.join(appath._APOGEE_DATA,
'rcsample_'+appath._APOGEE_REDUX+'.fits')
print("Saving to %s ..." % savefilename)
#Read the base-sample
data= apread.allStar(adddist=_ADDHAYDENDIST,rmdups=options.rmdups)
#Remove a bunch of fields that we do not want to keep
data= esutil.numpy_util.remove_fields(data,
['TARGET_ID',
'FILE',
'AK_WISE',
'SFD_EBV',
'SYNTHVHELIO_AVG',
'SYNTHVSCATTER',
'SYNTHVERR',
'SYNTHVERR_MED',
'RV_TEFF',
'RV_LOGG',
'RV_FEH',
'RV_ALPHA',
'RV_CARB',
'RV_CCFWHM',
'RV_AUTOFWHM',
'SYNTHSCATTER',
'STABLERV_CHI2',
'STABLERV_RCHI2',
'STABLERV_CHI2_PROB',
'CHI2_THRESHOLD',
'APSTAR_VERSION',
'ASPCAP_VERSION',
'RESULTS_VERSION',
'WASH_M',
'WASH_M_ERR',
'WASH_T2',
'WASH_T2_ERR',
'DDO51',
'DDO51_ERR',
'IRAC_3_6',
'IRAC_3_6_ERR',
'IRAC_4_5',
'IRAC_4_5_ERR',
'IRAC_5_8',
'IRAC_5_8_ERR',
'IRAC_8_0',
'IRAC_8_0_ERR',
'WISE_4_5',
'WISE_4_5_ERR',
'TARG_4_5',
'TARG_4_5_ERR',
'WASH_DDO51_GIANT_FLAG',
'WASH_DDO51_STAR_FLAG',
'REDUCTION_ID',
'SRC_H',
'PM_SRC'])
# More
if appath._APOGEE_REDUX.lower() == 'l33':
data= esutil.numpy_util.remove_fields(data,
['GAIA_SOURCE_ID',
'GAIA_PARALLAX',
'GAIA_PARALLAX_ERROR',
'GAIA_PMRA',
'GAIA_PMRA_ERROR',
'GAIA_PMDEC',
'GAIA_PMDEC_ERROR',
'GAIA_PHOT_G_MEAN_MAG',
'GAIA_PHOT_BP_MEAN_MAG',
'GAIA_PHOT_RP_MEAN_MAG',
'GAIA_RADIAL_VELOCITY',
'GAIA_RADIAL_VELOCITY_ERROR',
'GAIA_R_EST',
'GAIA_R_LO',
'GAIA_R_HI',
'TEFF_SPEC',
'LOGG_SPEC'])
if not appath._APOGEE_REDUX.lower() == 'current' \
and not 'l3' in appath._APOGEE_REDUX \
and int(appath._APOGEE_REDUX[1:]) < 500:
data= esutil.numpy_util.remove_fields(data,
['ELEM'])
#Select red-clump stars
jk= data['J0']-data['K0']
z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
if 'l31' in appath._APOGEE_REDUX:
logg= data['LOGG']
elif 'l30' in appath._APOGEE_REDUX:
logg= data['LOGG']
elif appath._APOGEE_REDUX.lower() == 'current' \
or int(appath._APOGEE_REDUX[1:]) > 600:
if False:
#Use my custom logg calibration that's correct for the RC
logg= (1.-0.042)*data['FPARAM'][:,paramIndx('logg')]-0.213
lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.255
hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.3726
else:
#Use my custom logg calibration that's correct on average
logg= (1.+0.03)*data['FPARAM'][:,paramIndx('logg')]-0.37
lowloggindx= data['FPARAM'][:,paramIndx('logg')] < 1.
logg[lowloggindx]= data['FPARAM'][lowloggindx,paramIndx('logg')]-0.34
hiloggindx= data['FPARAM'][:,paramIndx('logg')] > 3.8
logg[hiloggindx]= data['FPARAM'][hiloggindx,paramIndx('logg')]-0.256
else:
logg= data['LOGG']
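    #The cuts below select the red-clump locus in (J-Ks)0 colour vs. metallicity Z
    #(rcmodel.jkzcut) and in logg vs. Teff (rcmodel.loggteffcut); the extra 0.1 dex
    #logg shift only applies to the l31/l33 reductions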
indx= (jk < 0.8)*(jk >= 0.5)\
*(z <= 0.06)\
*(z <= rcmodel.jkzcut(jk,upper=True))\
*(z >= rcmodel.jkzcut(jk))\
*(logg >= rcmodel.loggteffcut(data['TEFF'],z,upper=False))\
*(logg+0.1*('l31' in appath._APOGEE_REDUX
or 'l33' in appath._APOGEE_REDUX) \
<= rcmodel.loggteffcut(data['TEFF'],z,upper=True))
data= data[indx]
#Add more aggressive flag cut
data= esutil.numpy_util.add_fields(data,[('ADDL_LOGG_CUT',numpy.int32)])
data['ADDL_LOGG_CUT']= ((data['TEFF']-4800.)/1000.+2.75) > data['LOGG']
if options.loggcut:
data= data[data['ADDL_LOGG_CUT'] == 1]
print("Making catalog of %i objects ..." % len(data))
#Add distances
data= esutil.numpy_util.add_fields(data,[('RC_DIST', float),
('RC_DM', float),
('RC_GALR', float),
('RC_GALPHI', float),
('RC_GALZ', float)])
rcd= rcmodel.rcdist()
jk= data['J0']-data['K0']
z= isodist.FEH2Z(data['METALS'],zsolar=0.017)
data['RC_DIST']= rcd(jk,z,appmag=data['K0'])*options.distfac
data['RC_DM']= 5.*numpy.log10(data['RC_DIST'])+10.
XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
data['GLAT'],
data['RC_DIST'],
degree=True)
RphiZ= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
XYZ[:,1],
XYZ[:,2],
Xsun=8.15,Zsun=0.0208)
R= RphiZ[:,0]
phi= RphiZ[:,1]
Z= RphiZ[:,2]
data['RC_GALR']= R
data['RC_GALPHI']= phi
data['RC_GALZ']= Z
#Save
fitswrite(savefilename,data,clobber=True)
# Add Tycho-2 matches
if options.tyc2:
data= esutil.numpy_util.add_fields(data,[('TYC2MATCH',numpy.int32),
('TYC1',numpy.int32),
('TYC2',numpy.int32),
('TYC3',numpy.int32)])
data['TYC2MATCH']= 0
data['TYC1']= -1
data['TYC2']= -1
data['TYC3']= -1
# Write positions
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(data)):
wr.writerow([data[ii]['RA'],data[ii]['DEC']])
# Send to CDS for matching
result= open(resultfilename,'w')
try:
subprocess.check_call(['curl',
'-X','POST',
'-F','request=xmatch',
'-F','distMaxArcsec=2',
'-F','RESPONSEFORMAT=csv',
'-F','cat1=@%s' % os.path.basename(posfilename),
'-F','colRA1=RA',
'-F','colDec1=DEC',
'-F','cat2=vizier:Tycho2',
'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
stdout=result)
except subprocess.CalledProcessError:
os.remove(posfilename)
if os.path.exists(resultfilename):
result.close()
os.remove(resultfilename)
result.close()
# Directly match on input RA
ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
usecols=(1,2,7,8,9))
iis= numpy.arange(len(data))
mai= [iis[data['RA'] == ma[ii,0]][0] for ii in range(len(ma))]
data['TYC2MATCH'][mai]= 1
data['TYC1'][mai]= ma[:,2]
data['TYC2'][mai]= ma[:,3]
data['TYC3'][mai]= ma[:,4]
os.remove(posfilename)
os.remove(resultfilename)
if not options.nostat:
#Determine statistical sample and add flag
apo= apogee.select.apogeeSelect()
statIndx= apo.determine_statistical(data)
mainIndx= apread.mainIndx(data)
data= esutil.numpy_util.add_fields(data,[('STAT',numpy.int32),
('INVSF',float)])
data['STAT']= 0
data['STAT'][statIndx*mainIndx]= 1
for ii in range(len(data)):
if (statIndx*mainIndx)[ii]:
data['INVSF'][ii]= 1./apo(data['LOCATION_ID'][ii],
data['H'][ii])
else:
data['INVSF'][ii]= -1.
if options.nopm:
fitswrite(savefilename,data,clobber=True)
return None
data= _add_proper_motions(data,savefilename)
# Save
fitswrite(savefilename,data,clobber=True)
return None
def _add_proper_motions(data,savefilename):
if 'l33' in appath._APOGEE_REDUX:
return _add_proper_motions_gaia(data)
else:
return _add_proper_motions_pregaia(data,savefilename)
def _add_proper_motions_gaia(data):
from gaia_tools import xmatch
gaia2_matches, matches_indx= xmatch.cds(data,colRA='RA',
colDec='DEC',
xcat='vizier:I/345/gaia2')
# Add matches
try: #These already exist currently, but may not always exist
data= esutil.numpy_util.remove_fields(data,['PMRA','PMDEC'])
except ValueError:
pass
data= esutil.numpy_util.add_fields(data,[('PLX', numpy.float),
('PMRA', numpy.float),
('PMDEC', numpy.float),
('PLX_ERR', numpy.float),
('PMRA_ERR', numpy.float),
('PMDEC_ERR', numpy.float),
('PMMATCH',numpy.int32)])
data['PMMATCH']= 0
data['PMMATCH'][matches_indx]= 1
data['PLX'][matches_indx]= gaia2_matches['parallax']
data['PMRA'][matches_indx]= gaia2_matches['pmra']
data['PMDEC'][matches_indx]= gaia2_matches['pmdec']
data['PLX_ERR'][matches_indx]= gaia2_matches['parallax_error']
data['PMRA_ERR'][matches_indx]= gaia2_matches['pmra_error']
data['PMDEC_ERR'][matches_indx]= gaia2_matches['pmdec_error']
# Set values for those without match to -999
pmindx= data['PMMATCH'] == 1
data['PLX'][True^pmindx]= -9999.99
data['PMRA'][True^pmindx]= -9999.99
data['PMDEC'][True^pmindx]= -9999.99
data['PLX_ERR'][True^pmindx]= -9999.99
data['PMRA_ERR'][True^pmindx]= -9999.99
data['PMDEC_ERR'][True^pmindx]= -9999.99
#Calculate Galactocentric velocities
data= esutil.numpy_util.add_fields(data,[('GALVR', numpy.float),
('GALVT', numpy.float),
('GALVZ', numpy.float)])
lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],data['PMDEC'],
data['RA'],data['DEC'],
degree=True)
vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
pmllpmbb[:,0],
pmllpmbb[:,1],
lb[:,0],lb[:,1],data['RC_DIST'],
degree=True)
vRvTvZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
vxvyvz[:,1],
vxvyvz[:,2],
8.-XYZ[:,0],
XYZ[:,1],
XYZ[:,2]+0.0208,
vsun=[-11.1,30.24*8.15,7.25])#Assumes proper motion of Sgr A* and R0=8.15 kpc, zo= 20.8 pc (Bennett & Bovy 2019)
data['GALVR']= vRvTvZ[:,0]
data['GALVT']= vRvTvZ[:,1]
data['GALVZ']= vRvTvZ[:,2]
data['GALVR'][True^pmindx]= -9999.99
data['GALVT'][True^pmindx]= -9999.99
data['GALVZ'][True^pmindx]= -9999.99
return data
def _add_proper_motions_pregaia(data,savefilename):
#Get proper motions, in a somewhat roundabout way
pmfile= savefilename.split('.')[0]+'_pms.fits'
if os.path.exists(pmfile):
pmdata= fitsread(pmfile,1)
else:
pmdata= numpy.recarray(len(data),
formats=['f8','f8','f8','f8','f8','f8','i4'],
names=['RA','DEC','PMRA','PMDEC',
'PMRA_ERR','PMDEC_ERR','PMMATCH'])
# Write positions, again ...
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(data)):
wr.writerow([data[ii]['RA'],data[ii]['DEC']])
# Send to CDS for matching
result= open(resultfilename,'w')
try:
subprocess.check_call(['curl',
'-X','POST',
'-F','request=xmatch',
'-F','distMaxArcsec=4',
'-F','RESPONSEFORMAT=csv',
'-F','cat1=@%s' % os.path.basename(posfilename),
'-F','colRA1=RA',
'-F','colDec1=DEC',
'-F','cat2=vizier:UCAC4',
'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
stdout=result)
except subprocess.CalledProcessError:
os.remove(posfilename)
if os.path.exists(resultfilename):
result.close()
os.remove(resultfilename)
result.close()
# Match back and only keep the closest one
ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
converters={15: lambda s: float(s.strip() or -9999),
16: lambda s: float(s.strip() or -9999),
17: lambda s: float(s.strip() or -9999),
18: lambda s: float(s.strip() or -9999)},
usecols=(4,5,15,16,17,18))
h=esutil.htm.HTM()
m1,m2,d12 = h.match(data['RA'],data['DEC'],
ma[:,0],ma[:,1],4./3600.,maxmatch=1)
pmdata['PMMATCH']= 0
pmdata['RA']= data['RA']
pmdata['DEC']= data['DEC']
pmdata['PMMATCH'][m1]= 1
pmdata['PMRA'][m1]= ma[m2,2]
pmdata['PMDEC'][m1]= ma[m2,3]
pmdata['PMRA_ERR'][m1]= ma[m2,4]
pmdata['PMDEC_ERR'][m1]= ma[m2,5]
pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
+(pmdata['PMDEC'] == -9999) \
+(pmdata['PMRA_ERR'] == -9999) \
+(pmdata['PMDEC_ERR'] == -9999)]= 0
fitswrite(pmfile,pmdata,clobber=True)
#To make sure we're using the same format below
pmdata= fitsread(pmfile,1)
os.remove(posfilename)
os.remove(resultfilename)
#Match proper motions
try: #These already exist currently, but may not always exist
data= esutil.numpy_util.remove_fields(data,['PMRA','PMDEC'])
except ValueError:
pass
data= esutil.numpy_util.add_fields(data,[('PMRA', numpy.float),
('PMDEC', numpy.float),
('PMRA_ERR', numpy.float),
('PMDEC_ERR', numpy.float),
('PMMATCH',numpy.int32)])
data['PMMATCH']= 0
h=esutil.htm.HTM()
m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
data['RA'],data['DEC'],
2./3600.,maxmatch=1)
data['PMRA'][m2]= pmdata['PMRA'][m1]
data['PMDEC'][m2]= pmdata['PMDEC'][m1]
data['PMRA_ERR'][m2]= pmdata['PMRA_ERR'][m1]
data['PMDEC_ERR'][m2]= pmdata['PMDEC_ERR'][m1]
data['PMMATCH'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
pmindx= data['PMMATCH'] == 1
data['PMRA'][True^pmindx]= -9999.99
data['PMDEC'][True^pmindx]= -9999.99
data['PMRA_ERR'][True^pmindx]= -9999.99
data['PMDEC_ERR'][True^pmindx]= -9999.99
#Calculate Galactocentric velocities
data= esutil.numpy_util.add_fields(data,[('GALVR', numpy.float),
('GALVT', numpy.float),
('GALVZ', numpy.float)])
lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA'],data['PMDEC'],
data['RA'],data['DEC'],
degree=True)
vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
pmllpmbb[:,0],
pmllpmbb[:,1],
lb[:,0],lb[:,1],data['RC_DIST'],
degree=True)
vRvTvZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
vxvyvz[:,1],
vxvyvz[:,2],
8.-XYZ[:,0],
XYZ[:,1],
XYZ[:,2]+0.025,
vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
data['GALVR']= vRvTvZ[:,0]
data['GALVT']= vRvTvZ[:,1]
data['GALVZ']= vRvTvZ[:,2]
data['GALVR'][True^pmindx]= -9999.99
data['GALVT'][True^pmindx]= -9999.99
data['GALVZ'][True^pmindx]= -9999.99
#Get HSOY proper motions, in a somewhat roundabout way
pmfile= savefilename.split('.')[0]+'_pms_ppmxl.fits'
if os.path.exists(pmfile):
pmdata= fitsread(pmfile,1)
else:
pmdata= numpy.recarray(len(data),
formats=['f8','f8','f8','f8','f8','f8','i4'],
names=['RA','DEC','PMRA','PMDEC',
'PMRA_ERR','PMDEC_ERR','PMMATCH'])
# Write positions, again ...
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(data)):
wr.writerow([data[ii]['RA'],data[ii]['DEC']])
# Send to CDS for matching
result= open(resultfilename,'w')
try:
subprocess.check_call(['curl',
'-X','POST',
'-F','request=xmatch',
'-F','distMaxArcsec=4',
'-F','RESPONSEFORMAT=csv',
'-F','cat1=@%s' % os.path.basename(posfilename),
'-F','colRA1=RA',
'-F','colDec1=DEC',
'-F','cat2=vizier:I/339/hsoy',
'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
stdout=result)
except subprocess.CalledProcessError:
os.remove(posfilename)
if os.path.exists(resultfilename):
result.close()
os.remove(resultfilename)
result.close()
# Match back and only keep the closest one
ma= numpy.loadtxt(resultfilename,delimiter=',',skiprows=1,
converters={12: lambda s: float(s.strip() or -9999),
13: lambda s: float(s.strip() or -9999),
14: lambda s: float(s.strip() or -9999),
15: lambda s: float(s.strip() or -9999)},
usecols=(3,4,12,13,14,15))
h=esutil.htm.HTM()
m1,m2,d12 = h.match(data['RA'],data['DEC'],
ma[:,0],ma[:,1],4./3600.,maxmatch=1)
pmdata['PMMATCH']= 0
pmdata['RA']= data['RA']
pmdata['DEC']= data['DEC']
pmdata['PMMATCH'][m1]= 1
pmdata['PMRA'][m1]= ma[m2,2]
pmdata['PMDEC'][m1]= ma[m2,3]
pmdata['PMRA_ERR'][m1]= ma[m2,4]
pmdata['PMDEC_ERR'][m1]= ma[m2,5]
pmdata['PMMATCH'][(pmdata['PMRA'] == -9999) \
+(pmdata['PMDEC'] == -9999) \
+(pmdata['PMRA_ERR'] == -9999) \
+(pmdata['PMDEC_ERR'] == -9999)]= 0
fitswrite(pmfile,pmdata,clobber=True)
#To make sure we're using the same format below
pmdata= fitsread(pmfile,1)
os.remove(posfilename)
os.remove(resultfilename)
#Match proper motions to ppmxl/HSOY
data= esutil.numpy_util.add_fields(data,[('PMRA_HSOY', numpy.float),
('PMDEC_HSOY', numpy.float),
('PMRA_ERR_HSOY', numpy.float),
('PMDEC_ERR_HSOY', numpy.float),
('PMMATCH_HSOY',numpy.int32)])
data['PMMATCH_HSOY']= 0
h=esutil.htm.HTM()
m1,m2,d12 = h.match(pmdata['RA'],pmdata['DEC'],
data['RA'],data['DEC'],
2./3600.,maxmatch=1)
data['PMRA_HSOY'][m2]= pmdata['PMRA'][m1]
data['PMDEC_HSOY'][m2]= pmdata['PMDEC'][m1]
data['PMRA_ERR_HSOY'][m2]= pmdata['PMRA_ERR'][m1]
data['PMDEC_ERR_HSOY'][m2]= pmdata['PMDEC_ERR'][m1]
data['PMMATCH_HSOY'][m2]= pmdata['PMMATCH'][m1].astype(numpy.int32)
pmindx= data['PMMATCH_HSOY'] == 1
data['PMRA_HSOY'][True^pmindx]= -9999.99
data['PMDEC_HSOY'][True^pmindx]= -9999.99
data['PMRA_ERR_HSOY'][True^pmindx]= -9999.99
data['PMDEC_ERR_HSOY'][True^pmindx]= -9999.99
#Calculate Galactocentric velocities
data= esutil.numpy_util.add_fields(data,[('GALVR_HSOY', numpy.float),
('GALVT_HSOY', numpy.float),
('GALVZ_HSOY', numpy.float)])
lb= bovy_coords.radec_to_lb(data['RA'],data['DEC'],degree=True)
XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],data['RC_DIST'],degree=True)
pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(data['PMRA_HSOY'],
data['PMDEC_HSOY'],
data['RA'],data['DEC'],
degree=True)
vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(data['VHELIO_AVG'],
pmllpmbb[:,0],
pmllpmbb[:,1],
lb[:,0],lb[:,1],data['RC_DIST'],
degree=True)
vRvTvZ= bovy_coords.vxvyvz_to_galcencyl(vxvyvz[:,0],
vxvyvz[:,1],
vxvyvz[:,2],
8.-XYZ[:,0],
XYZ[:,1],
XYZ[:,2]+0.025,
vsun=[-11.1,30.24*8.,7.25])#Assumes proper motion of Sgr A* and R0=8 kpc, zo= 25 pc
data['GALVR_HSOY']= vRvTvZ[:,0]
data['GALVT_HSOY']= vRvTvZ[:,1]
data['GALVZ_HSOY']= vRvTvZ[:,2]
data['GALVR_HSOY'][True^pmindx]= -9999.99
data['GALVT_HSOY'][True^pmindx]= -9999.99
data['GALVZ_HSOY'][True^pmindx]= -9999.99
#Return
return data
return None
def cos_sphere_dist(theta,phi,theta_o,phi_o):
"""
NAME:
cos_sphere_dist
PURPOSE:
computes the cosine of the spherical distance between two
points on the sphere
INPUT:
theta - polar angle [0,pi]
phi - azimuth [0,2pi]
       theta_o - polar angle of the center of the disk
       phi_o - azimuth of the center of the disk
OUTPUT:
       cosine of the spherical distance
HISTORY:
2010-04-29 -Written - Bovy (NYU)
"""
return (numpy.sin(theta)*numpy.sin(theta_o)*(numpy.cos(phi_o)*numpy.cos(phi)+
numpy.sin(phi_o)*numpy.sin(phi))+
numpy.cos(theta_o)*numpy.cos(theta))
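#Example: two points on the equator separated by 90 degrees in azimuth are
#orthogonal on the sphere, so the returned cosine is 0:
#   cos_sphere_dist(numpy.pi/2.,0.,numpy.pi/2.,numpy.pi/2.) # -> 0.0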
def get_options():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-o",dest='savefilename',default=None,
help="Name for catalog file")
parser.add_option("--distfac",dest='distfac',default=1.,
type='float',
help="Factor to apply to the RC distances")
parser.add_option("--nopm",action="store_true", dest="nopm",
default=False,
help="If set, don't match to proper motion catalogs")
parser.add_option("--nostat",action="store_true", dest="nostat",
default=False,
help="If set, don't determine the statistical sample")
parser.add_option("--rmdups",action="store_true", dest="rmdups",
default=False,
help="If set, remove duplicates from the allStar file to begin")
parser.add_option("--addl-logg-cut",action="store_true", dest="loggcut",
default=False,
help="If set, apply the ADDL_LOGG_CUT to the sample")
parser.add_option("--tyc2",action="store_true", dest="tyc2",
default=False,
help="If set, add matches to Tycho-2 catalog")
return parser
if __name__ == '__main__':
parser= get_options()
make_rcsample(parser)
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-15 09:15
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0028_merge"),
("torchbox", "0067_auto_20160906_1651"),
]
operations = [
migrations.AddField(
model_name="torchboximage",
name="collection",
field=models.ForeignKey(
default=wagtail.core.models.get_root_collection_id,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="wagtailcore.Collection",
verbose_name="collection",
),
),
]
|
python
|
n=int(input('no of primes to be generated : '))
counter=0
n1=2
while True:
is_prime=True
for i in range(2,n1//2+1):
if n1%i==0:
is_prime=False
break
if is_prime==True:
print(n1)
counter=counter+1
if counter==n:
break
n1=n1+1
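# A minor optimization (sketch): trial division only needs to test divisors up to
# the integer square root, e.g. `for i in range(2, int(n1 ** 0.5) + 1):`, which
# yields the same primes with far fewer iterations.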
|
python
|
# -*- encoding: utf-8 -*-
"""Main application window
Contains :
- menubar with File, Edit
- statusbar displaying cursor position on the scene
- NodeEditorWidget instance containing a graphics view and the scene
"""
import os
import json
from PyQt5.QtWidgets import QMainWindow, QAction, QFileDialog, QLabel, QApplication, QMessageBox
from PyQt5.QtCore import QSettings, QPoint, QSize
from PyQt5.QtGui import QCloseEvent
from .node_editor_widget import NodeEditorWidget
from .utils import dumpException
DEBUG = False
class NodeEditorWindow(QMainWindow):
    """Class representing NodeEditor's Main Window"""
    NodeEditorWidget_class = NodeEditorWidget  # static attribute allowing subclasses to swap in their own NodeEditorWidget
def __init__(self):
super().__init__()
self.name_company = 'Copey'
self.name_product = 'NodeEditor'
self.initUI()
def initUI(self):
"""Initialize main window UI setup"""
self.createActions() # Create actions from the main menubar
self.createMenus() # Populate menubar with previous actions
# set node editor
self.nodeEditor = self.__class__.NodeEditorWidget_class(self)
self.nodeEditor.scene.addHasBeenModifiedListener(self.setTitle)
self.setCentralWidget(self.nodeEditor)
# create status bar
self.createStatusBar()
# set window properties
# self.setGeometry(200, 200, 800, 600)
self.setTitle()
self.show()
def sizeHint(self):
return QSize(800, 600)
# noinspection PyArgumentList
def createActions(self):
self.actNew = QAction('&New', self, shortcut='Ctrl+N', statusTip='Create new graph',
triggered=self.onFileNew)
self.actOpen = QAction('&Open', self, shortcut='Ctrl+O', statusTip='Open file',
triggered=self.onFileOpen)
self.actSave = QAction('&Save', self, shortcut='Ctrl+S', statusTip='Save file',
triggered=self.onFileSave)
self.actSaveAs = QAction('Save &as', self, shortcut='Ctrl+Shift+S', statusTip='Save file as',
triggered=self.onFileSaveAs)
self.actExit = QAction('E&xit', self, shortcut='Ctrl+Q', statusTip='Exit application',
triggered=self.close)
self.actUndo = QAction('&Undo', self, shortcut='Ctrl+Z', statusTip='Undo last operation',
triggered=self.onEditUndo)
self.actRedo = QAction('&Redo', self, shortcut='Ctrl+Shift+Z', statusTip='Redo last operation',
triggered=self.onEditRedo)
self.actCut = QAction('Cu&t', self, shortcut='Ctrl+X', statusTip='Cut to clipboard',
triggered=self.onEditCut)
self.actCopy = QAction('&Copy', self, shortcut='Ctrl+C', statusTip='Copy to clipboard',
triggered=self.onEditCopy)
self.actPaste = QAction('&Paste', self, shortcut='Ctrl+V', statusTip='Paste from clipboard',
triggered=self.onEditPaste)
self.actDelete = QAction('&Delete', self, shortcut='Del', statusTip='Delete selected items',
triggered=self.onEditDelete)
def createMenus(self):
"""Utility function, instanciate actions from the menubar"""
self.createFileMenu()
self.createEditMenu()
def createFileMenu(self):
menubar = self.menuBar()
# initialize main menu
self.fileMenu = menubar.addMenu('&File')
self.fileMenu.addAction(self.actNew)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.actOpen)
self.fileMenu.addAction(self.actSave)
self.fileMenu.addAction(self.actSaveAs)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.actExit)
def createEditMenu(self):
menubar = self.menuBar()
# initialize editmenu
self.editMenu = menubar.addMenu('&Edit')
self.editMenu.addAction(self.actUndo)
self.editMenu.addAction(self.actRedo)
self.editMenu.addSeparator()
self.editMenu.addAction(self.actCut)
self.editMenu.addAction(self.actCopy)
self.editMenu.addAction(self.actPaste)
self.editMenu.addSeparator()
self.editMenu.addAction(self.actDelete)
def createStatusBar(self):
"""Utility function, instantiate the status bar notifying the cursor position in the scene"""
# intialize status bar
self.statusBar().showMessage('')
self.status_mouse_pos = QLabel('')
self.statusBar().addPermanentWidget(self.status_mouse_pos)
# connect the scene pos changed to a function formatting self.status_mouse_pos
self.nodeEditor.view.scenePosChanged.connect(self.onScenePosChanged)
def setTitle(self):
"""Handle title change updon modification of the scene"""
title = 'Node Editor - '
title += self.getCurrentNodeEditorWidget().getUserFriendlyFilename()
self.setWindowTitle(title)
def isModified(self):
nodeeditor = self.getCurrentNodeEditorWidget()
return nodeeditor.scene.isModified() if nodeeditor else False
def getCurrentNodeEditorWidget(self) -> NodeEditorWidget:
"""Return the widget currently holding the scene.
        For a different application, this method can be overridden to return the mdiArea, the central widget, etc.
Returns
-------
NodeEditorWidget
Node editor Widget. The widget holding the scene.
"""
return self.centralWidget()
def getFileDialogDirectory(self):
"""Returns starting directory for ``QFileDialog`` file open/save"""
return ''
def getFileDialogFilter(self):
"""Returns ``str`` standard file open/save filter for ``QFileDialog``"""
return 'Graph (*.json);;All files (*)'
def maybeSave(self):
"""Handling the dialog asking to save the file when closing the window
Returns
-------
bool
            - True : if changes are saved or discarded
            - False : if the close is canceled
"""
if not self.isModified():
return True
        res = QMessageBox.warning(self, 'About to close your work?',
                                  'The document has been modified.\nDo you want to save your changes?',
QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
if res == QMessageBox.Save:
return self.onFileSave()
elif res == QMessageBox.Cancel:
return False
return True
def onScenePosChanged(self, x, y):
"""Update mouse position to status bar"""
self.status_mouse_pos.setText('Scene Pos: [{}, {}]'.format(x, y))
def closeEvent(self, event: QCloseEvent) -> None:
"""Close if no change is detected, else prompt a dialog asking to save."""
if self.maybeSave():
event.accept()
else:
event.ignore()
def onFileNew(self):
"""Clear the scene after prompting dialod asking to save"""
if self.maybeSave():
if DEBUG: print('On File New clicked')
self.getCurrentNodeEditorWidget().fileNew()
# self.getCurrentNodeEditorWidget().scene.clear()
# self.getCurrentNodeEditorWidget().filename = None # clear filename (default save) when starting new scene
self.setTitle() # reset the title
def onFileOpen(self):
"""Open OpenFileDialog"""
if self.maybeSave():
            if DEBUG: print('On File Open clicked')
# OpenFile dialog
fname, filter = QFileDialog.getOpenFileName(self, 'Open graph from file', self.getFileDialogDirectory(),
self.getFileDialogFilter())
if fname == '':
return
if os.path.isfile(fname):
self.getCurrentNodeEditorWidget().fileLoad(fname)
self.setTitle()
def onFileSave(self) -> bool:
"""Save file without dialog. Overwrite filename.
Return True if save is not canceled and successful."""
if DEBUG: print('On File save')
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor is not None:
if not current_nodeeditor.isFilenameSet(): # if no filename exist, open SaveAs dialog
return self.onFileSaveAs()
current_nodeeditor.fileSave()
self.statusBar().showMessage('Successfully saved {}'.format(current_nodeeditor.filename), 5000)
# support for mdi application, setTitle is contained in the widget instead of the window
if hasattr(current_nodeeditor, 'setTitle'):
current_nodeeditor.setTitle()
else:
self.setTitle()
return True
def onFileSaveAs(self) -> bool:
"""Open SaveAs dialog. Return True if save is not canceled and successful."""
if DEBUG: print('OnFileSaveAs')
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor is not None:
fname, filter = QFileDialog.getSaveFileName(self, 'Save graph to file', self.getFileDialogDirectory(),
self.getFileDialogFilter())
if fname == '':
return False
current_nodeeditor.fileSave(fname)
self.statusBar().showMessage('Successfully saved as {}'.format(current_nodeeditor.filename), 5000)
# support for mdi application, setTitle is contained in the widget instead of the window
if hasattr(current_nodeeditor, 'setTitle'):
current_nodeeditor.setTitle()
else:
self.setTitle()
return True
def onEditUndo(self):
"""Undo callback"""
if DEBUG: print('Undo')
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor:
current_nodeeditor.scene.history.undo()
def onEditRedo(self):
"""Redo callback"""
if DEBUG: print('Redo')
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor:
current_nodeeditor.scene.history.redo()
def onEditDelete(self):
"""Delate callback"""
if DEBUG: print('Delete')
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor:
current_nodeeditor.scene.getView().deleteSelected()
def onEditCut(self):
"""Cut callback"""
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor:
data = current_nodeeditor.scene.clipboard.serializeSelected(delete=True)
str_data = json.dumps(data, indent=4)
if DEBUG: print('Cut :', str_data)
QApplication.instance().clipboard().setText(str_data)
def onEditCopy(self):
"""Copy callback"""
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor:
try:
data = current_nodeeditor.scene.clipboard.serializeSelected(delete=False)
str_data = json.dumps(data, indent=4)
if DEBUG: print('Copy :', str_data)
QApplication.instance().clipboard().setText(str_data)
except Exception as e:
dumpException(e)
def onEditPaste(self):
"""Paste callback"""
current_nodeeditor = self.getCurrentNodeEditorWidget()
if current_nodeeditor:
raw_data = QApplication.instance().clipboard().text()
try:
data = json.loads(raw_data)
except ValueError as e:
                print('Pasting of invalid JSON data:', e)
return
# check if json data are correct
if 'nodes' not in data:
print('JSON does not contain any nodes')
return
return current_nodeeditor.scene.clipboard.deserializeFromClipboard(data)
def readSettings(self):
settings = QSettings(QSettings.IniFormat, QSettings.UserScope, self.name_company, self.name_product)
print(settings.fileName())
pos = settings.value('pos', QPoint(200, 200))
size = settings.value('size', QSize(400, 400))
self.move(pos)
self.resize(size)
def writeSettings(self):
settings = QSettings(QSettings.IniFormat, QSettings.UserScope, self.name_company, self.name_product)
settings.setValue('pos', self.pos())
settings.setValue('size', self.size())
|
python
|
import dgl
import networkx as nx
# create a graph
g_nx = nx.petersen_graph()
g_dgl = dgl.DGLGraph(g_nx)
import matplotlib.pyplot as plt
plt.subplot(121)
nx.draw(g_nx, with_labels=True)
plt.subplot(122)
nx.draw(g_dgl.to_networkx(), with_labels=True)
plt.show()
# add edges and nodes into graph
import dgl
import torch as th
g = dgl.DGLGraph()
g.add_nodes(10)
# A couple edges one-by-one
for i in range(1, 5):
g.add_edge(i, 0)
# A few more with a paired list
src = list(range(5, 8));
dst = [0] * 3
g.add_edges(src, dst)
# finish with a pair of tensors
src = th.tensor([8, 9]);
dst = th.tensor([0, 0])
g.add_edges(src, dst)
g.add_edges([2], [8])
nx.draw(g.to_networkx(), with_labels=True)
plt.show()
# Edge broadcasting will do star graph in one go!
g.clear();
g.add_nodes(10)
src = th.tensor(list(range(1, 10)));
g.add_edges(src, 0)
import networkx as nx
import matplotlib.pyplot as plt
nx.draw(g.to_networkx(), with_labels=True)
plt.show()
# assign a feature
import dgl
import torch
# assign node features
x = torch.randn(10, 3)
# g.clear()
g.ndata['x'] = x
# print(g.ndata['x'] == g.nodes[:].data['x'])
print(g.ndata['x'])
print('x value of first node in graph : {}'.format(g.nodes[0].data['x']))
# Access node set with integer, list, or integer tensor
g.nodes[0].data['x'] = torch.zeros(1, 3)
g.nodes[[0, 1, 2]].data['x'] = torch.zeros(3, 3)
g.nodes[torch.tensor([0, 1, 2])].data['x'] = torch.zeros(3, 3)
# Assign edge features
g.edata['w'] = th.randn(9, 2)
print(g.edata['w'])
print('w value of first edge in graph : {}'.format(g.edges[0].data['w']))
# Access edge set with IDs in integer, list, or integer tensor
g.edges[1].data['w'] = th.randn(1, 2)
g.edges[[0, 1, 2]].data['w'] = th.zeros(3, 2)
print("g.edges[[0, 1, 2]].data['w'] : \n{}".format(g.edges[[0, 1, 2]].data['w']))
g.edges[th.tensor([0, 1, 2])].data['w'] = th.zeros(3, 2)
# You can also access the edges by giving endpoints
g.edges[1, 0].data['w'] = th.ones(1, 2) # edge 1 -> 0
g.edges[[1, 2, 3], [0, 0, 0]].data['w'] = th.ones(3, 2) # edges [1, 2, 3] -> 0
print(g.node_attr_schemes())
g.ndata['x'] = th.zeros((10, 4))
print(g.node_attr_schemes())
# remove node or edge states
g.ndata.pop('x')
g.edata.pop('w')
print(g.node_attr_schemes())
# create multigraphs
g_multi = dgl.DGLGraph(multigraph=True)
g_multi.add_nodes(10)
g_multi.ndata['x'] = torch.randn(10, 2)
g_multi.add_edges(list(range(1, 10)), 0)
g_multi.add_edge(1, 0) # two edges on 1->0
g_multi.edata['w'] = th.randn(10, 2)
g_multi.edges[1].data['w'] = th.zeros(1, 2)
print(g_multi.edges())
plt.figure()
nx.draw(g_dgl.to_networkx(), with_labels=True)
plt.show()
# in multigraphs, use edge's id to query edge
eid_10 = g_multi.edge_id(1, 0)
g_multi.edges[eid_10].data['w'] = th.ones(len(eid_10), 2)
print(g_multi.edata['w'])
# Note: nodes and edges can be added but not removed
|
python
|
#!/usr/bin/env python3
import base64
import sys
from pprint import pformat
from nullroute.irc import Frame
class SaslMechanism(object):
def __init__(self):
self.step = 0
self.inbuf = b""
def do_step(self, inbuf):
return None
def feed_input(self, inbuf):
self.inbuf += inbuf
return None
def get_output(self):
outbuf = self.do_step(self.inbuf)
if outbuf is None:
raise IndexError("no more SASL steps to take")
self.step += 1
self.inbuf = b""
return outbuf
class SaslPasswordMechanism(SaslMechanism):
def __init__(self, username, password, authzid=None):
super().__init__()
self.authz = authzid or username
self.authn = username
self.passwd = password
class SaslEXTERNAL(SaslMechanism):
# https://tools.ietf.org/html/rfc4422
name = "EXTERNAL"
def __init__(self, authzid=None):
super().__init__()
self.authz = authzid or ""
def do_step(self, inbuf):
if self.step == 0:
return self.authz.encode("utf-8")
class SaslPLAIN(SaslMechanism):
# https://tools.ietf.org/html/rfc4616
name = "PLAIN"
def __init__(self, username, password, authzid=None):
super().__init__()
self.authz = authzid or username
self.authn = username
self.passwd = password
def do_step(self, inbuf):
if self.step == 0:
buf = "%s\0%s\0%s" % (self.authz, self.authn, self.passwd)
return buf.encode("utf-8")
class SaslDIGEST_MD5(SaslPasswordMechanism):
# http://tools.ietf.org/html/rfc2831
# obsoleted by http://tools.ietf.org/html/rfc6331
name = "DIGEST-MD5"
class SaslCRAM_MD5(SaslPasswordMechanism):
# http://tools.ietf.org/html/rfc2195
# ?? https://tools.ietf.org/html/draft-ietf-sasl-crammd5-10
name = "CRAM-MD5"
def b64chunked(buf):
buf = base64.b64encode(buf).decode("utf-8")
size, last = 40, ""
while buf:
last = buf[:size]
yield last or "+"
buf = buf[size:]
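    # Emit a lone "+" when the payload was empty or the final chunk was exactly
    # `size` characters long; this marks the end of the base64 data in the IRC
    # AUTHENTICATE exchange.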
if not 0 < len(last) < size:
yield "+"
def trace(*a):
print(*a, file=sys.stderr)
class IrcClient(object):
def __init__(self, conn):
self.conn = conn
self.settings = {
"nick": "grawity",
"pass": "foo",
}
self.required_caps = {
"multi-prefix",
#"sasl",
}
self.wanted_caps = {
"account-notify",
"away-notify",
"extended-join",
"server-time",
"znc.in/server-time",
"znc.in/server-time-iso",
}
self.sasl_mech = None
self.enabled_caps = set()
self.current_nick = self.settings["nick"]
self.nick_counter = 0
self.isupport = {
"PREFIX": "(ov)@+",
"PREFIX.modes": {"o": "@", "v": "+"},
"PREFIX.chars": {"@": "o", "+": "v"},
"PREFIX.ranks": {"o": 2, "@": 2,
"v": 1, "+": 1},
"CHANTYPES": set("#"),
"CHANMODES": "b,k,l,imnpst",
"CHANMODES.a": set("b"),
"CHANMODES.b": set("k"),
"CHANMODES.c": set("l"),
"CHANMODES.d": set("imnpst"),
"NICKLEN": 9,
"CASEMAPPING": "rfc1459",
}
self.low_connected = False
self.high_connected = False
def is_channel(self, name):
return name[0] in self.isupport["CHANTYPES"]
def strip_prefix(self, name):
for i, c in enumerate(name):
if c not in self.isupport["PREFIX.chars"]:
return name[:i], name[i:]
raise ValueError("name %r has only prefix characters" % name)
def send_raw(self, buf):
trace("\033[35m--> %r\033[m" % buf)
self.conn.write(buf)
self.conn.flush()
def send(self, line):
buf = (line + "\r\n").encode("utf-8")
return self.send_raw(buf)
def sendv(self, *args):
buf = Frame.join(args)
return self.send_raw(buf)
def recv_raw(self):
buf = self.conn.readline()
if buf == b"":
return None
return buf
def recv(self):
buf = self.recv_raw()
if buf is None:
return None
frame = Frame.parse(buf, parse_prefix=False)
trace("\033[36m<-- %r\033[m" % frame)
return frame
def handshake(self):
self.send("CAP LS")
#self.send("PASS %(nick)s:%(pass)s" % self.settings)
self.send("NICK %(nick)s" % self.settings)
self.send("USER %(nick)s * * %(nick)s" % self.settings)
def check_low_connected(self):
if not self.low_connected:
self.low_connected = True
yield "connected", {"early": True}
def check_high_connected(self):
if not self.high_connected:
yield from self.check_low_connected()
self.high_connected = True
yield "connected", {"early": False}
def process_frame(self, frame):
if frame is None:
yield "disconnected", {"reason": "connection-lost"}
return False
elif frame.cmd == "ERROR":
error = " ".join(frame.args[1:])
trace("Server error: %r" % error)
yield "disconnected", {"reason": "server-error", "error": error}
return False
elif frame.cmd == "PING":
self.send("PONG %s" % " ".join(frame.args[1:]))
elif frame.cmd == "CAP":
sub = frame.args[2].upper()
if sub == "LS":
offered_caps = set(frame.args[3].split())
trace("Server offers capabilities: %s" % offered_caps)
missing_caps = self.required_caps - offered_caps
if missing_caps:
trace("Server is missing required capabilities: %s" % missing_caps)
self.send("QUIT")
yield "disconnected", {
"reason": "missing-caps",
"caps": missing_caps,
"refused": False,
}
request_caps = offered_caps & (self.wanted_caps | self.required_caps)
self.send("CAP REQ :%s" % " ".join(request_caps))
elif sub == "ACK":
acked_caps = set(frame.args[3].split())
trace("Server enabled capabilities: %s" % acked_caps)
self.enabled_caps |= acked_caps
if "sasl" in acked_caps:
self.sasl_mech = SaslPLAIN(username=self.settings["nick"],
password=self.settings["pass"])
trace("Starting SASL %s authentication" % self.sasl_mech.name)
self.send("AUTHENTICATE %s" % self.sasl_mech.name)
else:
self.send("CAP END")
elif sub == "NAK":
refused_caps = set(frame.args[3].split())
trace("Server refused capabilities: %s" % refused_caps)
self.send("QUIT")
yield "disconnected", {
"reason": "missing-caps",
"caps": refused_caps,
"refused": True,
}
elif frame.cmd == "AUTHENTICATE":
data = frame.args[1]
if data != "+":
                self.sasl_mech.feed_input(base64.b64decode(data))
if len(data) != 400:
outbuf = self.sasl_mech.get_output()
if outbuf is None:
trace("SASL mechanism did not return any data")
self.send("QUIT")
yield "disconnected", {"reason": "auth-failed"}
for chunk in b64chunked(outbuf):
self.send("AUTHENTICATE " + chunk)
elif frame.cmd == "001":
yield from self.check_low_connected()
elif frame.cmd == "005":
isupport_tokens = frame.args[2:-1]
for isupport_item in isupport_tokens:
if "=" in isupport_item:
k, v = isupport_item.split("=", 1)
if k == "CHANMODES":
a, b, c, d = v.split(",", 3)
self.isupport["CHANMODES.a"] = set(a)
self.isupport["CHANMODES.b"] = set(b)
self.isupport["CHANMODES.c"] = set(c)
self.isupport["CHANMODES.d"] = set(d)
elif k in {"CHANLIMIT", "MAXLIST"}:
self.isupport["%s.types" % k] = {}
limit_tokens = v.split(",")
for limit_item in limit_tokens:
types, limit = limit_item.split(":", 1)
for type in types:
self.isupport["%s.types" % k][type] = int(limit)
elif k in {"CHANNELLEN", "NICKLEN", "MODES",
"MONITOR", "TOPICLEN"}:
v = int(v)
elif k == "CHANTYPES":
v = set(v)
elif k == "EXTBAN":
char, types = v.split(",", 1)
self.isupport["EXTBAN.char"] = char
self.isupport["EXTBAN.types"] = set(types)
elif k == "NAMESX":
if "multi-prefix" not in enabled_caps:
self.send("PROTOCTL NAMESX")
elif k == "UHNAMES":
if "userhost-in-names" not in enabled_caps:
self.send("PROTOCTL UHNAMES")
elif k == "PREFIX":
self.isupport["PREFIX.modes"] = {}
self.isupport["PREFIX.chars"] = {}
modes, chars = v[1:].split(")", 1)
num = len(modes)
for i in range(num):
self.isupport["PREFIX.modes"][modes[i]] = chars[i]
self.isupport["PREFIX.chars"][chars[i]] = modes[i]
self.isupport["PREFIX.ranks"][modes[i]] = num - i
self.isupport["PREFIX.ranks"][chars[i]] = num - i
else:
k, v = isupport_item, True
self.isupport[k] = v
trace(pformat(self.isupport))
elif frame.cmd == "XXX END OF MOTD":
yield from self.check_high_connected()
elif frame.cmd == "433":
trace("Nickname %r is already in use" % self.settings["nick"])
            self.nick_counter += 1
self.current_nick = "%s%d" % (self.settings["nick"], self.nick_counter)
self.send("NICK " + self.current_nick)
elif frame.cmd == "903":
trace("Authentication successful!")
self.send("CAP END")
elif frame.cmd == "904":
if self.sasl_mech.step == 0:
trace("Authentication failed; server does not support SASL %r" %
(self.sasl_mech.name))
else:
trace("Authentication failed; the credentials were incorrect")
self.send("QUIT")
yield "disconnected", {"reason": "auth-failed"}
elif frame.cmd == "908":
trace("Authentication failed; server does not support SASL %r" %
(self.sasl_mech.name))
self.send("QUIT")
yield "disconnected", {"reason": "auth-failed"}
elif frame.cmd == "PRIVMSG":
if len(frame.args) != 3:
return True
_, rcpt, text = frame.args
yield "message", {
"from": frame.prefix,
"to": rcpt,
"text": text,
"private": not self.is_channel(rcpt),
}
elif frame.cmd == "NOTICE":
if len(frame.args) != 3:
return True
_, rcpt, text = frame.args
yield "notice", {
"from": frame.prefix,
"to": rcpt,
"text": text,
"private": not self.is_channel(rcpt),
}
return True
def process(self, buf):
frame = Frame.parse(buf, parse_prefix=False)
trace("\033[36m<-- %r\033[m" % frame)
return self.process_frame(frame)
def run(self):
self.handshake()
while True:
frame = self.recv()
if frame is None:
break
ok = yield from self.process_frame(frame)
if ok == False:
break
def send_message(self, rcpt, text):
self.sendv("PRIVMSG", rcpt, text)
class PipeWrapper(object):
def __init__(self, rd, wr):
self.rd = rd
self.wr = wr
@classmethod
def from_stdio(klass):
if hasattr(sys.stdin, "detach"):
sys.stdin = sys.stdin.detach()
if hasattr(sys.stdout, "detach"):
sys.stdout = sys.stdout.detach()
return klass(sys.stdin, sys.stdout)
def read(self, size):
return self.rd.read(size)
def readline(self):
return self.rd.readline()
def write(self, buf):
return self.wr.write(buf)
def flush(self):
return self.wr.flush()
conn = PipeWrapper.from_stdio()
client = IrcClient(conn)
for event, data in client.run():
trace("%s:" % event, pformat(data))
|
python
|
"""Config flow for nVent RAYCHEM SENZ."""
import logging
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
class OAuth2FlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle SENZ OAuth2 authentication."""
DOMAIN = DOMAIN
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {"scope": "restapi offline_access"}
|
python
|
# -*- encoding: utf-8 -*-
#
# Copyright 2012 Martin Zimmermann <[email protected]>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
#
# Provide a homogenous interface to Templating Engines like Jinja2
import abc
class AbstractEnvironment(object):
"""Generic interface for python templating engines like Jinja2 or Mako."""
__metaclass__ = abc.ABCMeta
extension = ['.html']
@abc.abstractmethod
def __init__(self, layoutdir, cachedir):
"""Initialize templating engine and set default layoutdir as well
as cache dir. You should use a custom cache filename prefix like
*__engine_hexcode.cache*."""
return
@abc.abstractmethod
def register(self, name, func):
"""Register a :param function: to :param name:"""
return
@abc.abstractmethod
def fromfile(self, env, path):
"""Load (relative) :param path: template and return a
:class:`AbstractTemplate`-like class`."""
return
@abc.abstractmethod
def extend(self, path):
"""Extend search PATH for templates by `path`."""
return
@abc.abstractproperty
def loader(self):
return
class AbstractTemplate(object):
__metaclass__ = abc.ABCMeta
def __init__(self, environment, path, template):
self.environment = environment
self.path = path
self.template = template
self.engine = environment.engine
self.loader = environment.engine.loader
@abc.abstractmethod
def render(self, **dikt):
"""Render template with :param dikt:"""
return
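# Illustrative sketch (not part of the original interface): a minimal Jinja2-backed
# implementation of the two ABCs above. Class names and the cache pattern are
# assumptions for demonstration only; the `env` argument of `fromfile` is ignored here.
#
# import jinja2
#
# class Jinja2Environment(AbstractEnvironment):
#
#     def __init__(self, layoutdir, cachedir):
#         self.engine = jinja2.Environment(
#             loader=jinja2.FileSystemLoader([layoutdir]),
#             bytecode_cache=jinja2.FileSystemBytecodeCache(cachedir, '__jinja2_%s.cache'))
#
#     def register(self, name, func):
#         self.engine.globals[name] = func
#
#     def fromfile(self, env, path):
#         return Jinja2Template(self, path, self.engine.get_template(path))
#
#     def extend(self, path):
#         self.engine.loader.searchpath.append(path)
#
#     @property
#     def loader(self):
#         return self.engine.loader
#
# class Jinja2Template(AbstractTemplate):
#
#     def render(self, **dikt):
#         return self.template.render(**dikt)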
|
python
|
import pandas as pd
import torch
def get_covariates(cov_name, split):
"""
    get a list of structured covariates as a pytorch tensor
split by train/val/test
input:
- cov_name = 'Male'
- split = 'train', 'val' or 'test'
    Here we're mostly interested in gender, so our covariate is male/female
Male = 1, Female = 0
return:
- a pytorch tensor, list of gender attribute values
"""
list_attr_fn = "../data/celeba/list_attr_celeba.txt"
splits_fn = "../data/celeba/list_eval_partition.txt"
attr = pd.read_csv(list_attr_fn, delim_whitespace=True, header=1)
splits = pd.read_csv(splits_fn, delim_whitespace=True, header=None, index_col=0)
attr = (attr + 1) // 2 # map from {-1, 1} to {0, 1}
train_mask = (splits[1] == 0)
val_mask = (splits[1] == 1)
test_mask = (splits[1] == 2)
if split == 'train':
return torch.as_tensor(attr[cov_name][train_mask])
elif split == 'val':
return torch.as_tensor(attr[cov_name][val_mask])
else:
return torch.as_tensor(attr[cov_name][test_mask])
# def main():
# res = get_covariates('Male', 'train')
# print(res)
# if __name__ == "__main__":
# main()
|
python
|
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.plotting.section` module.
"""
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.geometry import primitive_cube
from colour.models import RGB_COLOURSPACE_sRGB, RGB_to_XYZ
from colour.plotting import (plot_visible_spectrum_section,
plot_RGB_colourspace_section)
from colour.plotting.section import (plot_hull_section_colours,
plot_hull_section_contour)
from colour.utilities import is_trimesh_installed
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestPlotHullSectionColours', 'TestPlotHullSectionContour',
'TestPlotVisibleSpectrumSection', 'TestPlotRGBColourspaceSection'
]
class TestPlotHullSectionColours(unittest.TestCase):
"""
Defines :func:`colour.plotting.section.plot_hull_section_colours`
definition unit tests methods.
"""
def test_plot_hull_section_colours(self):
"""
Tests :func:`colour.plotting.section.plot_hull_section_colours`
definition.
"""
        if not is_trimesh_installed():  # pragma: no cover
return
import trimesh
vertices, faces, _outline = primitive_cube(1, 1, 1, 64, 64, 64)
XYZ_vertices = RGB_to_XYZ(
vertices['position'] + 0.5,
RGB_COLOURSPACE_sRGB.whitepoint,
RGB_COLOURSPACE_sRGB.whitepoint,
RGB_COLOURSPACE_sRGB.matrix_RGB_to_XYZ,
)
hull = trimesh.Trimesh(XYZ_vertices, faces, process=False)
figure, axes = plot_hull_section_colours(hull)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_hull_section_colours(hull, axis='+x')
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
figure, axes = plot_hull_section_colours(hull, axis='+y')
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotHullSectionContour(unittest.TestCase):
"""
Defines :func:`colour.plotting.section.plot_hull_section_contour`
definition unit tests methods.
"""
def test_plot_hull_section_contour(self):
"""
Tests :func:`colour.plotting.section.plot_hull_section_contour`
definition.
"""
        if not is_trimesh_installed():  # pragma: no cover
return
import trimesh
vertices, faces, _outline = primitive_cube(1, 1, 1, 64, 64, 64)
XYZ_vertices = RGB_to_XYZ(
vertices['position'] + 0.5,
RGB_COLOURSPACE_sRGB.whitepoint,
RGB_COLOURSPACE_sRGB.whitepoint,
RGB_COLOURSPACE_sRGB.matrix_RGB_to_XYZ,
)
hull = trimesh.Trimesh(XYZ_vertices, faces, process=False)
figure, axes = plot_hull_section_contour(hull)
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotVisibleSpectrumSection(unittest.TestCase):
"""
Defines :func:`colour.plotting.section.plot_visible_spectrum_section`
definition unit tests methods.
"""
def test_plot_visible_spectrum_section(self):
"""
Tests :func:`colour.plotting.section.plot_visible_spectrum_section`
definition.
"""
        if not is_trimesh_installed():  # pragma: no cover
return
figure, axes = plot_visible_spectrum_section()
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotRGBColourspaceSection(unittest.TestCase):
"""
Defines :func:`colour.plotting.section.plot_RGB_colourspace_section`
definition unit tests methods.
"""
def test_plot_RGB_colourspace_section(self):
"""
Tests :func:`colour.plotting.section.plot_RGB_colourspace_section`
definition.
"""
        if not is_trimesh_installed():  # pragma: no cover
return
figure, axes = plot_RGB_colourspace_section('sRGB')
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
if __name__ == '__main__':
unittest.main()
|
python
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name="index"),
url(r'^dashboard$', views.dashboard, name="dashboard"),
url(r'^properties$', views.properties, name="properties"),
url(r'^add_property$', views.add_property, name="add_property"),
url(r'^property/(?P<prop_id>\d+)/events$', views.events, name="events"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)$', views.event, name="event_by_id"),
url(r'^property/(?P<prop_id>\d+)/add_event$', views.add_event, name="add_events"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/alert/(?P<alert_id>\d+)$', views.alert, name="alert"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/add_alert$', views.add_alert, name="add_alert"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/note/(?P<note_id>\d+)$', views.note, name="note"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/notes$', views.notes, name="notes"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/add_note$', views.add_note, name="add_note"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/note/(?P<note_id>\d+)/update_note$', views.update_note, name="update_note"),
url(r'^property/(?P<prop_id>\d+)/event/(?P<event_id>\d+)/note/(?P<note_id>\d+)/add_file$', views.add_file, name="add_file"),
url(r'^sidebar$', views.sidebar, name="sidebar"),
url(r'^attachment/(?P<file_id>\d+)$', views.get_file, name="attachment"),
]
|
python
|
import sys
import os
import cv2
images_path = sys.argv[1]
thumbnail_size = 300
thumbnails_path = os.path.join(images_path, "thumbnails")
event_name = os.path.basename(images_path)
if not os.path.isdir(thumbnails_path):
os.mkdir(thumbnails_path)
images = os.listdir(images_path)
yml_line = "gallery:\n"
for ind, image in enumerate(images):
im_path = os.path.join(images_path, image)
    if image.endswith((".jpg", ".jpeg", ".JPG", ".JPEG")):
img = cv2.imread(im_path)
h, w = img.shape[:2]
        if h >= w:
            # vertical: keep the top square of the image
            img = img[:w, ...]
        else:
            # horizontal: centre-crop a square of side h
            edges = (w - h) // 2
            img = img[:, edges: edges + h, :]
img = cv2.resize(img, (thumbnail_size, thumbnail_size))
cv2.imwrite(os.path.join(thumbnails_path, image), img)
yml_line += f" - url: /photos/{event_name}/{image}\n"
yml_line += f" image_path: /photos/{event_name}/thumbnails/{image}\n"
print(yml_line)
|
python
|
from typing import List, Optional


class TreeNode:
    # Minimal binary-tree node definition (normally supplied by the judge).
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def bstFromPreorder(self, preorder: List[int]) -> Optional[TreeNode]:
        if len(preorder) == 0:
            return None
        tn = TreeNode(preorder[0])
        l = []
        r = []
        for i in preorder:
            if i < preorder[0]:
                l.append(i)
            elif i > preorder[0]:
                r.append(i)
        tn.left = self.bstFromPreorder(l)
        tn.right = self.bstFromPreorder(r)
        return tn
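# Illustrative check (not part of the original snippet): the preorder list
# [8, 5, 1, 7, 10, 12] should yield 8 at the root with 5 and 10 as children.
if __name__ == "__main__":
    root = Solution().bstFromPreorder([8, 5, 1, 7, 10, 12])
    assert (root.val, root.left.val, root.right.val) == (8, 5, 10)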
|
python
|
from onboarding_project.celery import app
from squad_pantry_app.models import PerformanceMetrics
@app.task
def calc_performance_metrics():
PerformanceMetrics.create_avg_performance_metrics()
|
python
|
""" Creates components from JSON files. This provides an easy
way for adding custom components without mucking around with blenders
API. Of course, for more advanced tools it is necessary to create
more complex UI's, but if you just want a component with a couple twiddleable
numbers, bools and vecs, a JSON component can do it for you.
Warning: this code is meta-programming heavy and probably impossible to
debug. Sorry.
"""
import os
import json
import collections
import functools
import bpy
import mathutils
from .component_base import ComponentRepresentation, register_component
import utils
FieldDefinition = collections.namedtuple("FieldDefinition", ["field", "type", "default", "description"])
ComponentDefinition = collections.namedtuple("ComponentDefinition", ["name", "description", "id", "struct", "fields"])
# Map from JSON strings to blender property types
TYPE_PROPERTIES = {
"string": bpy.props.StringProperty,
"bool": bpy.props.BoolProperty,
"f64": bpy.props.FloatProperty,
"f32": bpy.props.FloatProperty,
"int": bpy.props.IntProperty,
"vec3": functools.partial(bpy.props.FloatVectorProperty, size=3),
"vec2": functools.partial(bpy.props.FloatVectorProperty, size=2),
"u8enum": bpy.props.EnumProperty,
}
# Map from JSON strings to a function/object that the encoder can process
# to serialize the data
TYPE_ENCODERS = {
"string": str,
"bool": bool,
"f64": utils.F64,
"f32": utils.F32,
"int": int,
"vec3": mathutils.Vector,
"vec2": mathutils.Vector,
"u8enum": int
}
def get_component_files(folder):
component_definitions = []
for filename in os.listdir(folder):
if filename.endswith(".json"):
component_definitions.append(os.path.join(folder, filename))
return component_definitions
def construct_component_classes(component_filepath):
# Parse the file from JSON into some python namedtuples
    try:
        with open(component_filepath) as component_file:
            component = json.load(component_file)
except json.decoder.JSONDecodeError as err:
print("Failed to construct class for {}: {}".format(component_filepath, err))
return None
def parse_field(field):
return FieldDefinition(
field=field["field"],
type=field["type"],
default=field["default"],
description=field["description"]
)
    component_def = ComponentDefinition(
name=component["name"],
description=component["description"],
id=component["id"],
struct=component["struct"],
fields=[parse_field(f) for f in component["fields"]]
)
# component becomes bpy.types.Object.<<<obj_key>>>
obj_key = component_def.id
# Create a class that stores all the internals of the properties in
# a blender-compatible way.
properties = type(
component["name"] + "Properties",
(bpy.types.PropertyGroup, ),
{}
)
# Create bpy.props Properties for each field inside the component
fields = {}
for field in component_def.fields:
prop_type = TYPE_PROPERTIES[field.type]
args_dict = {
'name': field.field,
'description': field.description,
}
if prop_type == bpy.props.EnumProperty:
items = []
for index, name in enumerate(field.default):
items.append((str(index), name, ""))
args_dict["items"] = items
else:
args_dict["default"]=field.default
prop = prop_type(**args_dict)
fields[field.field] = prop
fields["present"] = bpy.props.BoolProperty(name="Present", default=False)
properties.__annotations__ = fields
# Create a class to store the data about this component inside the
# blender object
component_class = type(
component["name"],
(),
{
"can_add": lambda _: True,
"is_present": lambda obj: getattr(obj, obj_key).present
}
)
# Create a class that will create a UI for the component
panel = type(
component_def.name+"Panel",
(bpy.types.Panel, ),
{
"bl_idname": "OBJECT_PT_" + component_def.id,
"bl_label": component_def.name,
"bl_space_type": 'PROPERTIES',
"bl_region_type": 'WINDOW',
"bl_context": "physics",
}
)
panel.poll = classmethod(lambda cls, context: component_class.is_present(context.object))
def draw(self, context):
row = self.layout.row()
row.label(text=component_def.description)
if len(fields) == 1:
row = self.layout.row()
row.label(text="No Options")
else:
for field in fields:
if field == "present":
continue
row = self.layout.row()
row.prop(getattr(context.object, obj_key), field)
panel.draw = draw
# These functions all get put inside the component_class
def register():
bpy.utils.register_class(panel)
bpy.utils.register_class(properties)
setattr(bpy.types.Object, obj_key, bpy.props.PointerProperty(type=properties))
def unregister():
bpy.utils.unregister_class(panel)
bpy.utils.unregister_class(properties)
delattr(bpy.types.Object, obj_key)
def add(obj):
getattr(obj, obj_key).present = True
def remove(obj):
getattr(obj, obj_key).present = False
def encode(config, obj):
""" Returns a ComponentRepresentation representing this component """
component_data = getattr(obj, obj_key)
def fix_types(field_name, value):
""" Ensure types are properly represented for encoding """
field_data = [f for f in component_def.fields if f.field == field_name][0]
encoder = TYPE_ENCODERS[field_data.type]
return encoder(value)
component_values = {f:fix_types(f, getattr(component_data, f)) for f in fields if f != "present"}
return ComponentRepresentation(component_def.struct, component_values)
component_class.register = staticmethod(register)
component_class.unregister = staticmethod(unregister)
component_class.add = staticmethod(add)
component_class.remove = staticmethod(remove)
component_class.encode = staticmethod(encode)
return component_class
def load_folder(folder):
json_files = get_component_files(folder)
classes = [construct_component_classes(c) for c in json_files]
for cls in classes:
register_component(cls)
|
python
|
#!/usr/bin/env python3
import os
import re
import sys
import errno
import subprocess
import json
import glob
IMPORTPREFIX="github.com/kentik/"
REPOROOT=os.path.basename(os.getcwd())
SRCDIR="staging"
TGTDIR="proto_go"
PACKAGE="^(package .*);"
#
# Attributes pertinent to building the go code to serve proto types
# for given gather points
#
# MAPFILEGLOB matches any platform _yang2proto meta data. Meta data
# allows us to map the yang gather point to messages in pertinent
# map files.
MAPFILEGLOB="{}/*_yang2proto_map.json".format(SRCDIR)
GENGOPACKAGE="telemetry"
GENGOMAIN="{}/mdt_telemetry.go".format(TGTDIR)
GENGOMAINSKELETON="etc/mdt_telemetry.go_"
GENGOTEST="{}/mdt_telemetry_test.go".format(TGTDIR)
GENGOTESTSKELETON="etc/mdt_telemetry_test.go_"
GENGODOC="{}/doc.go".format(TGTDIR)
GENGODOCSKELETON="etc/doc.go_"
GENGOREADME="{}/README.md".format(TGTDIR)
GENGOREADMESRC="etc/README_golang.md"
BPXMAP="basepathxlation.json"
GENGOBPXMAP="{}/{}".format(TGTDIR, BPXMAP)
GENGOPREFIX="{}/{}/".format(REPOROOT, TGTDIR)
def walkTree(start):
for root, dirs, files in os.walk(start):
yield root, dirs, files
def replace_snake(m):
if m:
return m.group(1).upper()
else:
return ""
def toCamelCase(snake_str):
"""
    Modelled on protoc-gen-go/generator/generator.go. Remember to
catch the first one too. Logical OR of regexp might have been a
little neater.
"""
capital_snake = re.sub("^([a-z])", lambda m: m.group(1).upper(), snake_str)
camel_onepass = re.sub("_([a-z])", lambda m: m.group(1).upper(), capital_snake)
camel_twopass = re.sub("([0-9][a-z])", lambda m: m.group(1).upper(), camel_onepass)
return camel_twopass
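# Illustrative worked examples (not from the original source):
#   toCamelCase("generic_counters")  ->  "GenericCounters"
#   toCamelCase("addr_v4info")       ->  "AddrV4Info"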
def extractSchema(entry):
if 'schema' in entry:
return entry['schema']
else:
return ""
def createGatherPathMap():
"""
Create the map with all the content we need to auto generate the
code for mapping from yang path to corresponding messages.
"""
# We handle as many instances of mapping files as match the GLOB
listing = []
for mapfilename in list(glob.glob(MAPFILEGLOB)):
with open(mapfilename) as mapfile:
listing.extend(json.load(mapfile))
gatherPathMap = []
for entry in listing:
# If operator has removed models, simply skip them
protoFile = "{}/{}".format(SRCDIR, entry['file'])
if os.path.exists(protoFile):
gatherPathMap.append({
'gather_path' : entry['encoding_path'],
'package' : entry['package'].replace('.','_'),
'path' : GENGOPREFIX + os.path.dirname(entry['file']),
'content' : toCamelCase(entry['message']),
'keys' : toCamelCase(entry['message_keys']),
'schema': extractSchema(entry)})
return gatherPathMap
def createDictForPath2Yang(gatherPaths):
"""
Provide dict to find yang path from proto path
"""
d = {}
for g in gatherPaths:
d[g['path']] = g['gather_path']
return d
def createImportsString(gatherPaths):
"""
Build the import statement from the mapping
"""
# Only the directory is required.
imports = "\n\t".join(['{} "{}{}"'.format(
g['package'], IMPORTPREFIX, g["path"]) for g in gatherPaths])
return ('import ("reflect"\n\t{})'.format(imports))
def createReflectionGoMap(gatherPaths):
"""
Build the reflection statement for the go code providing the
pertinent mappings from yang path to message types. Version
numbers for models will be added once we get this content.
NOTE: {{ and }} are escaped { and } in format string. You'll
thank me for writing this down if you are trying to understand
the go snippet below.
"""
s = ",\n".join(['ProtoKey{{EncodingPath:"{}",Version:""}}:[]reflect.Type{{reflect.TypeOf((*{}.{})(nil)),reflect.TypeOf((*{}.{})(nil))}}'.format(g["gather_path"], g["package"], g["content"], g["package"], g["keys"]) for g in gatherPaths])
return ("var schema2message = map[ProtoKey][]reflect.Type{{{}}}".format(s))
def createTestTable(gatherPaths):
"""
Build the test table from gatherPaths set, and validate generated JSON
for base path xlation is valid.
"""
testTableHdr = """
var Yang2ProtoTestTable = []Yang2ProtoTestTableEntry{
{"", "", false, nil, nil},
{"Cisco-IOS-XR-infra-statsd-oper:infra-statistics", "", false, nil, nil},
{"Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-NONEXIST", "", false, nil, nil},
{"Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters/TOOLONG", "", false, nil, nil},
"""
testTableBdy = ",\n".join(['{{"{}","",true,reflect.TypeOf((*{}.{})(nil)),reflect.TypeOf((*{}.{})(nil))}}'.format(
g["gather_path"], g["package"], g["keys"], g["package"], g["content"]) for g in gatherPaths])
return "{}{}}}".format(testTableHdr, testTableBdy)
def createProtolist():
" get list of protos in [(rootdir, tgtdir, file),...]"
protolist = []
for root,dirs,files in walkTree(SRCDIR):
tgt = root.replace(SRCDIR, TGTDIR, 1)
for f in files:
protolist.append((root, tgt, f))
return protolist
def extractPackageName(filename):
r = re.compile(PACKAGE)
with open(filename) as f:
for line in f:
m = r.search(line)
            if m is not None:
return m.group(1).replace(".","_")
def extractRelativePath(src, tgt):
"""
    Given src and tgt rooted at the same working directory, return the
    path to tgt relative to the directory that contains src (used below
    as the relative target when creating symlinks).
"""
seps = src.count("/")
return "{}/{}".format("/".join([".."] * seps), tgt)
def generateGoCode(gatherPathMap):
"""
Generate go code, tests and docs
"""
imports = createImportsString(gatherPathMap)
reflections = createReflectionGoMap(gatherPathMap)
testtable = createTestTable(gatherPathMap)
os.makedirs(TGTDIR, exist_ok=True)
print("Generating {}...".format(GENGOMAIN))
with open(GENGOMAIN, "w") as t:
t.write('package {}\n'.format(GENGOPACKAGE))
t.write(imports + "\n")
with open(GENGOMAINSKELETON) as skeleton:
t.write(skeleton.read() + "\n")
t.write(reflections)
print("Generating {}...".format(GENGOTEST))
testBasePathXlationMap = """
func TestBasePathXlationMap(t *testing.T) {{
bpxJSON, err := ioutil.ReadFile("{}")
if err != nil {{
t.Fatal("Failed to open base path xlation map {}:", err)
}}
basePathXlation := map[string]string{{}}
err = json.Unmarshal(bpxJSON, &basePathXlation)
if err != nil {{
t.Fatal("Failed to unmarshal to expected structure:", err)
}}
}}
""".format(BPXMAP, BPXMAP)
with open(GENGOTEST, "w") as t:
t.write('package {}\n'.format(GENGOPACKAGE))
t.write(imports + "\n")
with open(GENGOTESTSKELETON) as skeleton:
t.write(skeleton.read() + "\n")
t.write(testBasePathXlationMap)
t.write(testtable)
print("Copying {}...".format(GENGODOC))
with open(GENGODOC, "w") as t:
with open(GENGODOCSKELETON) as skeleton:
t.write(skeleton.read() + "\n")
# Package at the end for doc.go
t.write('package {}\n'.format(GENGOPACKAGE))
print("Copying {}...".format(GENGOREADME))
with open(GENGOREADME, "w") as readme:
with open(GENGOREADMESRC) as readmesrc:
readme.write(readmesrc.read())
# Straight copy for this one.
def generateBasePathXlationMap():
print("Generating {}...".format(GENGOBPXMAP))
# We handle as many instances of mapping files as match the GLOB,
# much as when we build the gather paths.
listing = []
for mapfilename in list(glob.glob(MAPFILEGLOB)):
with open(mapfilename) as mapfile:
listing.extend(json.load(mapfile))
bpx = dict()
for entry in listing:
if 'schema' in entry:
bpx[entry['schema']] = entry['encoding_path']
with open(GENGOBPXMAP, 'w') as bpxFile:
json.dump(bpx, bpxFile, separators=(',\n',': '), sort_keys=True)
if __name__ == "__main__":
gatherPathMap = createGatherPathMap()
generateGoCode(gatherPathMap)
generateBasePathXlationMap()
path2yang = createDictForPath2Yang(gatherPathMap)
count = 0
print("Soft links to protos and adding 'go generate' directive in a.go...")
l = createProtolist()
for src,tgt,f in l:
if not f.endswith(".proto"):
continue
count = count + 1
srcfilename = os.path.join(src, f)
tgtfilename = os.path.join(tgt, f)
docsfile = os.path.join(tgt, "a.go")
package = extractPackageName(srcfilename)
#
# Make directory if it does not exist
os.makedirs(tgt, exist_ok=True)
#
# Make symlink
relativepath = extractRelativePath(tgtfilename, srcfilename)
try:
os.symlink(relativepath, tgtfilename)
except OSError as e:
if e.errno != errno.EEXIST:
raise
pass
#
# Write docs for go generate ./...
# and add package if necessary
doccontent = """
//go:generate protoc --go_out=plugins=grpc:. {}
""".format(f)
if not os.path.exists(docsfile):
path = "{}/{}".format(REPOROOT, tgt)
if path in path2yang:
yangPath = "// " + path2yang[path]
else:
yangPath = ""
doccontent = doccontent + """
{}
{}
""".format(yangPath, package)
        # Open in append+read mode: this creates the file if it does not
        # exist while letting us check for an existing go:generate directive.
with open(docsfile, "a+") as docfile:
docfile.seek(0)
if doccontent not in docfile.read():
docfile.write(doccontent)
print("Generating golang bindings for {} .proto files. This stage takes some time...".format(count))
try:
subprocess.check_call(["go", "generate", "./..."])
except subprocess.CalledProcessError as e:
print("'go generate' interprets .proto and builds go binding")
print(" *** STAGE DID NOT RUN CLEAN. ERROR MESSAGE ABOVE. COMMON PROBLEMS BELOW ***")
print(" GOROOT must be set to where golang is installed, minimum version go1.7 to run tests")
print(" GOPATH must be workspace root")
print(" Guidelines here: https://golang.org/doc/code.html")
print(" protoc-gen-go must be in PATH (https://github.com/golang/protobuf)")
print(" protoc must be in PATH")
print(" go get -u github.com/golang/protobuf/{proto,protoc-gen-go}")
print(e)
print("Building and running tests for package. The build stage also takes some time...")
try:
subprocess.check_call(["go", "test", "-run=.", "-bench=.", "{}{}".format(
IMPORTPREFIX, GENGOPREFIX)])
except subprocess.CalledProcessError as e:
print("This stage builds and runs.")
print(" *** STAGE DID NOT RUN CLEAN. ERROR MESSAGE ABOVE. COMMON PROBLEMS BELOW ***")
print(" GOROOT must be set to where golang is installed, minimum go1.7 to run subtests")
print(" GOPATH must be workspace root")
print(" Guidelines here: https://golang.org/doc/code.html")
print(e)
print("Done.")
|
python
|
#!/usr/bin/env python3
import sys
import time
import automationhat
time.sleep(0.1) # Short pause after ads1015 class creation recommended
try:
from PIL import Image, ImageFont, ImageDraw
except ImportError:
print("""This example requires PIL.
Install with: sudo apt install python{v}-pil
""".format(v="" if sys.version_info.major == 2 else sys.version_info.major))
sys.exit(1)
import ST7735 as ST7735
try:
from fonts.ttf import RobotoBlackItalic as UserFont
except ImportError:
print("""This example requires the Roboto font.
Install with: sudo pip{v} install fonts font-roboto
""".format(v="" if sys.version_info.major == 2 else sys.version_info.major))
sys.exit(1)
print("""analog.py
This Automation HAT Mini example displays the three ADC
analog input voltages numerically and as bar charts.
Press CTRL+C to exit.
""")
# Create ST7735 LCD display class.
disp = ST7735.ST7735(
port=0,
cs=ST7735.BG_SPI_CS_FRONT,
dc=9,
backlight=25,
rotation=270,
spi_speed_hz=4000000
)
# Initialise display.
disp.begin()
colour = (255, 181, 86)
font = ImageFont.truetype(UserFont, 12)
# Values to keep everything aligned nicely.
text_x = 110
text_y = 34
bar_x = 25
bar_y = 37
bar_height = 8
bar_width = 73
while True:
# Value to increment for spacing text and bars vertically.
offset = 0
# Open our background image.
image = Image.open("images/analog-inputs-blank.jpg")
draw = ImageDraw.Draw(image)
# Draw the text and bar for each channel in turn.
for channel in range(3):
reading = automationhat.analog[channel].read()
draw.text((text_x, text_y + offset), "{reading:.2f}".format(reading=reading), font=font, fill=colour)
# Scale bar dependent on channel reading.
width = int(bar_width * (reading / 24.0))
draw.rectangle((bar_x, bar_y + offset, bar_x + width, bar_y + bar_height + offset), colour)
offset += 14
# Draw the image to the display.
disp.display(image)
time.sleep(0.25)
|
python
|
"""Application credentials platform for the Honeywell Lyric integration."""
from homeassistant.components.application_credentials import (
AuthorizationServer,
ClientCredential,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow
from .api import LyricLocalOAuth2Implementation
from .const import OAUTH2_AUTHORIZE, OAUTH2_TOKEN
async def async_get_auth_implementation(
hass: HomeAssistant, auth_domain: str, credential: ClientCredential
) -> config_entry_oauth2_flow.AbstractOAuth2Implementation:
"""Return custom auth implementation."""
return LyricLocalOAuth2Implementation(
hass,
auth_domain,
credential,
AuthorizationServer(
authorize_url=OAUTH2_AUTHORIZE,
token_url=OAUTH2_TOKEN,
),
)
|