import time
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.model_selection import train_test_split
# A custom-made library for reporting
from my_eval_functions import set_seeds, get_clf_eval, dingdong, printtimer
# Written by <NAME>, MD. Dec 2021.
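# my_eval_functions is project-specific and not shown here. A rough sketch of what it is
# assumed to provide, inferred only from how it is called below (not the actual implementation):
#   set_seeds(seed)               -> fixes the random seeds for reproducibility
#   printtimer(t)                 -> prints elapsed time relative to t and returns a timestamp
#   get_clf_eval(y_true, y_prob)  -> prints classification metrics from labels and predicted probabilities
#   dingdong()                    -> plays an audible notification when the run finishes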
##### BEGIN
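# Overall flow: load the saved dataframe and labels, recreate the same 70/30 train/test split
# (random_state=123), load the level-0 base classifiers and the level-1 stacking ensemble,
# then print evaluation metrics on both the training and held-out sets.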
print('Loading dataframe, base, and ensemble classifiers')
start_time = printtimer(time.time())
set_seeds(123)
# READS DF
df_final = load('df_final.joblib')
df = df_final[0]
df_label = df_final[1]
df_train, df_test, df_train_label, df_test_label = train_test_split(df, df_label, test_size=0.3, random_state=123)
# READS INDIVIDUAL BASE MODELS (Lv 0)
clflist = load('MortalityOutcomeModels.joblib')
clfnamelist = load('ClassifierNameList.joblib')
# READS STACKING ENSEMBLE MODEL (Lv 1)
ensemble_model = load('EnsembleModel.joblib')
### TO STDOUT
print('*****************************************************************************************')
print(' TRAINING SET\n')
print('=========================================================================================')
for i in range(len(clflist)):
    print('\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i])
    get_clf_eval(df_train_label.tolist(), clflist[i].best_estimator_.predict_proba(df_train)[:, 1].tolist())
printtimer(start_time)
print('********** Ensemble [KNN + XGB + SVM + NB + RF + ANN + LR] ******************************')
print('*****************************************************************************************\n\n')
get_clf_eval(df_train_label.tolist(), ensemble_model.predict_proba(df_train)[:,1].tolist())
################# VALIDATION #################
print('*****************************************************************************************')
print(' VALIDATION SET\n')
print('=========================================================================================')
for i in range(len(clflist)):
    print('\n***** INDIVIDUAL MODEL (best): ', clfnamelist[i])
    get_clf_eval(df_test_label.tolist(), clflist[i].best_estimator_.predict_proba(df_test)[:, 1].tolist())
printtimer(start_time)
print('=========================================================================================')
print('********** Ensemble [KNN + XGB + SVM + NB + RF + ANN + LR] ******************************')
print('*****************************************************************************************\n\n')
get_clf_eval(df_test_label.tolist(), ensemble_model.predict_proba(df_test)[:,1].tolist())
printtimer(start_time)
dingdong()
# APIs used above: my_eval_functions.printtimer, my_eval_functions.dingdong, my_eval_functions.set_seeds, time.time, joblib.load

# --- next sample ---
import requests
url = 'http://source.darkarmy.xyz/'
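# The challenge presumably checks the User-Agent header; sending a non-default
# value makes the server return the flag in the response body.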
r = requests.get(url, headers={
'user-agent': '9e9',
})
print(r.text)
# darkCTF{changeing_http_user_agent_is_easy}
# APIs used above: requests.get

# --- next sample ---
import csv
import os
import logging
import argparse
import random
import collections
import operator
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from tensorboardX import SummaryWriter
import pdb
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context(context="talk")
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
###############################################################################
# Data Preprocessing
###############################################################################
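# Pipeline: raw TSV dialogue turns -> InputExample (one per turn, with target and
# previously-trained slot labels) -> InputFeatures (token ids padded to max_seq_length)
# -> tensors reshaped to [dialogue, turn, ...], with dummy turns (label id -1) padding
# each dialogue up to max_turn_length.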
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, prev_label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label # Target slots in this training task
self.prev_label = prev_label # trained slots in previous tasks
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_len, label_id, prev_label_id):
self.input_ids = input_ids
self.input_len = input_len
self.label_id = label_id
self.prev_label_id = prev_label_id # trained slots in previous tasks
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if len(line) > 0 and line[0][0] == '#': # ignore comments (starting with '#')
continue
lines.append(line)
return lines
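# Processor loads the slot ontology for WOZ or MultiWOZ, appends the special values
# "do not care"/"none" where needed, and splits the slots into previously trained slots
# (prev_slot) and the slots targeted by the current task (target_slot).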
class Processor(DataProcessor):
"""Processor for the belief tracking dataset (GLUE version)."""
def __init__(self, config):
super(Processor, self).__init__()
import json
if config.data_dir == "data/woz" or config.data_dir=="data/woz-turn":
fp_ontology = open(os.path.join(config.data_dir, "ontology_dstc2_en.json"), "r")
ontology = json.load(fp_ontology)
ontology = ontology["informable"]
del ontology["request"]
for slot in ontology.keys():
ontology[slot].append("do not care")
ontology[slot].append("none")
fp_ontology.close()
elif config.data_dir == "data/multiwoz":
fp_ontology = open(os.path.join(config.data_dir, "ontology.json"), "r")
ontology = json.load(fp_ontology)
for slot in ontology.keys():
ontology[slot].append("none")
fp_ontology.close()
if not config.target_slot == 'all':
slot_idx = {'attraction':'0:1:2', 'bus':'3:4:5:6', 'hospital':'7', 'hotel':'8:9:10:11:12:13:14:15:16:17',\
'restaurant':'18:19:20:21:22:23:24', 'taxi':'25:26:27:28', 'train':'29:30:31:32:33:34'}
target_slot =[]
prev_slot = []
for key, value in slot_idx.items():
if key == config.target_slot:
target_slot.append(value)
else:
prev_slot.append(value)
config.target_slot = ':'.join(target_slot)
config.prev_slot = ':'.join(prev_slot)
else:
raise NotImplementedError()
# sorting the ontology according to the alphabetic order of the slots
self.ontology = collections.OrderedDict(sorted(ontology.items()))
# select slots to train
self.target_slot = []
self.prev_slot = []
self.target_slot_idx = sorted([ int(x) for x in config.target_slot.split(':')])
self.prev_slot_idx = sorted([ int(x) for x in config.prev_slot.split(':')])
ontology_items = list(self.ontology.items())
for idx, domain in enumerate(ontology_items):
slot, value = domain
if slot == "pricerange":
slot = "price range"
if idx in self.target_slot_idx:
self.target_slot.append(slot)
elif idx in self.prev_slot_idx:
self.prev_slot.append(slot)
self.all_slot = self.prev_slot + self.target_slot
logger.info('Processor: previous slots: ' + ', '.join(self.prev_slot))
logger.info('Processor: target slots: '+ ', '.join(self.target_slot))
def get_train_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train", accumulation)
def get_dev_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev", accumulation)
def get_test_examples(self, data_dir, accumulation=False):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test", accumulation)
def get_labels(self):
"""See base class."""
return [ self.ontology[slot] for slot in self.target_slot]
def get_prev_labels(self):
"""See base class."""
return [ self.ontology[slot] for slot in self.prev_slot]
def _create_examples(self, lines, set_type, accumulation=False):
"""Creates examples for the training and dev sets."""
prev_dialogue_index = None
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s-%s" % (set_type, line[0], line[1]) # line[0]: dialogue index, line[1]: turn index
if accumulation:
if prev_dialogue_index is None or prev_dialogue_index != line[0]:
text_a = line[2]
text_b = line[3]
prev_dialogue_index = line[0]
else:
# The symbol '#' will be replaced with '[SEP]' after tokenization.
text_a = line[2] + " # " + text_a
text_b = line[3] + " # " + text_b
else:
text_a = line[2] # line[2]: user utterance
text_b = line[3] # line[3]: system response
label = [ line[4+idx] for idx in self.target_slot_idx]
prev_label = [ line[4+idx] for idx in self.prev_slot_idx]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, prev_label=prev_label))
return examples
def convert_examples_to_features(examples, label_list, prev_label_list, max_seq_length, tokenizer, max_turn_length):
"""Loads a data file into a list of `InputBatch`s."""
slot_dim = len(label_list)
prev_slot_dim = len(prev_label_list)
def _hard_coding_label(label):
return 'do not care' if label=='dontcare' else label
def _get_label(label, label_list):
label_id = []
label_info = ''
label_map = [{_label: i for i, _label in enumerate(labels)} for labels in label_list]
        for i, lab in enumerate(label):
            lab = _hard_coding_label(lab)
            label_id.append(label_map[i][lab])
            label_info += '%s (id = %d) ' % (lab, label_map[i][lab])
return label_id, label_info
features = []
prev_dialogue_idx = None
all_padding = [0] * max_seq_length
all_padding_len = [0, 0]
max_turn = 0
for (ex_index, example) in enumerate(examples):
if max_turn < int(example.guid.split('-')[2]):
max_turn = int(example.guid.split('-')[2])
max_turn_length = min(max_turn+1, max_turn_length)
logger.info("max_turn_length = %d" % max_turn)
for (ex_index, example) in enumerate(examples):
tokens_a = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_a)]
tokens_b = None
if example.text_b:
tokens_b = [x if x != '#' else '[SEP]' for x in tokenizer.tokenize(example.text_b)]
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
input_len = [len(tokens), 0]
if tokens_b:
tokens += tokens_b + ["[SEP]"]
input_len[1] = len(tokens_b) + 1
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# Zero-pad up to the sequence length.
input_ids += [0] * (max_seq_length - len(input_ids)) # Note: padding idx = 0
assert len(input_ids) == max_seq_length
label_id, label_info = _get_label(example.label, label_list)
prev_label_id, prev_label_info = _get_label(example.prev_label, prev_label_list)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % example.guid)
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_len: %s" % " ".join([str(x) for x in input_len]))
logger.info("label: " + label_info)
logger.info("previous label: " + prev_label_info)
curr_dialogue_idx = example.guid.split('-')[1]
curr_turn_idx = int(example.guid.split('-')[2])
if (prev_dialogue_idx is not None) and (prev_dialogue_idx != curr_dialogue_idx):
if prev_turn_idx < max_turn_length:
features += [InputFeatures(input_ids=all_padding,
input_len=all_padding_len,
label_id=[-1]*slot_dim,
prev_label_id=[-1] * prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
if prev_dialogue_idx is None or prev_turn_idx < max_turn_length:
features.append(InputFeatures(input_ids=input_ids,
input_len=input_len,
label_id=label_id,
prev_label_id=prev_label_id,
))
prev_dialogue_idx = curr_dialogue_idx
prev_turn_idx = curr_turn_idx
if prev_turn_idx < max_turn_length:
features += [InputFeatures(input_ids=all_padding,
input_len=all_padding_len,
label_id=[-1]*slot_dim,
prev_label_id=[-1]*prev_slot_dim)] * (max_turn_length - prev_turn_idx - 1)
assert len(features) % max_turn_length == 0
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_len= torch.tensor([f.input_len for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
all_prev_label_ids = torch.tensor([f.prev_label_id for f in features], dtype=torch.long)
# reshape tensors to [batch, turn, word]
all_input_ids = all_input_ids.view(-1, max_turn_length, max_seq_length)
all_input_len = all_input_len.view(-1, max_turn_length, 2)
all_label_ids = all_label_ids.view(-1, max_turn_length, slot_dim)
all_prev_label_ids = all_prev_label_ids.view(-1, max_turn_length, prev_slot_dim)
return all_input_ids, all_input_len, all_label_ids, all_prev_label_ids
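# Tokenize each candidate label (slot value or slot name), pad to max_seq_length, and
# return the token-id tensor plus the true lengths; these feed the slot/value embedding
# lookup of the belief tracker.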
def get_label_embedding(labels, max_seq_length, tokenizer, device):
features = []
for label in labels:
label_tokens = ["[CLS]"] + tokenizer.tokenize(label) + ["[SEP]"]
label_token_ids = tokenizer.convert_tokens_to_ids(label_tokens)
label_len = len(label_token_ids)
label_padding = [0] * (max_seq_length - len(label_token_ids))
label_token_ids += label_padding
assert len(label_token_ids) == max_seq_length
features.append((label_token_ids, label_len))
all_label_token_ids = torch.tensor([f[0] for f in features], dtype=torch.long).to(device)
all_label_len = torch.tensor([f[1] for f in features], dtype=torch.long).to(device)
return all_label_token_ids, all_label_len
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
###############################################################################
# Miscellaneous functions
###############################################################################
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
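# Linear warmup to 1.0 over the first `warmup` fraction of training, then a linear decay
# of the learning-rate multiplier as x (training progress in [0, 1]) grows.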
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x / warmup
return 1.0 - x
###############################################################################
# Main
###############################################################################
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument('--data_dir', type=str, required=True,
help='location of the data corpus')
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--bert_dir", default='/gfs/nlp/.pytorch_pretrained_bert',
type=str, required=False,
help="The directory of the pretrained BERT model")
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train: bert, bert-gru, bert-lstm, "
"bert-label-embedding, bert-gru-label-embedding, bert-lstm-label-embedding")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--load_path', type=str, default='',
help='pretrained model directory name')
parser.add_argument("--target_slot", default='', type=str, required=True,
help="Target slot idx to train model. ex. '0:1:2 or an excluding slot name 'attraction'" )
parser.add_argument("--prev_slot", default='', type=str, required=True,
help="Previous trained slots. ex. '0:1:2 or an excluding slot name 'attraction'" )
parser.add_argument("--tf_dir", default='tensorboard', type=str, required=False,
help="Tensorboard directory")
parser.add_argument("--nbt", default='rnn', type=str, required=True,
help="nbt type: rnn or transformer or turn" )
parser.add_argument("--fix_utterance_encoder",
action='store_true',
help="Do not train BERT utterance encoder")
## Other parameters
parser.add_argument("--max_seq_length", default=64, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_label_length", default=32, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_turn_length", default=22, type=int,
help="The maximum total input turn length. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--hidden_dim',
type=int,
default=100,
help="hidden dimension used in belief tracker")
parser.add_argument('--num_rnn_layers',
type=int,
default=1,
help="number of RNN layers")
parser.add_argument('--zero_init_rnn',
action='store_true',
help="set initial hidden of rnns zero")
parser.add_argument('--skip_connect',
type=str,
default=False,
help="skip-connection")
parser.add_argument('--attn_head',
type=int,
default=4,
help="the number of heads in multi-headed attention")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the test set.")
parser.add_argument("--do_analyze",
action='store_true',
help="Whether to run analysis on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--set_label_encoder_trainable",
action='store_true',
help="Set this flag if you want to set the label encoder trainable. \n"
"This option is valid only when using label embeddings. \n")
parser.add_argument("--distance_metric",
type=str,
default="cosine",
help="The metric for distance between label embeddings: cosine, euclidean.")
parser.add_argument("--train_batch_size",
default=4,
type=int,
help="Total batch size for training.")
parser.add_argument("--dev_batch_size",
default=1,
type=int,
help="Total batch size for validation.")
parser.add_argument("--eval_batch_size",
default=16,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--patience",
default=10.0,
type=float,
help="The number of epochs to allow no further improvement.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--lambda_ewc",
default=0.1,
type=float,
help="Hyper-parameter for EWC")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument("--do_not_use_tensorboard",
action='store_true',
help="Whether to run eval on the test set.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
tb_file_name = args.output_dir.split('/')[1]
# Tensorboard logging
if not args.do_not_use_tensorboard:
summary_writer = SummaryWriter("./%s/%s" % (args.tf_dir, tb_file_name))
else:
summary_writer = None
fileHandler = logging.FileHandler(os.path.join(args.output_dir, "%s.txt"%(tb_file_name)))
logger.addHandler(fileHandler)
logger.info(args)
# CUDA setting
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval and not args.do_analyze:
        raise ValueError("At least one of `do_train`, `do_eval` or `do_analyze` must be True.")
###############################################################################
# Load data
###############################################################################
# Get Processor
processor = Processor(args)
prev_label_list = processor.get_prev_labels() # Slot value labels of Previous task
target_label_list = processor.get_labels() # Slot value labels of Present task
label_list = prev_label_list + target_label_list # All slot value labels
num_labels = [len(labels) for labels in label_list] # Number of labels of all slots
#prev_slot_id = processor.prev_slot_idx
#target_slot_id = processor.target_slot_idx
# wrong
prev_slot_id = list(range(0, len(processor.prev_slot))) # List of slots in previous task
target_slot_id = list(range(len(processor.prev_slot), len(processor.all_slot))) # list of slots in present task
# tokenizer
vocab_dir = os.path.join(args.bert_dir, '%s-vocab.txt' % args.bert_model)
if not os.path.exists(vocab_dir):
raise ValueError("Can't find %s " % vocab_dir)
tokenizer = BertTokenizer.from_pretrained(vocab_dir, do_lower_case=args.do_lower_case)
num_train_steps = None
accumulation = False
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir, accumulation=accumulation)
dev_examples = processor.get_dev_examples(args.data_dir, accumulation=accumulation)
num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)
num_dev_steps = int(len(dev_examples) / args.dev_batch_size * args.num_train_epochs)
## utterances
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
train_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
= all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)
train_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
## Dev
## utterances
all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = convert_examples_to_features(
dev_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
logger.info("***** Running validation *****")
logger.info(" Num examples = %d", len(dev_examples))
logger.info(" Batch size = %d", args.dev_batch_size)
logger.info(" Num steps = %d", num_dev_steps)
all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev = \
all_input_ids_dev.to(device), all_input_len_dev.to(device), all_label_ids_dev.to(device), all_prev_label_ids_dev.to(device)
dev_data = TensorDataset(all_input_ids_dev, all_input_len_dev, all_label_ids_dev, all_prev_label_ids_dev)
dev_sampler = SequentialSampler(dev_data)
dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)
logger.info("Loaded data!")
###############################################################################
# Build the models
###############################################################################
# Prepare model
if args.nbt =='rnn':
from BeliefTrackerSlotQueryMultiSlot import BeliefTracker
if args.task_name.find("gru") == -1 and args.task_name.find("lstm") == -1:
raise ValueError("Task name should include at least \"gru\" or \"lstm\"")
elif args.nbt =='turn':
from BeliefTrackerSlotQueryMultiSlotTurn import BeliefTracker
elif args.nbt == 'transformer':
from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker
from BeliefTrackerSlotQueryMultiSlotEWC import EWC
else:
        raise ValueError('nbt type should be either rnn, turn, or transformer')
from BeliefTrackerSlotQueryMultiSlotEWC import EWC
model = BeliefTracker(args, num_labels, device)
if args.fp16:
model.half()
# Load pretrained model
# in the case that slot and values are different between the training and evaluation
ptr_model = torch.load(args.load_path, map_location=device)
del_list = []
rename_list = []
for key in ptr_model.keys():
if ('slot_lookup' in key) or ('value_lookup' in key): # remove slot_lookup and value_lookup
del_list.append(key)
if ('rnn.' in key): # rename rnn -> nbt,
rename_list.append(key)
for key in del_list:
del ptr_model[key]
for key in rename_list:
new_key = key.replace('rnn.', 'nbt.')
ptr_model[new_key] = ptr_model[key]
del ptr_model[key]
state = model.state_dict()
state.update(ptr_model)
model.load_state_dict(state)
model.to(device)
## Get slot-value embeddings
label_token_ids, label_len = [], []
for labels in label_list:
token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
label_token_ids.append(token_ids)
label_len.append(lens)
## Get slot-type embeddings
## Note: slot embeddings are ordered as [previous slots + present target slots]
slot_token_ids, slot_len = \
get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)
model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.do_train:
def get_optimizer_grouped_parameters(model):
param_optimizer = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01,
'lr': args.learning_rate},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,
'lr': args.learning_rate},
]
return optimizer_grouped_parameters
if n_gpu == 1:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(model)
else:
optimizer_grouped_parameters = get_optimizer_grouped_parameters(model.module)
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
logger.info(optimizer)
###############################################################################
# Training code
###############################################################################
if args.do_train:
logger.info("Training...")
global_step = 0
last_update = None
best_loss = None
#### EWC: calculate Fisher
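        # Elastic Weight Consolidation: the Fisher information is estimated on the validation
        # set for the previously trained slots (prev_slot_id); ewc.penalty(model) is assumed to
        # return the quadratic penalty sum_i F_i * (theta_i - theta*_i)^2, which is added to the
        # task loss weighted by --lambda_ewc.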
ewc = EWC(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels, device=device, n_gpu=n_gpu)
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
# for epoch in trange(1):
#### TRAIN
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_len, label_ids, _ = batch
if n_gpu == 1:
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
else:
loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
target_slot=target_slot_id)
loss_ = loss_.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if summary_writer is not None:
summary_writer.add_scalar("Epoch", epoch, global_step)
summary_writer.add_scalar("Train/Loss", loss_, global_step)
summary_writer.add_scalar("Train/Loss_EWC", loss_ewc, global_step)
summary_writer.add_scalar("Train/Loss_Total", loss, global_step)
summary_writer.add_scalar("Train/JointAcc", acc, global_step)
if n_gpu == 1:
for i, slot in enumerate(processor.target_slot):
summary_writer.add_scalar("Train/Loss_%s" % slot.replace(' ','_'), loss_slot[i], global_step)
summary_writer.add_scalar("Train/Acc_%s" % slot.replace(' ','_'), acc_slot[i], global_step)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)
if summary_writer is not None:
summary_writer.add_scalar("Train/LearningRate", lr_this_step, global_step)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
# Perform evaluation on validation dataset
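            # Validation tracks two sets of metrics: loss/accuracy on the current target slots
            # and loss/accuracy on the previously trained slots, so that forgetting of earlier
            # tasks can be monitored during continual training.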
model.eval()
dev_loss = 0
dev_acc = 0
dev_loss_slot, dev_acc_slot = None, None
nb_dev_examples, nb_dev_steps = 0, 0
prev_dev_loss = 0
prev_dev_acc = 0
prev_dev_loss_slot, prev_dev_acc_slot = None, None
prev_nb_dev_examples = 0
for step, batch in enumerate(tqdm(dev_dataloader, desc="Validation")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_len, label_ids, prev_label_ids = batch
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                    label_ids = label_ids.unsqueeze(0)
                    prev_label_ids = prev_label_ids.unsqueeze(0)
with torch.no_grad():
if n_gpu == 1:
loss_, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu,
target_slot=target_slot_id)
loss = loss_ + args.lambda_ewc * ewc.penalty(model)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len,
prev_label_ids, n_gpu,
target_slot=prev_slot_id)
else:
loss_, _, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss_ = loss_.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
loss_ewc = ewc.penalty(model)
loss = loss_ + args.lambda_ewc * loss_ewc
prev_loss, _, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
prev_loss = prev_loss.mean()
prev_acc = prev_acc.mean()
prev_acc_slot = prev_acc_slot.mean(0)
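                # label_ids[:, :, 0] == -1 marks the dummy padding turns added in
                # convert_examples_to_features; only real turns are counted so that the
                # running loss/accuracy is a per-turn average.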
num_valid_turn = torch.sum(label_ids[:,:,0].view(-1) > -1, 0).item()
dev_loss += loss.item() * num_valid_turn
dev_acc += acc.item() * num_valid_turn
prev_num_valid_turn = torch.sum(prev_label_ids[:,:,0].view(-1) > -1, 0).item()
prev_dev_loss += prev_loss.item() * prev_num_valid_turn
prev_dev_acc += prev_acc.item() * prev_num_valid_turn
if n_gpu == 1:
if dev_loss_slot is None:
dev_loss_slot = [ l * num_valid_turn for l in loss_slot]
dev_acc_slot = acc_slot * num_valid_turn
prev_dev_loss_slot = [ l * prev_num_valid_turn for l in prev_loss_slot]
prev_dev_acc_slot = prev_acc_slot * prev_num_valid_turn
else:
for i, l in enumerate(loss_slot):
dev_loss_slot[i] = dev_loss_slot[i] + l * num_valid_turn
dev_acc_slot += acc_slot * num_valid_turn
for i, l in enumerate(prev_loss_slot):
prev_dev_loss_slot[i] = prev_dev_loss_slot[i] + l * prev_num_valid_turn
prev_dev_acc_slot += prev_acc_slot * prev_num_valid_turn
nb_dev_examples += num_valid_turn
prev_nb_dev_examples += prev_num_valid_turn
dev_loss = dev_loss / nb_dev_examples
dev_acc = dev_acc / nb_dev_examples
prev_dev_loss = prev_dev_loss / prev_nb_dev_examples
prev_dev_acc = prev_dev_acc / prev_nb_dev_examples
if n_gpu == 1:
dev_acc_slot = dev_acc_slot / nb_dev_examples
prev_dev_acc_slot = prev_dev_acc_slot / prev_nb_dev_examples
if summary_writer is not None:
summary_writer.add_scalar("Validate/Loss", dev_loss, global_step)
summary_writer.add_scalar("Validate/Acc", dev_acc, global_step)
summary_writer.add_scalar("Validate/Prev_Loss", prev_dev_loss, global_step)
summary_writer.add_scalar("Validate/Prev_Acc", prev_dev_acc, global_step)
if n_gpu == 1:
for i, slot in enumerate(processor.target_slot):
summary_writer.add_scalar("Validate/Loss_%s" % slot.replace(' ','_'), dev_loss_slot[i]/nb_dev_examples, global_step)
summary_writer.add_scalar("Validate/Acc_%s" % slot.replace(' ','_'), dev_acc_slot[i], global_step)
for i, slot in enumerate(processor.prev_slot):
summary_writer.add_scalar("Validate/Prev_Loss_%s" % slot.replace(' ','_'), prev_dev_loss_slot[i]/prev_nb_dev_examples, global_step)
summary_writer.add_scalar("Validate/Prev_Acc_%s" % slot.replace(' ','_'), prev_dev_acc_slot[i], global_step)
logger.info("*** Model Updated: Epoch=%d, Valid loss=%.6f, Valid acc=%.6f, Valid prev loss=%.6f, Valid prev acc=%.6f ***" \
% (epoch, dev_loss, dev_acc, prev_dev_loss, prev_dev_acc))
dev_loss = round(dev_loss, 6)
if last_update is None or dev_loss < best_loss:
# Save a trained model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if args.do_train:
if n_gpu == 1:
torch.save(model.state_dict(), output_model_file)
else:
torch.save(model.module.state_dict(), output_model_file)
last_update = epoch
best_loss = dev_loss
best_acc = dev_acc
logger.info("*** Model Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***" % (last_update, best_loss, best_acc))
else:
logger.info("*** Model NOT Updated: Epoch=%d, Validation Loss=%.6f, Validation Acc=%.6f ***" % (epoch, dev_loss, dev_acc))
#if epoch > 100 and last_update + args.patience <= epoch:
if last_update + args.patience <= epoch:
break
###############################################################################
# Evaluation
###############################################################################
# Test
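    # Reload the best checkpoint saved during training, strip its slot/value lookup tables
    # (they are rebuilt below for the current label set), and re-initialize the slot and
    # value embeddings before evaluation.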
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
# Load a trained model that you have fine-tuned
ptr_model = torch.load(output_model_file, map_location=device)
del_list = []
for key in ptr_model.keys():
if ('slot' in key) or ('value' in key):
del_list.append(key)
for key in del_list:
del ptr_model[key]
if n_gpu > 1:
model = model.module
state = model.state_dict()
state.update(ptr_model)
model.load_state_dict(state)
model.to(device)
## Get slot-value embeddings
label_token_ids, label_len = [], []
for labels in label_list:
token_ids, lens = get_label_embedding(labels, args.max_label_length, tokenizer, device)
label_token_ids.append(token_ids)
label_len.append(lens)
## Get slot-type embeddings
## Note: slot embeddings are ordered as [previous slots + present target slots]
slot_token_ids, slot_len = \
get_label_embedding(processor.all_slot, args.max_label_length, tokenizer, device)
model.initialize_slot_value_lookup(label_token_ids, label_len, slot_token_ids, slot_len)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids = convert_examples_to_features(
eval_examples, target_label_list, prev_label_list, args.max_seq_length, tokenizer, args.max_turn_length)
all_input_ids, all_input_len, all_label_ids, all_prev_label_ids \
= all_input_ids.to(device), all_input_len.to(device), all_label_ids.to(device), all_prev_label_ids.to(device)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
eval_loss_slot, eval_acc_slot = None, None
nb_eval_steps, nb_eval_examples = 0, 0
prev_eval_loss, prev_eval_accuracy = 0, 0
prev_eval_loss_slot, prev_eval_acc_slot = None, None
nb_eval_examples_prev = 0
for input_ids, input_len, label_ids, prev_label_ids in tqdm(eval_dataloader, desc="Evaluating"):
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
                prev_label_ids = prev_label_ids.unsqueeze(0)
with torch.no_grad():
if n_gpu == 1:
loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
else:
loss, loss_slot, acc, acc_slot, _ = model(input_ids, input_len, label_ids, n_gpu, target_slot=target_slot_id)
loss = loss.mean()
acc = acc.mean()
acc_slot = acc_slot.mean(0)
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, _ = model(input_ids, input_len, prev_label_ids, n_gpu, target_slot=prev_slot_id)
prev_loss = prev_loss.mean()
prev_acc = prev_acc.mean()
prev_acc_slot = prev_acc_slot.mean(0)
nb_eval_ex_prev = (prev_label_ids[:,:,0].view(-1) != -1).sum().item()
nb_eval_examples_prev += nb_eval_ex_prev
nb_eval_ex = (label_ids[:,:,0].view(-1) != -1).sum().item()
nb_eval_examples += nb_eval_ex
nb_eval_steps += 1
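            # Accumulate example-weighted loss and accuracy (per slot and overall) for a batch;
            # used once for the current target slots and once for the previously trained slots.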
def _post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex):
eval_loss += loss.item() * nb_eval_ex
eval_accuracy += acc.item() * nb_eval_ex
if loss_slot is not None:
if eval_loss_slot is None:
eval_loss_slot = [ l * nb_eval_ex for l in loss_slot]
else:
for i, l in enumerate(loss_slot):
eval_loss_slot[i] = eval_loss_slot[i] + l * nb_eval_ex
if eval_acc_slot is None:
eval_acc_slot = acc_slot * nb_eval_ex
else:
eval_acc_slot += acc_slot * nb_eval_ex
return eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot
eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot = \
_post_process(eval_loss, eval_loss_slot, eval_accuracy, eval_acc_slot, loss, loss_slot, acc, acc_slot, nb_eval_ex)
prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot = \
_post_process(prev_eval_loss, prev_eval_loss_slot, prev_eval_accuracy, prev_eval_acc_slot, \
prev_loss, prev_loss_slot, prev_acc, prev_acc_slot, nb_eval_ex_prev)
eval_loss /= nb_eval_examples
if eval_loss_slot is None: # for multi-gpu
eval_loss_slot = [0]
prev_eval_loss_slot = [0]
eval_accuracy = eval_accuracy / nb_eval_examples
prev_eval_loss = prev_eval_loss / nb_eval_examples_prev
prev_eval_accuracy = prev_eval_accuracy / nb_eval_examples_prev
eval_acc_slot = eval_acc_slot / nb_eval_examples
prev_eval_acc_slot = prev_eval_acc_slot / nb_eval_examples_prev
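        # Combine per-slot accuracies of target and previous slots, keyed by their original
        # slot indices, and sort so the report follows the ontology order.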
total_acc_slot = {}
for val, idx in zip(torch.cat([eval_acc_slot, prev_eval_acc_slot]), (target_slot_id+prev_slot_id)):
total_acc_slot[idx] = val
total_acc_slot = sorted(total_acc_slot.items(), key=operator.itemgetter(0))
loss = tr_loss / nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'loss': loss,
'eval_loss_slot':'\t'.join([ str(val/ nb_eval_examples) for val in eval_loss_slot]),
'eval_acc_slot':'\t'.join([ str((val).item()) for val in eval_acc_slot]),
'prev_eval_loss': prev_eval_loss,
'prev_eval_accuracy': prev_eval_accuracy,
'prev_eval_loss_slot': '\t'.join([str(val / nb_eval_examples_prev) for val in prev_eval_loss_slot]),
'prev_eval_acc_slot': '\t'.join([str((val).item()) for val in prev_eval_acc_slot]),
'total_acc_slot': '\t'.join([str(val[1].item()) for val in total_acc_slot])
}
out_file_name = 'eval_results'
if args.target_slot=='all':
out_file_name += '_all'
output_eval_file = os.path.join(args.output_dir, "%s.txt" % out_file_name)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
###############################################################################
# Analyze: TODO
###############################################################################
if args.do_analyze and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
pdb.set_trace()
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
class_correct = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
class_count = [[0 for x in range(num_labels[i])] for i in range(len(num_labels))]
eval_examples = processor.get_test_examples(args.data_dir, accumulation=accumulation)
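        # NOTE: this analysis path has not been updated for the prev/target slot split;
        # convert_examples_to_features now takes (examples, target_label_list, prev_label_list,
        # max_seq_length, tokenizer, max_turn_length) and returns four tensors, so the call
        # below needs to be adapted before this branch can run.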
all_input_ids, all_input_len, all_label_ids = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, args.max_turn_length)
all_input_ids, all_input_len, all_label_ids = all_input_ids.to(device), all_input_len.to(
device), all_label_ids.to(device)
logger.info("***** Running analysis *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", 1)
eval_data = TensorDataset(all_input_ids, all_input_len, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
model.eval()
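        # The 'none' value is appended last to every slot's value list in the Processor,
        # so its index is len(values) - 1 for each slot.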
none_value_id = [ len(val)-1 for val in label_list]
incorrect_dialogs = []
attention_draw = 5
for input_ids, input_len, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
if input_ids.dim() == 2:
input_ids = input_ids.unsqueeze(0)
input_len = input_len.unsqueeze(0)
                label_ids = label_ids.unsqueeze(0)
with torch.no_grad():
_, _, acc, _, pred_slot = model(input_ids, input_len, label_ids, 1)
nturn = (label_ids[:,:,0].view(-1) != -1).sum().item()
nslot = label_ids.size(2)
for slot in range(nslot):
for turn in range(nturn):
class_count[slot][label_ids[0][turn][slot]]+=1
if label_ids[0][turn][slot] == pred_slot[0][turn][slot]:
class_correct[slot][label_ids[0][turn][slot]] +=1
drawfig = False
print('hotel')
print(label_ids[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
print(pred_slot[0, 0:10, 8:18].cpu() == torch.Tensor(none_value_id[8:18]).long().repeat(10, 1))
print(label_ids[0, 0:10, 0:8].cpu() == torch.Tensor(none_value_id[0:8]).long().repeat(10, 1))
print(label_ids[0, 0:10, 18:].cpu() == torch.Tensor(none_value_id[18:]).long().repeat(10, 1))
pdb.set_trace()
if drawfig == True:
#if (len(incorrect_dialogs) < attention_draw):
max_len = input_ids.size(2)
attn_scores = model.attn.get_scores().transpose(1, 2).contiguous().view(label_ids.size(1)*nslot, -1, max_len)
for slot in range(0, nslot):
fig, axs = plt.subplots(nturn, 1, figsize=(50, 10*nturn))
print("Slot", slot)
for turn in range(nturn):
draw(attn_scores[slot*label_ids.size(1)+turn,:,:].cpu(),
tokenizer.convert_ids_to_tokens(input_ids[0][turn].cpu().numpy()),
[*range(0, args.attn_head)], ax=axs[turn])
axs[turn].set_title("turn %d slot: %s label: %s pred: %s"
% (turn, processor.target_slot[slot], str(label_list[slot][label_ids[0][turn][slot].item()]),
str(label_list[slot][pred_slot[0][turn][slot].item()]) ))
plt.show()
plt.savefig(os.path.join(args.output_dir, "attention-d%d-slot%s.png"%(len(incorrect_dialogs), slot)))
plt.close()
if not acc == 1:
dialog = []
for input, label, pred in zip(input_ids[0], label_ids[0], pred_slot[0]):
if label[0] == -1:
break
text = {}
text['input'] = ' '.join(tokenizer.convert_ids_to_tokens(input.cpu().numpy())).replace(' [PAD]', '')
text['label'] = [str(label_list[idx][x]) for idx, x in enumerate(label.cpu().numpy())]
text['pred'] = [str(label_list[idx][x]) for idx, x in enumerate(pred.cpu().numpy())]
dialog.append(text)
incorrect_dialogs.append(dialog)
output_eval_incorr_file = os.path.join(args.output_dir, "incorrect_dialog.txt")
with open(output_eval_incorr_file, "w") as writer:
for dialog in incorrect_dialogs:
for turn in dialog:
text = turn['input'] + '\t'
for label, pred in zip(turn['label'], turn['pred']):
text += '%s\t%s\t'%(label, pred)
writer.write("%s\n" % text)
writer.write("---------- \n")
logger.info("Done analysis: %s" % output_eval_incorr_file)
output_eval_incorr_file = os.path.join(args.output_dir, "per_class_accuracy.txt")
with open(output_eval_incorr_file, "w") as writer:
total_class_acc = 0
total_slot_class_acc = []
nlabels = 0
for sid, slot in enumerate(class_count):
slot_class_acc = 0
for vid, value in enumerate(slot):
if not value == 0:
class_acc = class_correct[sid][vid]/value
writer.write("%s\t%d\t%d\t%.3f\n"%(label_list[sid][vid], class_correct[sid][vid], value, class_acc) )
slot_class_acc += class_acc
nlabels += 1
else:
writer.write("%s\t%d\t%d\t%.3f\n"%(label_list[sid][vid], class_correct[sid][vid], value, -1) )
total_slot_class_acc.append(slot_class_acc/(vid+1))
total_class_acc+=slot_class_acc
total_class_acc /= nlabels
for sid, slot_acc in enumerate(total_slot_class_acc):
writer.write("%d\t%.3f\n" % (sid, slot_acc))
writer.write("total class accuracy \t%.3f\n" % total_class_acc)
logger.info("Done analysis: %s" % output_eval_incorr_file)
print(class_correct)
print(class_count)
if __name__ == "__main__":
    main()
"numpy.sum",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.argmax",
"csv.reader",
"torch.utils.data.RandomSampler",
"pytorch_pretrained_bert.optimization.BertAdam",
"pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained",
"seaborn.heatmap",
"torch.cat",
"torch.cuda.device_count",
"torch.utils.data.TensorDataset",
"torch.distributed.get_world_size",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"matplotlib.pyplot.close",
"torch.load",
"os.path.exists",
"apex.optimizers.FusedAdam",
"apex.optimizers.FP16_Optimizer",
"torch.utils.data.distributed.DistributedSampler",
"torch.Tensor",
"random.seed",
"torch.utils.data.SequentialSampler",
"torch.cuda.set_device",
"matplotlib.pyplot.subplots",
"seaborn.set_context",
"BeliefTrackerSlotQueryMultiSlotTransformer.BeliefTracker",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"torch.manual_seed",
"BeliefTrackerSlotQueryMultiSlotEWC.EWC",
"torch.cuda.is_available",
"apex.parallel.DistributedDataParallel",
"os.listdir",
"tensorboardX.SummaryWriter",
"json.load",
"torch.distributed.init_process_group",
"os.makedirs",
"logging.basicConfig",
"torch.nn.DataParallel",
"torch.cuda.manual_seed_all",
"pdb.set_trace",
"torch.tensor",
"operator.itemgetter",
"logging.getLogger"
] | [((535, 570), 'seaborn.set_context', 'seaborn.set_context', ([], {'context': '"""talk"""'}), "(context='talk')\n", (554, 570), False, 'import seaborn\n'), ((572, 715), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (591, 715), False, 'import logging\n'), ((755, 782), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (772, 782), False, 'import logging\n'), ((11829, 11892), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in features], dtype=torch.long)\n', (11841, 11892), False, 'import torch\n'), ((11912, 11975), 'torch.tensor', 'torch.tensor', (['[f.input_len for f in features]'], {'dtype': 'torch.long'}), '([f.input_len for f in features], dtype=torch.long)\n', (11924, 11975), False, 'import torch\n'), ((11996, 12058), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in features]'], {'dtype': 'torch.long'}), '([f.label_id for f in features], dtype=torch.long)\n', (12008, 12058), False, 'import torch\n'), ((12084, 12151), 'torch.tensor', 'torch.tensor', (['[f.prev_label_id for f in features]'], {'dtype': 'torch.long'}), '([f.prev_label_id for f in features], dtype=torch.long)\n', (12096, 12151), False, 'import torch\n'), ((13909, 13931), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (13918, 13931), True, 'import numpy as np\n'), ((13943, 13968), 'numpy.sum', 'np.sum', (['(outputs == labels)'], {}), '(outputs == labels)\n', (13949, 13968), True, 'import numpy as np\n'), ((14266, 14291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14289, 14291), False, 'import argparse\n'), ((22629, 22672), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (22640, 22672), False, 'import os\n'), ((24097, 24119), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (24108, 24119), False, 'import random\n'), ((24124, 24149), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (24138, 24149), True, 'import numpy as np\n'), ((24154, 24182), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (24171, 24182), False, 'import torch\n'), ((25330, 25391), 'os.path.join', 'os.path.join', (['args.bert_dir', "('%s-vocab.txt' % args.bert_model)"], {}), "(args.bert_dir, '%s-vocab.txt' % args.bert_model)\n", (25342, 25391), False, 'import os\n'), ((25501, 25575), 'pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['vocab_dir'], {'do_lower_case': 'args.do_lower_case'}), '(vocab_dir, do_lower_case=args.do_lower_case)\n', (25530, 25575), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer\n'), ((29016, 29055), 'BeliefTrackerSlotQueryMultiSlotTransformer.BeliefTracker', 'BeliefTracker', (['args', 'num_labels', 'device'], {}), '(args, num_labels, device)\n', (29029, 29055), False, 'from BeliefTrackerSlotQueryMultiSlotTransformer import BeliefTracker\n'), ((29229, 29276), 'torch.load', 'torch.load', (['args.load_path'], {'map_location': 'device'}), '(args.load_path, map_location=device)\n', (29239, 29276), False, 'import torch\n'), ((43141, 43191), 'os.path.join', 'os.path.join', (['args.output_dir', 
'"""pytorch_model.bin"""'], {}), "(args.output_dir, 'pytorch_model.bin')\n", (43153, 43191), False, 'import os\n'), ((43261, 43311), 'torch.load', 'torch.load', (['output_model_file'], {'map_location': 'device'}), '(output_model_file, map_location=device)\n', (43271, 43311), False, 'import torch\n'), ((22435, 22466), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (22449, 22466), False, 'import os\n'), ((22471, 22498), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (22481, 22498), False, 'import os\n'), ((22854, 22908), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["('./%s/%s' % (args.tf_dir, tb_file_name))"], {}), "('./%s/%s' % (args.tf_dir, tb_file_name))\n", (22867, 22908), False, 'from tensorboardX import SummaryWriter\n'), ((22988, 23042), 'os.path.join', 'os.path.join', (['args.output_dir', "('%s.txt' % tb_file_name)"], {}), "(args.output_dir, '%s.txt' % tb_file_name)\n", (23000, 23042), False, 'import os\n'), ((23282, 23307), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (23305, 23307), False, 'import torch\n'), ((23326, 23364), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (23347, 23364), False, 'import torch\n'), ((23382, 23419), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (23394, 23419), False, 'import torch\n'), ((23540, 23592), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (23576, 23592), False, 'import torch\n'), ((24209, 24246), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (24235, 24246), False, 'import torch\n'), ((25403, 25428), 'os.path.exists', 'os.path.exists', (['vocab_dir'], {}), '(vocab_dir)\n', (25417, 25428), False, 'import os\n'), ((26735, 26813), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_len', 'all_label_ids', 'all_prev_label_ids'], {}), '(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)\n', (26748, 26813), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((27003, 27082), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n', (27013, 27082), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((27840, 27938), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids_dev', 'all_input_len_dev', 'all_label_ids_dev', 'all_prev_label_ids_dev'], {}), '(all_input_ids_dev, all_input_len_dev, all_label_ids_dev,\n all_prev_label_ids_dev)\n', (27853, 27938), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((27957, 27984), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dev_data'], {}), '(dev_data)\n', (27974, 27984), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((28010, 28083), 'torch.utils.data.DataLoader', 'DataLoader', (['dev_data'], {'sampler': 'dev_sampler', 'batch_size': 'args.dev_batch_size'}), '(dev_data, sampler=dev_sampler, batch_size=args.dev_batch_size)\n', (28020, 28083), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), 
((30792, 30802), 'apex.parallel.DistributedDataParallel', 'DDP', (['model'], {}), '(model)\n', (30795, 30802), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((33363, 33466), 'BeliefTrackerSlotQueryMultiSlotEWC.EWC', 'EWC', (['model', 'dev_dataloader'], {'oldtask': 'prev_slot_id', 'num_labels': 'num_labels', 'device': 'device', 'n_gpu': 'n_gpu'}), '(model, dev_dataloader, oldtask=prev_slot_id, num_labels=num_labels,\n device=device, n_gpu=n_gpu)\n', (33366, 33466), False, 'from BeliefTrackerSlotQueryMultiSlotEWC import EWC\n'), ((44301, 44329), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (44322, 44329), False, 'import torch\n'), ((45131, 45209), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_len', 'all_label_ids', 'all_prev_label_ids'], {}), '(all_input_ids, all_input_len, all_label_ids, all_prev_label_ids)\n', (45144, 45209), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((45273, 45301), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (45290, 45301), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((45328, 45404), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (45338, 45404), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((45775, 45815), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (45779, 45815), False, 'from tqdm import tqdm, trange\n'), ((50344, 50399), 'os.path.join', 'os.path.join', (['args.output_dir', "('%s.txt' % out_file_name)"], {}), "(args.output_dir, '%s.txt' % out_file_name)\n", (50356, 50399), False, 'import os\n'), ((50971, 50986), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (50984, 50986), False, 'import pdb\n'), ((51972, 52030), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_len', 'all_label_ids'], {}), '(all_input_ids, all_input_len, all_label_ids)\n', (51985, 52030), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((52094, 52122), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (52111, 52122), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((52149, 52206), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': '(1)'}), '(eval_data, sampler=eval_sampler, batch_size=1)\n', (52159, 52206), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((52396, 52436), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (52400, 52436), False, 'from tqdm import tqdm, trange\n'), ((55601, 55654), 'os.path.join', 'os.path.join', (['args.output_dir', '"""incorrect_dialog.txt"""'], {}), "(args.output_dir, 'incorrect_dialog.txt')\n", (55613, 55654), False, 'import os\n'), ((56170, 56225), 'os.path.join', 'os.path.join', (['args.output_dir', '"""per_class_accuracy.txt"""'], {}), "(args.output_dir, 'per_class_accuracy.txt')\n", (56182, 56225), False, 'import os\n'), ((2433, 2483), 'csv.reader', 'csv.reader', (['f'], 
{'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), "(f, delimiter='\\t', quotechar=quotechar)\n", (2443, 2483), False, 'import csv\n'), ((3119, 3141), 'json.load', 'json.load', (['fp_ontology'], {}), '(fp_ontology)\n', (3128, 3141), False, 'import json\n'), ((13115, 13171), 'torch.tensor', 'torch.tensor', (['[f[0] for f in features]'], {'dtype': 'torch.long'}), '([f[0] for f in features], dtype=torch.long)\n', (13127, 13171), False, 'import torch\n'), ((13203, 13259), 'torch.tensor', 'torch.tensor', (['[f[1] for f in features]'], {'dtype': 'torch.long'}), '([f[1] for f in features], dtype=torch.long)\n', (13215, 13259), False, 'import torch\n'), ((26876, 26901), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (26889, 26901), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((26944, 26974), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_data'], {}), '(train_data)\n', (26962, 26974), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((30839, 30867), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (30860, 30867), False, 'import torch\n'), ((32280, 32388), 'apex.optimizers.FusedAdam', 'FusedAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'bias_correction': '(False)', 'max_grad_norm': '(1.0)'}), '(optimizer_grouped_parameters, lr=args.learning_rate,\n bias_correction=False, max_grad_norm=1.0)\n', (32289, 32388), False, 'from apex.optimizers import FusedAdam\n'), ((32749, 32863), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 't_total': 't_total'}), '(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.\n warmup_proportion, t_total=t_total)\n', (32757, 32863), False, 'from pytorch_pretrained_bert.optimization import BertAdam\n'), ((49171, 49217), 'torch.cat', 'torch.cat', (['[eval_acc_slot, prev_eval_acc_slot]'], {}), '([eval_acc_slot, prev_eval_acc_slot])\n', (49180, 49217), False, 'import torch\n'), ((51033, 51140), 'seaborn.heatmap', 'seaborn.heatmap', (['data'], {'xticklabels': 'x', 'square': '(True)', 'yticklabels': 'y', 'vmin': '(0.0)', 'vmax': '(1.0)', 'cbar': '(False)', 'ax': 'ax'}), '(data, xticklabels=x, square=True, yticklabels=y, vmin=0.0,\n vmax=1.0, cbar=False, ax=ax)\n', (51048, 51140), False, 'import seaborn\n'), ((53650, 53665), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (53663, 53665), False, 'import pdb\n'), ((3034, 3089), 'os.path.join', 'os.path.join', (['config.data_dir', '"""ontology_dstc2_en.json"""'], {}), "(config.data_dir, 'ontology_dstc2_en.json')\n", (3046, 3089), False, 'import os\n'), ((3554, 3576), 'json.load', 'json.load', (['fp_ontology'], {}), '(fp_ontology)\n', (3563, 3576), False, 'import json\n'), ((5630, 5665), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (5642, 5665), False, 'import os\n'), ((5849, 5882), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (5861, 5882), False, 'import os\n'), ((6065, 6099), 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), "(data_dir, 'test.tsv')\n", (6077, 6099), False, 'import os\n'), ((31880, 31914), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (31912, 31914), False, 'import torch\n'), ((32552, 32602), 
'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'dynamic_loss_scale': '(True)'}), '(optimizer, dynamic_loss_scale=True)\n', (32566, 32602), False, 'from apex.optimizers import FP16_Optimizer\n'), ((32649, 32709), 'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'static_loss_scale': 'args.loss_scale'}), '(optimizer, static_loss_scale=args.loss_scale)\n', (32663, 32709), False, 'from apex.optimizers import FP16_Optimizer\n'), ((33731, 33771), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (33735, 33771), False, 'from tqdm import tqdm, trange\n'), ((36887, 36926), 'tqdm.tqdm', 'tqdm', (['dev_dataloader'], {'desc': '"""Validation"""'}), "(dev_dataloader, desc='Validation')\n", (36891, 36926), False, 'from tqdm import tqdm, trange\n'), ((42061, 42111), 'os.path.join', 'os.path.join', (['args.output_dir', '"""pytorch_model.bin"""'], {}), "(args.output_dir, 'pytorch_model.bin')\n", (42073, 42111), False, 'import os\n'), ((44381, 44409), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (44407, 44409), False, 'import torch\n'), ((46086, 46101), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (46099, 46101), False, 'import torch\n'), ((49350, 49372), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (49369, 49372), False, 'import operator\n'), ((50926, 50954), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (50952, 50954), False, 'import torch\n'), ((52646, 52661), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (52659, 52661), False, 'import torch\n'), ((3478, 3524), 'os.path.join', 'os.path.join', (['config.data_dir', '"""ontology.json"""'], {}), "(config.data_dir, 'ontology.json')\n", (3490, 3524), False, 'import os\n'), ((23207, 23232), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23230, 23232), False, 'import torch\n'), ((37353, 37368), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (37366, 37368), False, 'import torch\n'), ((54005, 54053), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nturn', '(1)'], {'figsize': '(50, 10 * nturn)'}), '(nturn, 1, figsize=(50, 10 * nturn))\n', (54017, 54053), True, 'import matplotlib.pyplot as plt\n'), ((54732, 54742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54740, 54742), True, 'import matplotlib.pyplot as plt\n'), ((54885, 54896), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (54894, 54896), True, 'import matplotlib.pyplot as plt\n'), ((53261, 53294), 'torch.Tensor', 'torch.Tensor', (['none_value_id[8:18]'], {}), '(none_value_id[8:18])\n', (53273, 53294), False, 'import torch\n'), ((53369, 53402), 'torch.Tensor', 'torch.Tensor', (['none_value_id[8:18]'], {}), '(none_value_id[8:18])\n', (53381, 53402), False, 'import torch\n'), ((53476, 53508), 'torch.Tensor', 'torch.Tensor', (['none_value_id[0:8]'], {}), '(none_value_id[0:8])\n', (53488, 53508), False, 'import torch\n'), ((53582, 53614), 'torch.Tensor', 'torch.Tensor', (['none_value_id[18:]'], {}), '(none_value_id[18:])\n', (53594, 53614), False, 'import torch\n')] |
from typing import Any, Dict, List
from mypy_extensions import TypedDict
from typing_extensions import Protocol
ActionPayload = List[Dict[str, Any]]
ActionPayloadWithLabel = TypedDict(
"ActionPayloadWithLabel", {"action": str, "data": ActionPayload}
)
Payload = List[ActionPayloadWithLabel]
ActionResult = TypedDict("ActionResult", {"success": bool, "message": str})
class Action(Protocol): # pragma: no cover
"""
Interface for action component.
The handle_request method raises ActionException or PermissionDenied if
the request fails.
"""
def handle_request(self, payload: Payload, user_id: int) -> List[ActionResult]:
...
| [
"mypy_extensions.TypedDict"
] | [((176, 251), 'mypy_extensions.TypedDict', 'TypedDict', (['"""ActionPayloadWithLabel"""', "{'action': str, 'data': ActionPayload}"], {}), "('ActionPayloadWithLabel', {'action': str, 'data': ActionPayload})\n", (185, 251), False, 'from mypy_extensions import TypedDict\n'), ((313, 373), 'mypy_extensions.TypedDict', 'TypedDict', (['"""ActionResult"""', "{'success': bool, 'message': str}"], {}), "('ActionResult', {'success': bool, 'message': str})\n", (322, 373), False, 'from mypy_extensions import TypedDict\n')] |
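The Action interface above is a structural (duck-typed) protocol: any class with a matching handle_request method satisfies it without inheriting from it. The sketch below is not part of the original module — EchoAction and its payload are hypothetical — and it assumes the definitions above are in scope.

class EchoAction:
    """Hypothetical action that acknowledges every payload element."""

    def handle_request(self, payload: Payload, user_id: int) -> List[ActionResult]:
        return [
            {"success": True, "message": f"user {user_id} handled {element['action']}"}
            for element in payload
        ]


# Structural typing: EchoAction never inherits from Action, yet counts as one.
action: Action = EchoAction()
print(action.handle_request([{"action": "echo.create", "data": [{"value": 1}]}], user_id=7))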
import numpy as np
import os
import re
import cPickle
class read_cifar10(object):
def __init__(self, data_path=None, is_training=True):
self.data_path = data_path
self.is_training = is_training
def load_data(self):
files = os.listdir(self.data_path)
if self.is_training is True:
pattern = re.compile('(data_batch_).')
to_read = [m.group(0) for i in files for m in [pattern.search(i)] if m]
data = []
labels = []
for t in to_read:
with open(self.data_path+'/'+t, 'rb') as f:
d = cPickle.load(f)
data.append(d['data'])
labels.append(d['labels'])
data = np.vstack(data)
labels = np.hstack(labels)
else:
            with open(self.data_path+'/test_batch', 'rb') as f:
d = cPickle.load(f)
data = d['data']
labels = d['labels']
return data, labels
| [
"cPickle.load",
"numpy.hstack",
"os.listdir",
"numpy.vstack",
"re.compile"
] | [((243, 269), 'os.listdir', 'os.listdir', (['self.data_path'], {}), '(self.data_path)\n', (253, 269), False, 'import os\n'), ((320, 348), 're.compile', 're.compile', (['"""(data_batch_)."""'], {}), "('(data_batch_).')\n", (330, 348), False, 'import re\n'), ((655, 670), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (664, 670), True, 'import numpy as np\n'), ((686, 703), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (695, 703), True, 'import numpy as np\n'), ((779, 794), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (791, 794), False, 'import cPickle\n'), ((555, 570), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (567, 570), False, 'import cPickle\n')] |
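A hypothetical usage sketch for the loader above (Python 2, since it relies on cPickle). The directory path is an assumption, and the reshape follows the standard CIFAR-10 layout of 3072-byte rows stored channel-first as 3x32x32.

# Assumes the read_cifar10 class above is in scope and that the path below
# points at the extracted "python version" CIFAR-10 batches (assumption).
reader = read_cifar10(data_path='./cifar-10-batches-py', is_training=True)
data, labels = reader.load_data()                            # (50000, 3072), (50000,)
images = data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)   # NHWC, ready for plotting
print(images.shape, labels.shape)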
# Generated by Django 2.1.4 on 2019-01-25 12:49
import datetime
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assessment', '0007_answer_is_correct_choice'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='correct_choices',
),
migrations.AddField(
model_name='assessment',
name='multi_times',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='score',
name='history',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
),
migrations.AlterField(
model_name='answer',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='assessment',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='question',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='question',
name='mark',
field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19),
),
migrations.AlterField(
model_name='score',
name='assessment_score',
field=models.DecimalField(decimal_places=10, default=0.0, max_digits=19),
),
migrations.AlterField(
model_name='score',
name='created_at',
field=models.DateField(default=datetime.date(2019, 1, 25)),
),
migrations.AlterField(
model_name='score',
name='status',
field=models.CharField(blank=True, default='started', max_length=250),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"datetime.date",
"django.db.models.BooleanField",
"django.db.models.DecimalField"
] | [((304, 373), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""question"""', 'name': '"""correct_choices"""'}), "(model_name='question', name='correct_choices')\n", (326, 373), False, 'from django.db import migrations, models\n'), ((526, 560), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (545, 560), False, 'from django.db import migrations, models\n'), ((1400, 1466), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(10)', 'default': '(0.0)', 'max_digits': '(19)'}), '(decimal_places=10, default=0.0, max_digits=19)\n', (1419, 1466), False, 'from django.db import migrations, models\n'), ((1597, 1663), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(10)', 'default': '(0.0)', 'max_digits': '(19)'}), '(decimal_places=10, default=0.0, max_digits=19)\n', (1616, 1663), False, 'from django.db import migrations, models\n'), ((1961, 2024), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '"""started"""', 'max_length': '(250)'}), "(blank=True, default='started', max_length=250)\n", (1977, 2024), False, 'from django.db import migrations, models\n'), ((889, 915), 'datetime.date', 'datetime.date', (['(2019)', '(1)', '(25)'], {}), '(2019, 1, 25)\n', (902, 915), False, 'import datetime\n'), ((1071, 1097), 'datetime.date', 'datetime.date', (['(2019)', '(1)', '(25)'], {}), '(2019, 1, 25)\n', (1084, 1097), False, 'import datetime\n'), ((1251, 1277), 'datetime.date', 'datetime.date', (['(2019)', '(1)', '(25)'], {}), '(2019, 1, 25)\n', (1264, 1277), False, 'import datetime\n'), ((1813, 1839), 'datetime.date', 'datetime.date', (['(2019)', '(1)', '(25)'], {}), '(2019, 1, 25)\n', (1826, 1839), False, 'import datetime\n')] |
"""API views for social_network."""
from rest_framework import viewsets
from rest_framework.decorators import api_view, detail_route
from rest_framework.response import Response
from rest_framework.reverse import reverse
from .models import Profile, Post, Vote
from .serializers import ProfileSerializer, PostSerializer
@api_view(['GET'])
def api_root(request, format=None):
"""Root of API, this is useful for documentation generated by DRF."""
return Response({
'profiles': reverse('profile-list', request=request, format=format),
'posts': reverse('post-list', request=request, format=format)
})
class ProfileViewSet(viewsets.ReadOnlyModelViewSet):
"""This provides get and list functionality for Profiles."""
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class PostViewSet(viewsets.ModelViewSet):
"""Get or create Posts.
retrieve:
Return a post given its ID.
list:
Get a paginated list of all Posts.
create:
Create a new Post as the logged-in user.
"""
queryset = Post.objects.all().order_by('-created')
serializer_class = PostSerializer
def perform_create(self, serializer):
"""Create a Post associated with the logged-in user."""
serializer.save(owner=self.request.user.profile)
@detail_route(methods=['POST', 'DELETE'], url_path='vote')
def vote(self, request, pk=None):
"""Vote or unvote on a post."""
post = self.get_object()
if request.method == 'POST':
# check if the vote already exists, if so don't allow the user to vote again
if Vote.objects.filter(profile=self.request.user.profile, post=post).exists():
# the user already voted, just return the post directly
data = PostSerializer(post, context={'request': self.request}).data
return Response(data)
new_vote = Vote(profile=self.request.user.profile, post=post)
new_vote.save()
elif request.method == 'DELETE':
Vote.objects.filter(profile=self.request.user.profile, post=post).delete()
data = PostSerializer(post, context={'request': self.request}).data
return Response(data)
| [
"rest_framework.reverse.reverse",
"rest_framework.decorators.api_view",
"rest_framework.response.Response",
"rest_framework.decorators.detail_route"
] | [((325, 342), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (333, 342), False, 'from rest_framework.decorators import api_view, detail_route\n'), ((1327, 1384), 'rest_framework.decorators.detail_route', 'detail_route', ([], {'methods': "['POST', 'DELETE']", 'url_path': '"""vote"""'}), "(methods=['POST', 'DELETE'], url_path='vote')\n", (1339, 1384), False, 'from rest_framework.decorators import api_view, detail_route\n'), ((2230, 2244), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (2238, 2244), False, 'from rest_framework.response import Response\n'), ((495, 550), 'rest_framework.reverse.reverse', 'reverse', (['"""profile-list"""'], {'request': 'request', 'format': 'format'}), "('profile-list', request=request, format=format)\n", (502, 550), False, 'from rest_framework.reverse import reverse\n'), ((569, 621), 'rest_framework.reverse.reverse', 'reverse', (['"""post-list"""'], {'request': 'request', 'format': 'format'}), "('post-list', request=request, format=format)\n", (576, 621), False, 'from rest_framework.reverse import reverse\n'), ((1892, 1906), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (1900, 1906), False, 'from rest_framework.response import Response\n')] |
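A hedged sketch of exercising the custom vote route above from test code. The /posts/42/vote/ URL assumes the viewset is registered on a DefaultRouter under the posts prefix, and some_user stands in for an existing authenticated User that already has a Profile — both are assumptions, not part of the code above.

from rest_framework.test import APIClient

client = APIClient()
client.force_authenticate(user=some_user)   # some_user: assumed existing User with a Profile
client.post('/posts/42/vote/')              # cast a vote on post 42 (at most once per profile)
client.delete('/posts/42/vote/')            # retract the vote again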
from .Dataset import Dataset
from pathlib import Path
def check_multi(folder):
pathlist = Path(folder).glob('**/*.nc')
for path in pathlist:
outfile = Path(str(path).replace(".nc", ".check"))
try:
with Dataset(path) as i_data:
i_data.uc2_check()
i_data.check_result.to_file(outfile, full=False)
except Exception:
            with open(str(outfile), "w") as text_file:
                text_file.write("Could not read file: "+str(path))
| [
"pathlib.Path"
] | [((96, 108), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (100, 108), False, 'from pathlib import Path\n')] |
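A minimal usage sketch, assuming the package above is importable and the folder exists: every foo.nc found recursively gains a sibling foo.check report, including a short error note when the file cannot be read.

# Hypothetical folder; adjust to wherever the NetCDF files actually live.
check_multi('/data/uc2/observations')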
import ast
from collections import OrderedDict
from .codegen import to_source
from .function_compiler_ast import timeshift, StandardizeDatesSimple
from dolo.compiler.recipes import recipes
from numba import njit
class NumericModel:
calibration = None
calibration_dict = None
covariances = None
markov_chain = None
def __init__(self, symbolic_model, options=None, infos=None):
self.symbolic = symbolic_model
self.symbols = symbolic_model.symbols
self.variables = sum( [tuple(e) for k,e in self.symbols.items() if k not in ('parameters','shocks','values')], ())
self.options = options if options is not None else {}
self.infos = infos if infos is not None else {}
self.infos['data_layout'] = 'columns'
self.name = self.infos['name']
self.model_type = self.infos['type']
# self.model_spec
self.__update_from_symbolic__()
self.__compile_functions__()
def __update_from_symbolic__(self):
import numpy
# updates calibration according to the symbolic definitions
system = self.symbolic.calibration_dict
from dolo.compiler.triangular_solver import solve_triangular_system
self.calibration_dict = solve_triangular_system( system )
from dolo.compiler.misc import CalibrationDict, calibration_to_vector
calib = calibration_to_vector(self.symbols, self.calibration_dict)
self.calibration = CalibrationDict(self.symbols, calib)
from .symbolic_eval import NumericEval
evaluator = NumericEval(self.calibration_dict)
# read symbolic structure
self.options = evaluator.eval(self.symbolic.options)
distribution = evaluator.eval(self.symbolic.distribution)
discrete_transition = evaluator.eval(self.symbolic.discrete_transition)
covariances = distribution
if distribution is None:
self.covariances = None
else:
self.covariances = numpy.atleast_2d(numpy.array(covariances, dtype=float))
markov_chain = discrete_transition
if markov_chain is None:
self.markov_chain = None
else:
self.markov_chain = [numpy.atleast_2d(numpy.array(tab, dtype=float)) for tab in markov_chain]
def get_calibration(self, pname, *args):
if isinstance(pname, list):
return [ self.get_calibration(p) for p in pname ]
elif isinstance(pname, tuple):
return tuple( [ self.get_calibration(p) for p in pname ] )
elif len(args)>0:
pnames = (pname,) + args
return self.get_calibration(pnames)
group = [g for g in self.symbols.keys() if pname in self.symbols[g]]
try:
group = group[0]
except:
raise Exception('Unknown symbol {}.'.format(pname))
i = self.symbols[group].index(pname)
v = self.calibration[group][i]
return v
def set_calibration(self, *args, **kwargs):
# raise exception if unknown symbol ?
if len(args)==2:
pname, pvalue = args
if isinstance(pname, str):
self.set_calibration(**{pname:pvalue})
else:
# else ignore pname and pvalue
calib = self.symbolic.calibration_dict
calib.update(kwargs)
self.__update_from_symbolic__()
def __str__(self):
from dolo.misc.termcolor import colored
s = u'''
Model object:
------------
- name: "{name}"
- type: "{type}"
- file: "{filename}\n'''.format(**self.infos)
ss = '\n- residuals:\n\n'
res = self.residuals()
# for eqgroup, eqlist in self.symbolic.equations.items():
for eqgroup in res.keys():
eqlist = self.symbolic.equations[eqgroup]
ss += u" {}\n".format(eqgroup)
for i, eq in enumerate(eqlist):
val = res[eqgroup][i]
if abs(val) < 1e-8:
val = 0
vals = '{:.4f}'.format(val)
if abs(val) > 1e-8:
vals = colored(vals, 'red')
# eq = eq.replace('|', u"\u27C2")
ss += u" {eqn:3} : {vals} : {eqs}\n".format(eqn=str(i+1), vals=vals, eqs=eq)
ss += "\n"
s += ss
# import pprint
# s += '- residuals:\n'
# s += pprint.pformat(compute_residuals(self),indent=2, depth=1)
return s
def __repr__(self):
return self.__str__()
@property
def x_bounds(self):
if 'controls_ub' in self.functions:
fun_lb = self.functions['controls_lb']
fun_ub = self.functions['controls_ub']
return [fun_lb, fun_ub]
else:
return None
def residuals(self, calib=None):
if self.model_type == 'dtcscc':
from dolo.algos.dtcscc.steady_state import residuals
return residuals(self, calib)
elif self.model_type == 'dtmscc':
from dolo.algos.dtmscc.steady_state import residuals
return residuals(self, calib)
def eval_formula(self, expr, dataframe=None, calib=None):
from dolo.compiler.eval_formula import eval_formula
if calib is None:
calib = self.calibration
return eval_formula(expr, dataframe=dataframe, context=calib)
def __compile_functions__(self):
from dolo.compiler.function_compiler_ast import compile_function_ast
from dolo.compiler.function_compiler import standard_function
defs = self.symbolic.definitions
# works for fg models only
model_type = self.model_type
if 'auxiliaries' not in self.symbols:
model_type += '_'
else:
# prepare auxiliaries
auxeqs = self.symbolic.equations['auxiliary']
auxdefs = {}
for time in [-1,0,1]:
dd = OrderedDict()
for eq in auxeqs:
lhs, rhs = eq.split('=')
lhs = ast.parse( str.strip(lhs) ).body[0].value
rhs = ast.parse( str.strip(rhs) ).body[0].value
tmp = timeshift(rhs, self.variables, time)
k = timeshift(lhs, self.variables, time)
k = StandardizeDatesSimple(self.variables).visit(k)
v = StandardizeDatesSimple(self.variables).visit(tmp)
dd[to_source(k)] = to_source(v)
auxdefs[time] = dd
recipe = recipes[model_type]
symbols = self.symbols # should match self.symbols
comps = []
functions = {}
original_functions = {}
original_gufunctions = {}
for funname in recipe['specs'].keys():
spec = recipe['specs'][funname]
if funname not in self.symbolic.equations:
if not spec.get('optional'):
raise Exception("The model doesn't contain equations of type '{}'.".format(funname))
else:
continue
if spec.get('target'):
# keep only right-hand side
# TODO: restore recursive definitions
eqs = self.symbolic.equations[funname]
eqs = [eq.split('=')[1] for eq in eqs]
eqs = [str.strip(eq) for eq in eqs]
target_spec = spec.get('target')
n_output = len(self.symbols[target_spec[0]])
# target_short_name = spec.get('target')[2]
if spec.get('recursive') is False:
target_spec = None
else:
target_spec[2] = 'out'
else:
target_spec = None
if spec.get('complementarities'):
# TODO: Rewrite and simplify
comp_spec = spec.get('complementarities')
comp_order = comp_spec['middle']
comp_args = comp_spec['left-right']
comps = []
eqs = []
for i,eq in enumerate(self.symbolic.equations[funname]):
if '|' in eq:
control = self.symbols[comp_order[0]][i]
eq, comp = str.split(eq,'|')
lhs, rhs = decode_complementarity(comp, control)
comps.append([lhs, rhs])
else:
comps.append(['-inf', 'inf'])
eqs.append(eq)
comp_lhs, comp_rhs = zip(*comps)
# fb_names = ['{}_lb'.format(funname), '{}_ub'.format(funname)]
fb_names = ['controls_lb'.format(funname), 'controls_ub'.format(funname)]
ddefs = OrderedDict()
for ag in comp_args:
if ag[0] == 'auxiliaries':
t = ag[1]
ddefs.update(auxdefs[t])
ddefs.update(defs)
lower_bound, gu_lower_bound = compile_function_ast(comp_lhs, symbols, comp_args, funname=fb_names[0],definitions=defs)
upper_bound, gu_upper_bound = compile_function_ast(comp_rhs, symbols, comp_args, funname=fb_names[1],definitions=defs)
n_output = len(comp_lhs)
functions[fb_names[0]] = standard_function(gu_lower_bound, n_output )
functions[fb_names[1]] = standard_function(gu_upper_bound, n_output )
original_functions[fb_names[0]] = lower_bound
original_functions[fb_names[1]] = upper_bound
original_gufunctions[fb_names[0]] = gu_lower_bound
original_gufunctions[fb_names[1]] = gu_upper_bound
# rewrite all equations as rhs - lhs
def filter_equal(eq):
if '=' in eq:
lhs, rhs = str.split(eq,'=')
eq = '{} - ( {} )'.format(rhs, lhs)
eq = str.strip(eq)
return eq
else:
return eq
eqs = [filter_equal(eq) for eq in eqs]
arg_names = recipe['specs'][funname]['eqs']
ddefs = OrderedDict()
for ag in arg_names:
if ag[0] == 'auxiliaries':
t = ag[1]
ddefs.update(auxdefs[t])
ddefs.update(defs)
fun, gufun = compile_function_ast(eqs, symbols, arg_names,
output_names=target_spec, funname=funname, definitions=ddefs,
)
# print("So far so good !")c
n_output = len(eqs)
original_functions[funname] = fun
functions[funname] = standard_function(gufun, n_output )
original_functions[funname] = fun
original_gufunctions[funname] = gufun
self.__original_functions__ = original_functions
self.__original_gufunctions__ = original_gufunctions
self.functions = functions
import re
regex = re.compile("(.*)<=(.*)<=(.*)")
def decode_complementarity(comp, control):
'''
# comp can be either:
- None
- "a<=expr" where a is a controls
- "expr<=a" where a is a control
- "expr1<=a<=expr2"
'''
try:
res = regex.match(comp).groups()
except:
raise Exception("Unable to parse complementarity condition '{}'".format(comp))
res = [r.strip() for r in res]
if res[1] != control:
msg = "Complementarity condition '{}' incorrect. Expected {} instead of {}.".format(comp, control, res[1])
raise Exception(msg)
return [res[0], res[2]]
| [
"dolo.compiler.function_compiler_ast.compile_function_ast",
"dolo.algos.dtmscc.steady_state.residuals",
"dolo.misc.termcolor.colored",
"dolo.compiler.eval_formula.eval_formula",
"dolo.compiler.misc.calibration_to_vector",
"dolo.compiler.triangular_solver.solve_triangular_system",
"numpy.array",
"dolo.compiler.function_compiler.standard_function",
"collections.OrderedDict",
"dolo.compiler.misc.CalibrationDict",
"re.compile"
] | [((11083, 11113), 're.compile', 're.compile', (['"""(.*)<=(.*)<=(.*)"""'], {}), "('(.*)<=(.*)<=(.*)')\n", (11093, 11113), False, 'import re\n'), ((1256, 1287), 'dolo.compiler.triangular_solver.solve_triangular_system', 'solve_triangular_system', (['system'], {}), '(system)\n', (1279, 1287), False, 'from dolo.compiler.triangular_solver import solve_triangular_system\n'), ((1384, 1442), 'dolo.compiler.misc.calibration_to_vector', 'calibration_to_vector', (['self.symbols', 'self.calibration_dict'], {}), '(self.symbols, self.calibration_dict)\n', (1405, 1442), False, 'from dolo.compiler.misc import CalibrationDict, calibration_to_vector\n'), ((1470, 1506), 'dolo.compiler.misc.CalibrationDict', 'CalibrationDict', (['self.symbols', 'calib'], {}), '(self.symbols, calib)\n', (1485, 1506), False, 'from dolo.compiler.misc import CalibrationDict, calibration_to_vector\n'), ((5332, 5386), 'dolo.compiler.eval_formula.eval_formula', 'eval_formula', (['expr'], {'dataframe': 'dataframe', 'context': 'calib'}), '(expr, dataframe=dataframe, context=calib)\n', (5344, 5386), False, 'from dolo.compiler.eval_formula import eval_formula\n'), ((4958, 4980), 'dolo.algos.dtmscc.steady_state.residuals', 'residuals', (['self', 'calib'], {}), '(self, calib)\n', (4967, 4980), False, 'from dolo.algos.dtmscc.steady_state import residuals\n'), ((10220, 10233), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10231, 10233), False, 'from collections import OrderedDict\n'), ((10442, 10553), 'dolo.compiler.function_compiler_ast.compile_function_ast', 'compile_function_ast', (['eqs', 'symbols', 'arg_names'], {'output_names': 'target_spec', 'funname': 'funname', 'definitions': 'ddefs'}), '(eqs, symbols, arg_names, output_names=target_spec,\n funname=funname, definitions=ddefs)\n', (10462, 10553), False, 'from dolo.compiler.function_compiler_ast import compile_function_ast\n'), ((10778, 10812), 'dolo.compiler.function_compiler.standard_function', 'standard_function', (['gufun', 'n_output'], {}), '(gufun, n_output)\n', (10795, 10812), False, 'from dolo.compiler.function_compiler import standard_function\n'), ((2020, 2057), 'numpy.array', 'numpy.array', (['covariances'], {'dtype': 'float'}), '(covariances, dtype=float)\n', (2031, 2057), False, 'import numpy\n'), ((5107, 5129), 'dolo.algos.dtmscc.steady_state.residuals', 'residuals', (['self', 'calib'], {}), '(self, calib)\n', (5116, 5129), False, 'from dolo.algos.dtmscc.steady_state import residuals\n'), ((5952, 5965), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5963, 5965), False, 'from collections import OrderedDict\n'), ((8787, 8800), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8798, 8800), False, 'from collections import OrderedDict\n'), ((9050, 9143), 'dolo.compiler.function_compiler_ast.compile_function_ast', 'compile_function_ast', (['comp_lhs', 'symbols', 'comp_args'], {'funname': 'fb_names[0]', 'definitions': 'defs'}), '(comp_lhs, symbols, comp_args, funname=fb_names[0],\n definitions=defs)\n', (9070, 9143), False, 'from dolo.compiler.function_compiler_ast import compile_function_ast\n'), ((9185, 9278), 'dolo.compiler.function_compiler_ast.compile_function_ast', 'compile_function_ast', (['comp_rhs', 'symbols', 'comp_args'], {'funname': 'fb_names[1]', 'definitions': 'defs'}), '(comp_rhs, symbols, comp_args, funname=fb_names[1],\n definitions=defs)\n', (9205, 9278), False, 'from dolo.compiler.function_compiler_ast import compile_function_ast\n'), ((9358, 9401), 'dolo.compiler.function_compiler.standard_function', 
'standard_function', (['gu_lower_bound', 'n_output'], {}), '(gu_lower_bound, n_output)\n', (9375, 9401), False, 'from dolo.compiler.function_compiler import standard_function\n'), ((9444, 9487), 'dolo.compiler.function_compiler.standard_function', 'standard_function', (['gu_upper_bound', 'n_output'], {}), '(gu_upper_bound, n_output)\n', (9461, 9487), False, 'from dolo.compiler.function_compiler import standard_function\n'), ((2237, 2266), 'numpy.array', 'numpy.array', (['tab'], {'dtype': 'float'}), '(tab, dtype=float)\n', (2248, 2266), False, 'import numpy\n'), ((4119, 4139), 'dolo.misc.termcolor.colored', 'colored', (['vals', '"""red"""'], {}), "(vals, 'red')\n", (4126, 4139), False, 'from dolo.misc.termcolor import colored\n')] |
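A worked illustration (not in the original file) of the complementarity parser defined above: the module-level regex splits 'lower <= control <= upper' into three parts, and the middle token is validated against the expected control name.

# Assumes the module above is imported so decode_complementarity is in scope.
print(decode_complementarity('0 <= c <= x + w', 'c'))   # -> ['0', 'x + w']

# A mismatched control name raises, because res[1] != control:
# decode_complementarity('0 <= c <= x + w', 'i')  -> Exception("Complementarity condition ... incorrect ...")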
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file system implementation using gzip."""
import os
import unittest
from dfvfs.path import gzip_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import gzip_file_system
class GzipFileSystemTest(unittest.TestCase):
"""The unit test for the gzip file system object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = os.path.join(u'test_data', u'syslog.gz')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec)
def testOpenAndClose(self):
"""Test the open and close functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
file_system.Close()
def testFileEntryExistsByPathSpec(self):
"""Test the file entry exists by path specification functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(self._gzip_path_spec))
file_system.Close()
def testGetFileEntryByPathSpec(self):
"""Test the get entry by path specification functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(self._gzip_path_spec)
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.name, u'')
file_system.Close()
def testGetRootFileEntry(self):
"""Test the get root file entry functionality."""
file_system = gzip_file_system.GzipFileSystem(self._resolver_context)
self.assertNotEqual(file_system, None)
file_system.Open(path_spec=self._gzip_path_spec)
file_entry = file_system.GetRootFileEntry()
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.name, u'')
file_system.Close()
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"dfvfs.resolver.context.Context",
"dfvfs.path.os_path_spec.OSPathSpec",
"dfvfs.vfs.gzip_file_system.GzipFileSystem",
"dfvfs.path.gzip_path_spec.GzipPathSpec",
"os.path.join"
] | [((2286, 2301), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2299, 2301), False, 'import unittest\n'), ((491, 508), 'dfvfs.resolver.context.Context', 'context.Context', ([], {}), '()\n', (506, 508), False, 'from dfvfs.resolver import context\n'), ((525, 565), 'os.path.join', 'os.path.join', (['u"""test_data"""', 'u"""syslog.gz"""'], {}), "(u'test_data', u'syslog.gz')\n", (537, 565), False, 'import os\n'), ((582, 625), 'dfvfs.path.os_path_spec.OSPathSpec', 'os_path_spec.OSPathSpec', ([], {'location': 'test_file'}), '(location=test_file)\n', (605, 625), False, 'from dfvfs.path import os_path_spec\n'), ((653, 698), 'dfvfs.path.gzip_path_spec.GzipPathSpec', 'gzip_path_spec.GzipPathSpec', ([], {'parent': 'path_spec'}), '(parent=path_spec)\n', (680, 698), False, 'from dfvfs.path import gzip_path_spec\n'), ((797, 852), 'dfvfs.vfs.gzip_file_system.GzipFileSystem', 'gzip_file_system.GzipFileSystem', (['self._resolver_context'], {}), '(self._resolver_context)\n', (828, 852), False, 'from dfvfs.vfs import gzip_file_system\n'), ((1111, 1166), 'dfvfs.vfs.gzip_file_system.GzipFileSystem', 'gzip_file_system.GzipFileSystem', (['self._resolver_context'], {}), '(self._resolver_context)\n', (1142, 1166), False, 'from dfvfs.vfs import gzip_file_system\n'), ((1496, 1551), 'dfvfs.vfs.gzip_file_system.GzipFileSystem', 'gzip_file_system.GzipFileSystem', (['self._resolver_context'], {}), '(self._resolver_context)\n', (1527, 1551), False, 'from dfvfs.vfs import gzip_file_system\n'), ((1942, 1997), 'dfvfs.vfs.gzip_file_system.GzipFileSystem', 'gzip_file_system.GzipFileSystem', (['self._resolver_context'], {}), '(self._resolver_context)\n', (1973, 1997), False, 'from dfvfs.vfs import gzip_file_system\n')] |
#!/usr/bin/env python3
import ta_vision
from vision.camera import Camera
from color_detection import ColorDetection
import cv2 as cv
import rospy
import time
import math
from geometry_msgs.msg import PointStamped
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import csv
lower_threshold = (160, 190, 220)
upper_threshold = (180, 230, 255)
FW = 320
FH = 240
FOVX = 62.2
FOVY = 48.8
KX = FOVX / FW / 180.0 * math.pi
KY = FOVY / FH / 180.0 * math.pi
# CSV
cam_csv = open('/home/musyafa/Datalog/cam.csv', 'w')
real_csv = open('/home/musyafa/Datalog/real.csv', 'w')
cam_writer = csv.writer(cam_csv)
real_writer = csv.writer(real_csv)
cam_writer.writerow(['Time', 'Cam Position X', 'Cam Position Y', 'Time', 'Real Position X', 'Real Position Y'])
real_writer.writerow(['Time', 'Real Position X', 'Real Position Y'])
waktu = rospy.Time(0)
pose = Pose()
z = 0
roll = 0
pitch = 0
yaw = 0
def models_cb(msg):
global pose, z, roll, pitch, yaw, waktu
pose = msg.pose[1]
z = msg.pose[1].position.z
orientation_list = [msg.pose[1].orientation.x, msg.pose[1].orientation.y, msg.pose[1].orientation.z, msg.pose[1].orientation.w]
(roll, pitch, yaw) = euler_from_quaternion(orientation_list)
waktu = rospy.Time.now()
def trans_data(x, y):
global z, roll, pitch, yaw
x_ = math.tan(KX * x - roll) * z
y_ = math.tan(KY * y - pitch) * z
# x_ = math.tan(KX * x + roll) * z
# y_ = math.tan(KY * y + pitch - 1.57079632679) * z
out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_
out_y = math.cos(yaw) * x_ - math.sin(yaw) * y_
return (out_x, out_y)
if __name__ == "__main__":
try:
rospy.init_node("color_detection")
rate = rospy.Rate(15) # 15 FPS
cam_pos_pub = rospy.Publisher('/datalog/cam', Point, queue_size=5)
real_pos_pub = rospy.Publisher('/datalog/real', Point, queue_size=5)
cam_pub = rospy.Publisher("camera/data", PointStamped, queue_size=10)
rospy.Subscriber('/gazebo/model_states', ModelStates, models_cb)
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
rospy.wait_for_message('/gazebo/model_states', ModelState)
model = ModelState()
model.model_name='pi_cam'
model.pose = pose
model.pose.position.x = -0.5
model.pose.position.y = -0.5
set_state(model_state=model)
cap = Camera(port=5600)
cd = ColorDetection(lower_threshold, upper_threshold)
rospy.loginfo("Wait for camera capture..")
frame = cap.capture()
while frame is None and not rospy.is_shutdown():
rate.sleep()
frame = cap.capture()
rospy.loginfo("Frame captured!")
fps = 30.0
t = time.time()
while not rospy.is_shutdown():
frame = cap.capture()
t_cap = rospy.Time.now()
mask = cd.update(frame)
if cd.centroid:
(cX, cY) = cd.centroid
centroid = PointStamped()
centroid.point.x = cX - 160
centroid.point.y = cY - 120
centroid.point.y = -centroid.point.y
centroid.header.stamp = t_cap
cam_pub.publish(centroid)
(X, Y) = trans_data(centroid.point.x, centroid.point.y)
rospy.loginfo("ERRX: %f; ERRY: %f", X - pose.position.x, Y - pose.position.y)
cam_pos = Point(x=X, y=Y, z=1)
cam_pos_pub.publish(cam_pos)
cam_writer.writerow([t_cap, cam_pos.x, cam_pos.y, waktu, pose.position.x, pose.position.y])
real_writer.writerow([waktu, pose.position.x, pose.position.y])
pose.position.x = pose.position.x + 0.001
pose.position.y = pose.position.y + 0.001
model.pose = pose
set_state(model_state=model)
real_pos_pub.publish(pose.position)
if pose.position.x >= 0.5:
break
if cd.has_centroid:
cv.circle(frame, cd.centroid, 5, 127, -1)
cv.putText(frame, "fps: %.1f" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)
# if cd.has_centroid:
# cv.circle(mask, cd.centroid, 5, 127, -1)
# cv.putText(mask, "fps: %.1f" % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, 0.5, 127, 2)
cv.imshow("Frame", frame)
# cv.imshow("Frame", mask)
key = cv.waitKey(15)
if key == 27:
break
fps = 0.9 * fps + 0.1 * 1 / (time.time() - t)
t = time.time()
rate.sleep()
except rospy.ROSInterruptException:
pass
if cap is not None:
cap.close()
cv.destroyAllWindows()
cam_csv.close()
real_csv.close()
| [
"rospy.Subscriber",
"rospy.ServiceProxy",
"rospy.Time",
"gazebo_msgs.msg.ModelState",
"cv2.imshow",
"rospy.Time.now",
"rospy.Rate",
"rospy.is_shutdown",
"rospy.init_node",
"math.cos",
"cv2.destroyAllWindows",
"geometry_msgs.msg.Pose",
"cv2.circle",
"csv.writer",
"cv2.waitKey",
"math.sin",
"rospy.loginfo",
"cv2.putText",
"geometry_msgs.msg.PointStamped",
"rospy.wait_for_message",
"color_detection.ColorDetection",
"math.tan",
"rospy.Publisher",
"time.time",
"geometry_msgs.msg.Point",
"vision.camera.Camera",
"tf.transformations.euler_from_quaternion"
] | [((845, 864), 'csv.writer', 'csv.writer', (['cam_csv'], {}), '(cam_csv)\n', (855, 864), False, 'import csv\n'), ((879, 899), 'csv.writer', 'csv.writer', (['real_csv'], {}), '(real_csv)\n', (889, 899), False, 'import csv\n'), ((1091, 1104), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (1101, 1104), False, 'import rospy\n'), ((1113, 1119), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (1117, 1119), False, 'from geometry_msgs.msg import Pose\n'), ((1428, 1467), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['orientation_list'], {}), '(orientation_list)\n', (1449, 1467), False, 'from tf.transformations import euler_from_quaternion, quaternion_from_euler\n'), ((1481, 1497), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (1495, 1497), False, 'import rospy\n'), ((4995, 5017), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (5015, 5017), True, 'import cv2 as cv\n'), ((1562, 1585), 'math.tan', 'math.tan', (['(KX * x - roll)'], {}), '(KX * x - roll)\n', (1570, 1585), False, 'import math\n'), ((1599, 1623), 'math.tan', 'math.tan', (['(KY * y - pitch)'], {}), '(KY * y - pitch)\n', (1607, 1623), False, 'import math\n'), ((1902, 1936), 'rospy.init_node', 'rospy.init_node', (['"""color_detection"""'], {}), "('color_detection')\n", (1917, 1936), False, 'import rospy\n'), ((1952, 1966), 'rospy.Rate', 'rospy.Rate', (['(15)'], {}), '(15)\n', (1962, 1966), False, 'import rospy\n'), ((1999, 2051), 'rospy.Publisher', 'rospy.Publisher', (['"""/datalog/cam"""', 'Point'], {'queue_size': '(5)'}), "('/datalog/cam', Point, queue_size=5)\n", (2014, 2051), False, 'import rospy\n'), ((2075, 2128), 'rospy.Publisher', 'rospy.Publisher', (['"""/datalog/real"""', 'Point'], {'queue_size': '(5)'}), "('/datalog/real', Point, queue_size=5)\n", (2090, 2128), False, 'import rospy\n'), ((2147, 2206), 'rospy.Publisher', 'rospy.Publisher', (['"""camera/data"""', 'PointStamped'], {'queue_size': '(10)'}), "('camera/data', PointStamped, queue_size=10)\n", (2162, 2206), False, 'import rospy\n'), ((2216, 2280), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/gazebo/model_states"""', 'ModelStates', 'models_cb'], {}), "('/gazebo/model_states', ModelStates, models_cb)\n", (2232, 2280), False, 'import rospy\n'), ((2302, 2362), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/set_model_state"""', 'SetModelState'], {}), "('/gazebo/set_model_state', SetModelState)\n", (2320, 2362), False, 'import rospy\n'), ((2372, 2430), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/gazebo/model_states"""', 'ModelState'], {}), "('/gazebo/model_states', ModelState)\n", (2394, 2430), False, 'import rospy\n'), ((2447, 2459), 'gazebo_msgs.msg.ModelState', 'ModelState', ([], {}), '()\n', (2457, 2459), False, 'from gazebo_msgs.msg import ModelState\n'), ((2646, 2663), 'vision.camera.Camera', 'Camera', ([], {'port': '(5600)'}), '(port=5600)\n', (2652, 2663), False, 'from vision.camera import Camera\n'), ((2677, 2725), 'color_detection.ColorDetection', 'ColorDetection', (['lower_threshold', 'upper_threshold'], {}), '(lower_threshold, upper_threshold)\n', (2691, 2725), False, 'from color_detection import ColorDetection\n'), ((2735, 2777), 'rospy.loginfo', 'rospy.loginfo', (['"""Wait for camera capture.."""'], {}), "('Wait for camera capture..')\n", (2748, 2777), False, 'import rospy\n'), ((2932, 2964), 'rospy.loginfo', 'rospy.loginfo', (['"""Frame captured!"""'], {}), "('Frame captured!')\n", (2945, 2964), False, 'import rospy\n'), ((2997, 3008), 'time.time', 'time.time', ([], {}), 
'()\n', (3006, 3008), False, 'import time\n'), ((1759, 1772), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (1767, 1772), False, 'import math\n'), ((1790, 1803), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (1798, 1803), False, 'import math\n'), ((1811, 1824), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (1819, 1824), False, 'import math\n'), ((3028, 3047), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3045, 3047), False, 'import rospy\n'), ((3103, 3119), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3117, 3119), False, 'import rospy\n'), ((4337, 4428), 'cv2.putText', 'cv.putText', (['frame', "('fps: %.1f' % fps)", '(240, 230)', 'cv.FONT_HERSHEY_SIMPLEX', '(0.5)', '(127)', '(2)'], {}), "(frame, 'fps: %.1f' % fps, (240, 230), cv.FONT_HERSHEY_SIMPLEX, \n 0.5, 127, 2)\n", (4347, 4428), True, 'import cv2 as cv\n'), ((4631, 4656), 'cv2.imshow', 'cv.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (4640, 4656), True, 'import cv2 as cv\n'), ((4714, 4728), 'cv2.waitKey', 'cv.waitKey', (['(15)'], {}), '(15)\n', (4724, 4728), True, 'import cv2 as cv\n'), ((4853, 4864), 'time.time', 'time.time', ([], {}), '()\n', (4862, 4864), False, 'import time\n'), ((1738, 1751), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (1746, 1751), False, 'import math\n'), ((2844, 2863), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2861, 2863), False, 'import rospy\n'), ((3251, 3265), 'geometry_msgs.msg.PointStamped', 'PointStamped', ([], {}), '()\n', (3263, 3265), False, 'from geometry_msgs.msg import PointStamped\n'), ((3584, 3661), 'rospy.loginfo', 'rospy.loginfo', (['"""ERRX: %f; ERRY: %f"""', '(X - pose.position.x)', '(Y - pose.position.y)'], {}), "('ERRX: %f; ERRY: %f', X - pose.position.x, Y - pose.position.y)\n", (3597, 3661), False, 'import rospy\n'), ((3688, 3708), 'geometry_msgs.msg.Point', 'Point', ([], {'x': 'X', 'y': 'Y', 'z': '(1)'}), '(x=X, y=Y, z=1)\n', (3693, 3708), False, 'from geometry_msgs.msg import Point\n'), ((4283, 4324), 'cv2.circle', 'cv.circle', (['frame', 'cd.centroid', '(5)', '(127)', '(-1)'], {}), '(frame, cd.centroid, 5, 127, -1)\n', (4292, 4324), True, 'import cv2 as cv\n'), ((4820, 4831), 'time.time', 'time.time', ([], {}), '()\n', (4829, 4831), False, 'import time\n')] |
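A standalone numeric check (not part of the original node) of the pixel-to-ground projection that trans_data implements. It repeats the camera constants so it runs without ROS, and assumes a level hover at 1 m altitude.

import math

FW, FOVX, FH, FOVY = 320, 62.2, 240, 48.8
KX = FOVX / FW / 180.0 * math.pi    # horizontal radians per pixel
KY = FOVY / FH / 180.0 * math.pi    # vertical radians per pixel
z, roll, pitch, yaw = 1.0, 0.0, 0.0, 0.0

x_ = math.tan(KX * 160 - roll) * z   # centroid half a frame to the right: tan(31.1 deg) ~= 0.603 m
y_ = math.tan(KY * 0 - pitch) * z    # vertically centered: 0.0 m
out_x = -math.sin(yaw) * x_ - math.cos(yaw) * y_   # = 0.0
out_y = math.cos(yaw) * x_ - math.sin(yaw) * y_    # ~= 0.603
print(out_x, out_y)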
from turtle import Turtle, Screen
my_turtle = Turtle()
screen = Screen()
my_turtle.shape('arrow')
def forward():
my_turtle.forward(10)
def backward():
my_turtle.back(10)
def right():
my_turtle.right(10)
def left():
my_turtle.left(10)
def clear_screen():
my_turtle.penup()
my_turtle.home()
my_turtle.clear()
my_turtle.pendown()
screen.listen()
screen.onkeypress(forward, 'w')
screen.onkeypress(backward, 's')
screen.onkeypress(right, 'd')
screen.onkeypress(left, 'a')
screen.onkeypress(clear_screen, 'c')
screen.exitonclick()
| [
"turtle.Screen",
"turtle.Turtle"
] | [((47, 55), 'turtle.Turtle', 'Turtle', ([], {}), '()\n', (53, 55), False, 'from turtle import Turtle, Screen\n'), ((65, 73), 'turtle.Screen', 'Screen', ([], {}), '()\n', (71, 73), False, 'from turtle import Turtle, Screen\n')] |
"""
`pytest <https://docs.pytest.org/en/latest/>`_ client library integration.
Implements some utilities for mocking out ``xjsonrpc`` library clients.
"""
import asyncio
import collections
import functools as ft
import json
import unittest.mock
from typing import Any, Callable, Dict, Optional, Union
import pytest
import xjsonrpc
from xjsonrpc import Response
from xjsonrpc.common import UNSET, UnsetType
class Match:
"""
Match object. Incorporates request matching information.
"""
def __init__(
self,
endpoint: str,
version: str,
method_name: str,
once: bool,
callback: Optional[Callable],
**response_data: Any,
):
self.endpoint = endpoint
self.version = version
self.method_name = method_name
self.once = once
self.callback = callback
self.response_data = response_data
class PjRpcMocker:
"""
Synchronous JSON-RPC client mocker.
:param target: method to be mocked
:param mocker: mocking package
:param passthrough: pass not mocked requests to the original method
"""
def __init__(self, target, mocker=unittest.mock, passthrough: bool = False):
self._target = target
self._mocker = mocker
self._patcher = None
self._async_resp = False
self._passthrough = passthrough
self._matches: Dict = collections.defaultdict(lambda: collections.defaultdict(list))
self._calls: Dict = collections.defaultdict(dict)
@property
def calls(self) -> Dict:
"""
Dictionary of JSON-PRC method calls.
"""
return self._calls
def add(
self,
endpoint: str,
method_name: str,
result: UnsetType = UNSET,
error: UnsetType = UNSET,
id: Optional[Union[int, str]] = None,
version: str = '2.0',
once: bool = False,
callback: Optional[Callable] = None,
) -> None:
"""
        Appends a response patch. If patches for the same method already exist, they are used in a round-robin way.
:param endpoint: request endpoint
:param method_name: method name
:param result: patched result
:param error: patched error
:param id: patched request id
:param version: patched request version
:param once: if ``True`` the patch will be deleted after the first call
:param callback: patched request callback
"""
match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback)
self._matches[endpoint][(version, method_name)].append(match)
def replace(
self,
endpoint: str,
method_name: str,
result: UnsetType = UNSET,
error: UnsetType = UNSET,
id: Optional[Union[int, str]] = None,
version: str = '2.0',
once: bool = False,
callback: Optional[Callable] = None,
idx: int = 0,
):
"""
Replaces a previously added response patch by a new one.
:param endpoint: request endpoint
:param method_name: method name
:param result: patched result
:param error: patched error
:param id: patched request id
:param version: patched request version
:param once: if ``True`` the patch will be deleted after the first call
:param callback: patched request callback
:param idx: patch index (if there are more than one)
"""
match = Match(endpoint, version, method_name, once, id=id, result=result, error=error, callback=callback)
self._matches[endpoint][(version, method_name)][idx] = match
def remove(self, endpoint: str, method_name: Optional[str] = None, version: str = '2.0'):
"""
Removes a previously added response patch.
:param endpoint: request endpoint
:param method_name: method name
:param version: JSON-RPC request version
:returns: removed response patch
"""
if method_name is None:
result = self._matches.pop(endpoint)
else:
result = self._matches[endpoint].pop((version, method_name))
self._cleanup_matches(endpoint, version, method_name)
return result
def reset(self) -> None:
"""
        Removes all added matches and resets call statistics.
"""
self._matches.clear()
for calls in self._calls.values():
for stub in calls.values():
stub.reset_mock()
self._calls.clear()
def start(self):
"""
Activates a patcher.
"""
patcher = self._mocker.patch(self._target)
with patcher:
if asyncio.iscoroutinefunction(patcher.temp_original):
self._async_resp = True
if self._async_resp:
async def side_effect(*args, **kwargs):
return await self._on_request(*args, **kwargs)
else:
def side_effect(*args, **kwargs):
return self._on_request(*args, **kwargs)
self._patcher = self._mocker.patch(self._target, side_effect=side_effect, autospec=True)
return self._patcher.start()
def stop(self) -> None:
"""
Stop an active patcher.
"""
self.reset()
self._patcher.stop()
def _cleanup_matches(self, endpoint: str, version: str = '2.0', method_name: Optional[str] = None) -> None:
matches = self._matches[endpoint].get((version, method_name))
if not matches:
self._matches[endpoint].pop((version, method_name), None)
if not self._matches[endpoint]:
self._matches.pop(endpoint)
def _on_request(self, origin_self: Any, request_text: str, is_notification: bool = False, **kwargs: Any):
endpoint = origin_self._endpoint
matches = self._matches.get(endpoint)
if matches is None:
if self._passthrough:
return self._patcher.temp_original(origin_self, request_text, is_notification, **kwargs)
else:
raise ConnectionRefusedError()
json_data = json.loads(request_text)
if isinstance(json_data, (list, tuple)):
response = xjsonrpc.BatchResponse()
for request in xjsonrpc.BatchRequest.from_json(json_data):
response.append(
self._match_request(endpoint, request.version, request.method, request.params, request.id),
)
else:
request = xjsonrpc.Request.from_json(json_data)
response = self._match_request(endpoint, request.version, request.method, request.params, request.id)
if self._async_resp:
async def wrapper():
return json.dumps(response.to_json())
return wrapper()
else:
return json.dumps(response.to_json())
def _match_request(
self,
endpoint: str,
version: str,
method_name: str,
params: Optional[Union[list, dict]],
id: Optional[Union[int, str]],
) -> Response:
matches = self._matches[endpoint].get((version, method_name))
if matches is None:
return xjsonrpc.Response(id=id, error=xjsonrpc.exc.MethodNotFoundError(data=method_name))
match = matches.pop(0)
if not match.once:
matches.append(match)
self._cleanup_matches(endpoint, version, method_name)
stub = self.calls[endpoint].setdefault(
(version, method_name),
self._mocker.MagicMock(spec=lambda *args, **kwargs: None, name=f'{endpoint}:{version}:{method_name}'),
)
if isinstance(params, (list, tuple)):
stub(*params)
else:
stub(**params)
if match.callback:
if isinstance(params, (list, tuple)):
result = match.callback(*params)
else:
result = match.callback(**params)
return xjsonrpc.Response(id=id, result=result)
else:
return xjsonrpc.Response(
id=id or match.response_data['id'],
result=match.response_data['result'],
error=match.response_data['error'],
)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.reset()
# shortcuts
PjRpcRequestsMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.requests.Client._request')
PjRpcAiohttpMocker = ft.partial(PjRpcMocker, target='xjsonrpc.client.backend.aiohttp.Client._request')
@pytest.fixture
def xjsonrpc_requests_mocker():
"""
Requests client mocking fixture.
"""
with PjRpcRequestsMocker() as mocker:
yield mocker
@pytest.fixture
def xjsonrpc_aiohttp_mocker():
"""
Aiohttp client mocking fixture.
"""
with PjRpcAiohttpMocker() as mocker:
yield mocker
| [
"functools.partial",
"xjsonrpc.BatchResponse",
"json.loads",
"xjsonrpc.Response",
"asyncio.iscoroutinefunction",
"xjsonrpc.Request.from_json",
"collections.defaultdict",
"xjsonrpc.exc.MethodNotFoundError",
"xjsonrpc.BatchRequest.from_json"
] | [((8518, 8605), 'functools.partial', 'ft.partial', (['PjRpcMocker'], {'target': '"""xjsonrpc.client.backend.requests.Client._request"""'}), "(PjRpcMocker, target=\n 'xjsonrpc.client.backend.requests.Client._request')\n", (8528, 8605), True, 'import functools as ft\n'), ((8622, 8708), 'functools.partial', 'ft.partial', (['PjRpcMocker'], {'target': '"""xjsonrpc.client.backend.aiohttp.Client._request"""'}), "(PjRpcMocker, target=\n 'xjsonrpc.client.backend.aiohttp.Client._request')\n", (8632, 8708), True, 'import functools as ft\n'), ((1494, 1523), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (1517, 1523), False, 'import collections\n'), ((6194, 6218), 'json.loads', 'json.loads', (['request_text'], {}), '(request_text)\n', (6204, 6218), False, 'import json\n'), ((4759, 4809), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['patcher.temp_original'], {}), '(patcher.temp_original)\n', (4786, 4809), False, 'import asyncio\n'), ((6292, 6316), 'xjsonrpc.BatchResponse', 'xjsonrpc.BatchResponse', ([], {}), '()\n', (6314, 6316), False, 'import xjsonrpc\n'), ((6344, 6386), 'xjsonrpc.BatchRequest.from_json', 'xjsonrpc.BatchRequest.from_json', (['json_data'], {}), '(json_data)\n', (6375, 6386), False, 'import xjsonrpc\n'), ((6588, 6625), 'xjsonrpc.Request.from_json', 'xjsonrpc.Request.from_json', (['json_data'], {}), '(json_data)\n', (6614, 6625), False, 'import xjsonrpc\n'), ((8057, 8096), 'xjsonrpc.Response', 'xjsonrpc.Response', ([], {'id': 'id', 'result': 'result'}), '(id=id, result=result)\n', (8074, 8096), False, 'import xjsonrpc\n'), ((8131, 8263), 'xjsonrpc.Response', 'xjsonrpc.Response', ([], {'id': "(id or match.response_data['id'])", 'result': "match.response_data['result']", 'error': "match.response_data['error']"}), "(id=id or match.response_data['id'], result=match.\n response_data['result'], error=match.response_data['error'])\n", (8148, 8263), False, 'import xjsonrpc\n'), ((1435, 1464), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1458, 1464), False, 'import collections\n'), ((7311, 7361), 'xjsonrpc.exc.MethodNotFoundError', 'xjsonrpc.exc.MethodNotFoundError', ([], {'data': 'method_name'}), '(data=method_name)\n', (7343, 7361), False, 'import xjsonrpc\n')] |
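A hedged usage sketch of the requests-backend fixture defined above. The endpoint and method names are invented, and the client-side call style (client.proxy.sum(...)) follows the upstream pjrpc-style API that xjsonrpc mirrors — treat that call as an assumption rather than a quote of the library.

import xjsonrpc.client.backend.requests as xjsonrpc_requests_client


def test_sum(xjsonrpc_requests_mocker):
    # Patch the endpoint: every JSON-RPC call to "sum" answers with result=3.
    xjsonrpc_requests_mocker.add('http://example.test/api/v1', 'sum', result=3)

    client = xjsonrpc_requests_client.Client('http://example.test/api/v1')
    assert client.proxy.sum(1, 2) == 3

    # Call statistics are recorded per (version, method) pair as MagicMock stubs.
    stub = xjsonrpc_requests_mocker.calls['http://example.test/api/v1'][('2.0', 'sum')]
    stub.assert_called_once_with(1, 2)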
# load in data
import helper
import numpy as np
import torch
import torch.nn as nn
from string import punctuation
from collections import Counter
from torch.utils.data import TensorDataset, DataLoader
data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)
# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('No GPU found. Please use a GPU to train your neural network.')
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
return vocab_to_int, int_to_vocab
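# Quick sanity check of create_lookup_tables on a toy corpus (illustrative only;
# the real tables are built from the Seinfeld scripts during preprocessing below).
_demo_vocab_to_int, _demo_int_to_vocab = create_lookup_tables(['hello', 'world', 'hello'])
assert _demo_vocab_to_int['hello'] == 0  # the most frequent word gets the lowest id
assert _demo_int_to_vocab[0] == 'hello'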
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenized dictionary where the key is the punctuation and the value is the token
"""
return {
'.': '||PERIOD||',
',': '||COMMA||',
'"': '||QUOTATION_MARK||',
';': '||SEMICOLON||',
'!': '||EXCLAMATION_MARK||',
'?': '||QUESTION_MARK||',
'(': '||LEFT_PAREN||',
')': '||RIGHT_PAREN||',
'-': '||DASH||',
'\n': '||RETURN||',
}
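# Illustration (comments only): helper.preprocess_and_save_data is expected to replace each
# punctuation character with its token surrounded by spaces, e.g.
#   "hey, jerry!"  ->  "hey ||COMMA|| jerry ||EXCLAMATION_MARK||"   (then lower-cased)
# so punctuation ends up as standalone vocabulary entries instead of sticking to words.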
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
def batch_data(words, sequence_length, batch_size):
"""
Batch the neural network data using DataLoader
:param words: The word ids of the TV scripts
:param sequence_length: The sequence length of each batch
:param batch_size: The size of each batch; the number of sequences in a batch
:return: DataLoader with batched data
"""
n_batches = len(words)//batch_size
words = words[:n_batches*batch_size]
features = []
targets = []
total = len(words)-sequence_length
for idx in range(0, total):
x = words[idx:idx+sequence_length]
features.append(x)
y = words[idx+sequence_length]
targets.append(y)
train_x = np.array(features)
train_y = np.array(targets)
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
train_loader = DataLoader(train_data, shuffle=False, batch_size=batch_size)
# return a dataloader
return train_loader
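# Minimal sketch of how batch_data behaves, using toy word ids (the real loader
# built below uses int_text from the preprocessed scripts).
_demo_loader = batch_data(list(range(20)), sequence_length=4, batch_size=2)
_demo_x, _demo_y = next(iter(_demo_loader))
assert _demo_x.shape == (2, 4)  # one batch: 2 sequences of 4 word ids
assert _demo_y.shape == (2,)    # the word id that follows each sequence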
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
print(token_dict)
print(int_text[:10])
print(list(vocab_to_int.values())[:10])
print(list(int_to_vocab.values())[:10])
class RNN(nn.Module):
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
"""
Initialize the PyTorch RNN Module
:param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
:param output_size: The number of output dimensions of the neural network
:param embedding_dim: The size of embeddings, should you choose to use them
:param hidden_dim: The size of the hidden layer outputs
:param dropout: dropout to add in between LSTM/GRU layers
"""
super(RNN, self).__init__()
# set class variables
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# define model layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=dropout, batch_first=True)
self.fc = nn.Linear(hidden_dim, output_size)
self.dropout = nn.Dropout(dropout)
def forward(self, nn_input, hidden):
"""
Forward propagation of the neural network
:param nn_input: The input to the neural network
:param hidden: The hidden state
:return: Two Tensors, the output of the neural network and the latest hidden state
"""
batch_size = nn_input.size(0)
x = self.embedding(nn_input)
x,h = self.lstm(x, hidden)
x = x.contiguous().view(-1, self.hidden_dim)
# x = self.dropout(x)
x = self.fc(x)
x = x.view(batch_size, -1, self.output_size)
x = x[:, -1]
# return one batch of output word scores and the hidden state
return x, h
def init_hidden(self, batch_size):
'''
Initialize the hidden state of an LSTM/GRU
:param batch_size: The batch_size of the hidden state
:return: hidden state of dims (n_layers, batch_size, hidden_dim)
'''
# Implement function
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
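# Shape sketch (comments only, CPU case): for a batch of word-id sequences of shape
# (batch_size, sequence_length), forward() returns next-word scores of shape
# (batch_size, output_size) plus the LSTM hidden state, e.g.
#   rnn = RNN(vocab_size=10, output_size=10, embedding_dim=8, hidden_dim=16, n_layers=2)
#   out, h = rnn(torch.zeros(3, 5, dtype=torch.long), rnn.init_hidden(3))
#   out.shape -> torch.Size([3, 10])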
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
"""
Forward and backward propagation on the neural network
:param decoder: The PyTorch Module that holds the neural network
:param decoder_optimizer: The PyTorch optimizer for the neural network
:param criterion: The PyTorch loss function
:param inp: A batch of input to the neural network
:param target: The target output for the batch of input
:return: The loss and the latest hidden state Tensor
"""
# move data to GPU, if available
if train_on_gpu:
inp, target = inp.cuda(), target.cuda()
# perform backpropagation and optimization
h = tuple([each.data for each in hidden])
rnn.zero_grad()
output, h = rnn(inp, h)
loss = criterion(output, target)
loss.backward()
nn.utils.clip_grad_norm_(rnn.parameters(), 5)
optimizer.step()
# return the loss over a batch and the hidden state produced by our model
return loss.item(), h
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
batch_losses = []
rnn.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
# initialize hidden state
hidden = rnn.init_hidden(batch_size)
for batch_i, (inputs, labels) in enumerate(train_loader, 1):
# make sure you iterate over completely full batches, only
n_batches = len(train_loader.dataset)//batch_size
if(batch_i > n_batches):
break
# forward, back prop
loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
# record loss
batch_losses.append(loss)
# printing loss stats
if batch_i % show_every_n_batches == 0:
print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
epoch_i, n_epochs, np.average(batch_losses)))
batch_losses = []
# returns a trained rnn
return rnn
# Data params
# Sequence Length
sequence_length = 8 # of words in a sequence
# Batch Size
batch_size = 100
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# Training parameters
# Number of Epochs
num_epochs = 5
# Learning Rate
learning_rate = 0.001
# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size
output_size = vocab_size
# Embedding Dimension
embedding_dim = 128
# Hidden Dimension
hidden_dim = 512
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 500
# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model
helper.save_model('./trained_tv_script', trained_rnn)
print('Model Trained and Saved')
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./trained_tv_script')
import torch.nn.functional as F
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
"""
Generate text using the neural network
:param decoder: The PyTorch Module that holds the trained neural network
:param prime_id: The word id to start the first prediction
:param int_to_vocab: Dict of word id keys to word values
:param token_dict: Dict of punctuation token keys to punctuation values
:param pad_value: The value used to pad a sequence
:param predict_len: The length of text to generate
:return: The generated text
"""
rnn.eval()
# create a sequence (batch_size=1) with the prime_id
current_seq = np.full((1, sequence_length), pad_value)
current_seq[-1][-1] = prime_id
predicted = [int_to_vocab[prime_id]]
for _ in range(predict_len):
if train_on_gpu:
current_seq = torch.LongTensor(current_seq).cuda()
else:
current_seq = torch.LongTensor(current_seq)
# initialize the hidden state
hidden = rnn.init_hidden(current_seq.size(0))
# get the output of the rnn
output, _ = rnn(current_seq, hidden)
# get the next word probabilities
p = F.softmax(output, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# use top_k sampling to get the index of the next word
top_k = 5
p, top_i = p.topk(top_k)
top_i = top_i.numpy().squeeze()
# select the likely next word index with some element of randomness
p = p.numpy().squeeze()
word_i = np.random.choice(top_i, p=p/p.sum())
# retrieve that word from the dictionary
word = int_to_vocab[word_i]
predicted.append(word)
# the generated word becomes the next "current sequence" and the cycle can continue
current_seq = np.roll(current_seq, -1, 1)
current_seq[-1][-1] = word_i
gen_sentences = ' '.join(predicted)
# Replace punctuation tokens
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
gen_sentences = gen_sentences.replace('\n ', '\n')
gen_sentences = gen_sentences.replace('( ', '(')
# return all the sentences
return gen_sentences
# run the cell multiple times to get different results!
gen_length = 400 # modify the length to your preference
prime_word = 'jerry' # name for starting the script
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
print(generated_script) | [
"torch.nn.Dropout",
"torch.nn.Embedding",
"numpy.full",
"torch.utils.data.DataLoader",
"helper.save_model",
"helper.load_preprocess",
"torch.nn.Linear",
"collections.Counter",
"torch.nn.LSTM",
"numpy.average",
"numpy.roll",
"torch.cuda.is_available",
"torch.from_numpy",
"helper.load_data",
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"numpy.array",
"helper.preprocess_and_save_data",
"helper.load_model"
] | [((251, 277), 'helper.load_data', 'helper.load_data', (['data_dir'], {}), '(data_dir)\n', (267, 277), False, 'import helper\n'), ((312, 337), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (335, 337), False, 'import torch\n'), ((1432, 1509), 'helper.preprocess_and_save_data', 'helper.preprocess_and_save_data', (['data_dir', 'token_lookup', 'create_lookup_tables'], {}), '(data_dir, token_lookup, create_lookup_tables)\n', (1463, 1509), False, 'import helper\n'), ((2518, 2542), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (2540, 2542), False, 'import helper\n'), ((8225, 8246), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8244, 8246), True, 'import torch.nn as nn\n'), ((8394, 8447), 'helper.save_model', 'helper.save_model', (['"""./trained_tv_script"""', 'trained_rnn'], {}), "('./trained_tv_script', trained_rnn)\n", (8411, 8447), False, 'import helper\n'), ((8527, 8551), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (8549, 8551), False, 'import helper\n'), ((8566, 8606), 'helper.load_model', 'helper.load_model', (['"""./trained_tv_script"""'], {}), "('./trained_tv_script')\n", (8583, 8606), False, 'import helper\n'), ((656, 669), 'collections.Counter', 'Counter', (['text'], {}), '(text)\n', (663, 669), False, 'from collections import Counter\n'), ((2200, 2218), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2208, 2218), True, 'import numpy as np\n'), ((2233, 2250), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (2241, 2250), True, 'import numpy as np\n'), ((2355, 2415), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'shuffle': '(False)', 'batch_size': 'batch_size'}), '(train_data, shuffle=False, batch_size=batch_size)\n', (2365, 2415), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((9292, 9332), 'numpy.full', 'np.full', (['(1, sequence_length)', 'pad_value'], {}), '((1, sequence_length), pad_value)\n', (9299, 9332), True, 'import numpy as np\n'), ((2282, 2307), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (2298, 2307), False, 'import torch\n'), ((2309, 2334), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (2325, 2334), False, 'import torch\n'), ((3504, 3543), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (3516, 3543), True, 'import torch.nn as nn\n'), ((3564, 3643), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim', 'n_layers'], {'dropout': 'dropout', 'batch_first': '(True)'}), '(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True)\n', (3571, 3643), True, 'import torch.nn as nn\n'), ((3691, 3725), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'output_size'], {}), '(hidden_dim, output_size)\n', (3700, 3725), True, 'import torch.nn as nn\n'), ((3749, 3768), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3759, 3768), True, 'import torch.nn as nn\n'), ((10475, 10502), 'numpy.roll', 'np.roll', (['current_seq', '(-1)', '(1)'], {}), '(current_seq, -1, 1)\n', (10482, 10502), True, 'import numpy as np\n'), ((9571, 9600), 'torch.LongTensor', 'torch.LongTensor', (['current_seq'], {}), '(current_seq)\n', (9587, 9600), False, 'import torch\n'), ((9831, 9855), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (9840, 9855), True, 'import torch.nn.functional as F\n'), ((9494, 9523), 'torch.LongTensor', 'torch.LongTensor', 
(['current_seq'], {}), '(current_seq)\n', (9510, 9523), False, 'import torch\n'), ((7238, 7262), 'numpy.average', 'np.average', (['batch_losses'], {}), '(batch_losses)\n', (7248, 7262), True, 'import numpy as np\n')] |
import rospy
import service_utils as su
from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest
from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest
def start_behaviour(name):
su.call_service(
"/naoqi_driver/behaviour_manager/start_behaviour",
BehaviorManagerControl,
BehaviorManagerControlRequest(name=name)
)
def stop_behaviour(name):
su.call_service(
"/naoqi_driver/behaviour_manager/stop_behaviour",
BehaviorManagerControl,
BehaviorManagerControlRequest(name=name)
)
def toggle_behaviour(name):
try:
start_behaviour(name)
except:
pass
try:
stop_behaviour(name)
except:
pass
def wait_for_behaviour(name):
while not rospy.is_shutdown():
if name not in su.call_service(
"/naoqi_driver/behaviour_manager/get_running_behaviors",
BehaviorManagerInfo,
BehaviorManagerInfoRequest()
).behaviors:
return
else:
rospy.sleep(.01)
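# Example usage (the behaviour name is illustrative; installed behaviours differ per robot):
#   toggle_behaviour('animations/Stand/Gestures/Hey_1')
#   wait_for_behaviour('animations/Stand/Gestures/Hey_1')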
| [
"nao_interaction_msgs.srv.BehaviorManagerControlRequest",
"rospy.is_shutdown",
"nao_interaction_msgs.srv.BehaviorManagerInfoRequest",
"rospy.sleep"
] | [((365, 405), 'nao_interaction_msgs.srv.BehaviorManagerControlRequest', 'BehaviorManagerControlRequest', ([], {'name': 'name'}), '(name=name)\n', (394, 405), False, 'from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest\n'), ((559, 599), 'nao_interaction_msgs.srv.BehaviorManagerControlRequest', 'BehaviorManagerControlRequest', ([], {'name': 'name'}), '(name=name)\n', (588, 599), False, 'from nao_interaction_msgs.srv import BehaviorManagerControl, BehaviorManagerControlRequest\n'), ((809, 828), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (826, 828), False, 'import rospy\n'), ((1091, 1108), 'rospy.sleep', 'rospy.sleep', (['(0.01)'], {}), '(0.01)\n', (1102, 1108), False, 'import rospy\n'), ((996, 1024), 'nao_interaction_msgs.srv.BehaviorManagerInfoRequest', 'BehaviorManagerInfoRequest', ([], {}), '()\n', (1022, 1024), False, 'from nao_interaction_msgs.srv import BehaviorManagerInfo, BehaviorManagerInfoRequest\n')] |
# -*- coding: utf-8 -*-
"""Next-Word Prediction using Universal Sentence Encoder.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL
# **Google drive for local storage**
_NB: All comments are written to make evaluation of the model as smooth as possible for the **Current User**._
Uncomment the code under **PREVIEW OUTPUT** to inspect intermediate results.
"""
# Commented out IPython magic to ensure Python compatibility.
# This cell will prompt an external url to accept permissions for Colab to access Google Drive
from google.colab import drive
drive.mount("/gdrive")
# %ls
"""# **Import ***"""
# Getting all required libraries
import os
import re
import gdown
import numpy
import string
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from absl import logging
import tensorflow_hub as hub
from tensorflow import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
import tensorflow.keras.backend as K
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Activation
from keras.callbacks import LambdaCallback
from keras.utils.data_utils import get_file
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
"""## **Data preparation - _Generating Corpus_**"""
# Download data from Google drive
'''
ORIGINAL DATASET URL:
https://raw.githubusercontent.com/maxim5/stanford-tensorflow-tutorials/master/data/arxiv_abstracts.txt
'''
url = ' https://drive.google.com/uc?id=1YTBR7FiXssaKXHhOZbUbwoWw6jzQxxKW'
output = 'corpus.txt'
gdown.download(url, output, quiet=False)
# sentence_length = 40
# Read local file from directory
with open('corpus.txt') as subject:
cache = subject.readlines()
translator = str.maketrans('', '', string.punctuation) # Remove punctuation
lines = [doc.lower().translate(translator) for doc in cache] # Switch to lower case
# PREVIEW OUTPUT ::
# print(lines[0][:100])
# len(lines)
# Generate an list of single/independent words
vocabulary = list(set(' '.join(lines).replace('\n','').split(' ')))
primary_store = {}
for strings, texts in enumerate(vocabulary):
primary_store[texts] = strings
# PREVIEW OUTPUT ::
# print(vocabulary[:50])
# len(vocabulary)
# Splitting data into Train sets and test sets
X = []
y = []
for c in lines:
xxxx = c.replace('\n','').split(' ')
X.append(' '.join(xxxx[:-1])) # X from the corpus
yyyy = [0 for i in range(len(vocabulary))] # Generate Y from the Vocabulary
# yyyy[primary_store[xxxx[-1]]] = 1
yyyy[primary_store[xxxx[-1]]] = 1
y.append(yyyy)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
y_test = numpy.array(y_test)
y_train = numpy.array(y_train)
# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
"""## **Embeddings!**"""
# Import the Universal Sentence Encoder's TF Hub module (Here we're making use of version 4)
# This will take a while but won't be long :)
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
appreciate = hub.load(module_url)
# Making it easier - Function for embedding
def embed(goodness):
return appreciate(goodness)
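# The Universal Sentence Encoder maps any piece of text to a fixed 512-dimensional
# vector, which is why the classifier below is built with input_shape=[512].
print(embed(['hello world']).shape)  # expected: (1, 512)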
# REVIEW OUTPUT ::
# appreciate.variables
# Wrapping up with the U-S-E
X_train = embed(X_train)
X_test = embed(X_test)
X_train = X_train.numpy()
X_test = X_test.numpy()
# PREVIEW OUTPUT ::
# print(X_train[:10])
# print(y_train[:10])
# print(X_test[:10])
# print(y_test[:10])
# print(X_train.shape, X_test.shape, y_test.shape, y_train.shape)
"""# **Building the model**"""
model = Sequential()
# model.add(Embedding(input_dim=len(vocabulary), output_dim=100))
# model.add(LSTM(units=100, input_shape=[512]))
model.add(Dense(512, input_shape=[512], activation = 'relu'))
model.add(Dense(units=len(vocabulary), activation = 'softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()
# Training the model.
model.fit(X_train, y_train, batch_size=512, shuffle=True, epochs=20, validation_data=(X_test, y_test), callbacks=[LambdaCallback()])
"""#**Unto the tests!**"""
# Create function to predict and show detailed output
def next_word(collection=[], extent=1):
for item in collection:
text = item
for i in range(extent):
prediction = model.predict(x=embed([item]).numpy())
idx = np.argmax(prediction[-1])
item += ' ' + vocabulary[idx]
print(text + ' --> ' + item + '\nNEXT WORD: ' + item.split(' ')[-1] + '\n')
# Tests - please feel free to explore
single_text = ['and some other essential']
next_word(single_text)
# Testing on a collection of words
text_collection = ['deep convolutional', 'simple and effective', 'a nonconvex', 'a']
next_word(text_collection)
"""## **For the record**
The dataset comes from a Stanford TensorFlow tutorial (arXiv paper abstracts), so the predicted words are drawn from common deep learning and machine learning terms.
"""
# Storing data
vocabulary = numpy.array(vocabulary)
numpy.save('./vocabulary.npy', vocabulary)
model.save('./NWP-USE')
## END OF NOTEBOOK | [
"tensorflow_hub.load",
"numpy.save",
"numpy.argmax",
"gdown.download",
"sklearn.model_selection.train_test_split",
"keras.callbacks.LambdaCallback",
"keras.layers.Dense",
"numpy.array",
"google.colab.drive.mount",
"keras.models.Sequential"
] | [((690, 712), 'google.colab.drive.mount', 'drive.mount', (['"""/gdrive"""'], {}), "('/gdrive')\n", (701, 712), False, 'from google.colab import drive\n'), ((1704, 1744), 'gdown.download', 'gdown.download', (['url', 'output'], {'quiet': '(False)'}), '(url, output, quiet=False)\n', (1718, 1744), False, 'import gdown\n'), ((2745, 2800), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(X, y, test_size=0.25, random_state=42)\n', (2761, 2800), False, 'from sklearn.model_selection import train_test_split\n'), ((2810, 2829), 'numpy.array', 'numpy.array', (['y_test'], {}), '(y_test)\n', (2821, 2829), False, 'import numpy\n'), ((2840, 2860), 'numpy.array', 'numpy.array', (['y_train'], {}), '(y_train)\n', (2851, 2860), False, 'import numpy\n'), ((3220, 3240), 'tensorflow_hub.load', 'hub.load', (['module_url'], {}), '(module_url)\n', (3228, 3240), True, 'import tensorflow_hub as hub\n'), ((3725, 3737), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3735, 3737), False, 'from keras.models import Sequential\n'), ((3812, 3824), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3822, 3824), False, 'from keras.models import Sequential\n'), ((5145, 5168), 'numpy.array', 'numpy.array', (['vocabulary'], {}), '(vocabulary)\n', (5156, 5168), False, 'import numpy\n'), ((5169, 5211), 'numpy.save', 'numpy.save', (['"""./vocabulary.npy"""', 'vocabulary'], {}), "('./vocabulary.npy', vocabulary)\n", (5179, 5211), False, 'import numpy\n'), ((3883, 3931), 'keras.layers.Dense', 'Dense', (['(512)'], {'input_shape': '[512]', 'activation': '"""relu"""'}), "(512, input_shape=[512], activation='relu')\n", (3888, 3931), False, 'from keras.layers import Dense, Activation\n'), ((4236, 4252), 'keras.callbacks.LambdaCallback', 'LambdaCallback', ([], {}), '()\n', (4250, 4252), False, 'from keras.callbacks import LambdaCallback\n'), ((4520, 4545), 'numpy.argmax', 'np.argmax', (['prediction[-1]'], {}), '(prediction[-1])\n', (4529, 4545), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import numpy as np
import inspect
import warnings
import six
import paddle
from paddle.fluid.io import _get_valid_program
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from collections import OrderedDict
from paddle.fluid import dygraph
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid import core
from paddle.fluid import layers
from paddle.nn import Layer
from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard
from paddle.fluid.dygraph.layers import Layer
from paddle2onnx.utils import logging
from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops
def _get_input_var_names(inputs, input_spec):
name_none_error = "The %s's name is None. " \
"When using jit.save, please set InputSepc's name in " \
"to_static(input_spec=[]) and jit.save(input_spec=[]) " \
"and make sure they are consistent."
name_no_exists_error = "The tensor `%s` does not exists. " \
"Please make sure the name of InputSpec or example Tensor " \
"in input_spec is the same as the name of InputSpec in " \
"`to_static` decorated on the Layer.forward method."
result_list = []
input_var_names = [
var.name for var in flatten(inputs) if isinstance(var, Variable)
]
if input_spec is None:
# no prune
return input_var_names
else:
# fileter out non-tensor type spec infos.
input_spec = [
spec for spec in input_spec
if isinstance(spec, paddle.static.InputSpec)
]
if len(input_spec) == len(input_var_names):
# no prune
result_list = input_var_names
# if input spec name not in input_var_names, only raise warning
for spec in input_spec:
if spec.name is None:
warnings.warn(name_none_error % spec)
elif spec.name not in input_var_names:
warnings.warn(name_no_exists_error % spec.name)
else:
# do nothing
pass
else:
# prune
for spec in input_spec:
if spec.name is None:
# name is None, the input_spec only can be InputSpec
raise ValueError(name_none_error % spec)
elif spec.name not in input_var_names:
# the input_spec can be `InputSpec` or `VarBase`
raise ValueError(name_no_exists_error % spec.name)
else:
result_list.append(spec.name)
return result_list
def _get_output_vars(outputs, output_spec):
name_no_exists_error = "The tensor `%s` does not exists. " \
"Please make sure the name of example Tensor " \
"in configs.output_spec is the output tensor of " \
"Layer.forward method."
result_list = []
output_vars_dict = OrderedDict()
for var in flatten(outputs):
if isinstance(var, Variable):
output_vars_dict[var.name] = var
if output_spec is None:
result_list = output_vars_dict.values()
elif output_spec is not None and len(output_spec) == len(output_vars_dict):
result_list = output_vars_dict.values()
for var in output_spec:
if var.name not in output_vars_dict:
warnings.warn(name_no_exists_error % var.name)
else:
for var in output_spec:
if var.name not in output_vars_dict:
raise ValueError(name_no_exists_error % var.name)
else:
result_list.append(output_vars_dict[var.name])
return result_list
@dygraph.base.switch_to_static_graph
def get_program(layer, input_spec, output_spec, **configs):
paddle.jit.set_verbosity(0)
prog_translator = ProgramTranslator()
if not prog_translator.enable_to_static:
raise RuntimeError(
"The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False."
)
if not isinstance(layer, Layer):
raise TypeError(
"The input of paddle2onnx should be 'Layer', but received input type is %s."
% type(layer))
if isinstance(layer, paddle.DataParallel):
inner_layer = layer._layers
else:
inner_layer = layer
# avoid change user given input_spec
inner_input_spec = None
if input_spec is not None:
for attr_func in dir(inner_layer):
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func,
StaticFunction) and 'forward' != attr_func:
raise ValueError(
"If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
% type(input_spec))
if not isinstance(input_spec, (list, tuple)):
raise TypeError(
"The input input_spec should be 'list', but received input_spec's type is %s."
% type(input_spec))
inner_input_spec = []
for var in flatten(input_spec):
if isinstance(var, paddle.static.InputSpec):
inner_input_spec.append(var)
elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
inner_input_spec.append(
paddle.static.InputSpec.from_tensor(var))
else:
# NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
inner_input_spec.append(var)
extra_var_info = dict()
functions = dir(inner_layer)
for attr_func in functions:
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func, StaticFunction):
concrete_program = static_func.concrete_program_specify_input_spec(
inner_input_spec)
elif 'forward' == attr_func:
# transform in jit.save, if input_spec is incomplete, declarative will throw error
# inner_input_spec is list[InputSpec], it should be packed with same structure
# as original input_spec here.
if inner_input_spec:
inner_input_spec = pack_sequence_as(input_spec,
inner_input_spec)
static_forward = declarative(
inner_layer.forward, input_spec=inner_input_spec)
concrete_program = static_forward.concrete_program
# the input_spec has been used in declarative, which is equal to
# @declarative with input_spec and jit.save without input_spec,
# avoid needless warning
inner_input_spec = None
else:
continue
input_var_names = _get_input_var_names(concrete_program.inputs,
inner_input_spec)
# NOTE(chenweihang): [ Get output variables ]
# the rule is like [ Get input variables name ]. For output var,
# we only support VarBase spec, and actually, we only need the
# var name of output, and we don't recommended to use output_spec
output_vars = _get_output_vars(concrete_program.outputs, output_spec)
feeded_var_names = input_var_names
target_vars = output_vars
main_program = concrete_program.main_program.clone()
export_for_deployment = True
if isinstance(feeded_var_names, six.string_types):
feeded_var_names = [feeded_var_names]
elif export_for_deployment:
if len(feeded_var_names) > 0:
# TODO(paddle-dev): polish these code blocks
if not (bool(feeded_var_names) and all(
isinstance(name, six.string_types)
for name in feeded_var_names)):
raise ValueError("'feed_var_names' should be a list of str.")
if isinstance(target_vars, Variable):
target_vars = [target_vars]
elif export_for_deployment:
if not (bool(target_vars) and
all(isinstance(var, Variable) for var in target_vars)):
raise ValueError("'target_vars' should be a list of Variable.")
main_program = _get_valid_program(main_program)
# remind user to set auc_states to zeros if the program contains auc op
all_ops = main_program.global_block().ops
for op in all_ops:
# clear device of Op
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
op._set_attr(device_attr_name, "")
if op.type == 'auc':
warnings.warn(
"please ensure that you have set the auc states to zeros before saving inference model"
)
break
with program_guard(main_program):
uniq_target_vars = []
for i, var in enumerate(target_vars):
uniq_target_vars.append(var)
target_vars = uniq_target_vars
target_var_name_list = [var.name for var in target_vars]
origin_program = main_program.clone()
main_program = main_program.clone()
global_block = main_program.global_block()
need_to_remove_op_index = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
need_to_remove_op_index.append(i)
for index in need_to_remove_op_index[::-1]:
global_block._remove_op(index)
main_program.desc.flush()
main_program = main_program._prune_with_input(
feeded_var_names=feeded_var_names, targets=target_vars)
main_program = main_program._inference_optimize(prune_read_op=True)
fetch_var_names = [v.name for v in target_vars]
for target_v in target_vars:
if not main_program.global_block().has_var(target_v.name):
main_program.global_block().create_var(
name=target_v.name,
shape=target_v.shape,
dtype=target_v.dtype,
persistable=target_v.persistable)
prepend_feed_ops(main_program, feeded_var_names)
append_fetch_ops(main_program, fetch_var_names)
main_program.desc._set_version()
paddle.fluid.core.save_op_version_info(main_program.desc)
main_program._copy_dist_param_info_from(origin_program)
return main_program, feeded_var_names, target_vars
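# Sketch of a typical call (illustrative only; the layer and specs are user supplied):
#   layer = paddle.nn.Linear(4, 2)
#   spec = [paddle.static.InputSpec(shape=[None, 4], dtype='float32', name='x')]
#   program, feed_names, fetch_vars = get_program(layer, spec, None)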
| [
"paddle2onnx.graph.graph_helper.append_fetch_ops",
"paddle.fluid.layers.utils.pack_sequence_as",
"paddle.fluid.dygraph.dygraph_to_static.program_translator.ProgramTranslator",
"paddle.fluid.io._get_valid_program",
"paddle.fluid.core.save_op_version_info",
"paddle.fluid.dygraph.jit.declarative",
"paddle.fluid.layers.utils.flatten",
"paddle2onnx.graph.graph_helper.prepend_feed_ops",
"paddle.static.InputSpec.from_tensor",
"paddle.jit.set_verbosity",
"paddle.fluid.core.op_proto_and_checker_maker.kOpDeviceAttrName",
"collections.OrderedDict",
"paddle.fluid.framework.program_guard"
] | [((3606, 3619), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3617, 3619), False, 'from collections import OrderedDict\n'), ((3635, 3651), 'paddle.fluid.layers.utils.flatten', 'flatten', (['outputs'], {}), '(outputs)\n', (3642, 3651), False, 'from paddle.fluid.layers.utils import flatten, pack_sequence_as\n'), ((4448, 4475), 'paddle.jit.set_verbosity', 'paddle.jit.set_verbosity', (['(0)'], {}), '(0)\n', (4472, 4475), False, 'import paddle\n'), ((4498, 4517), 'paddle.fluid.dygraph.dygraph_to_static.program_translator.ProgramTranslator', 'ProgramTranslator', ([], {}), '()\n', (4515, 4517), False, 'from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction\n'), ((8890, 8922), 'paddle.fluid.io._get_valid_program', '_get_valid_program', (['main_program'], {}), '(main_program)\n', (8908, 8922), False, 'from paddle.fluid.io import _get_valid_program\n'), ((10692, 10740), 'paddle2onnx.graph.graph_helper.prepend_feed_ops', 'prepend_feed_ops', (['main_program', 'feeded_var_names'], {}), '(main_program, feeded_var_names)\n', (10708, 10740), False, 'from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops\n'), ((10745, 10792), 'paddle2onnx.graph.graph_helper.append_fetch_ops', 'append_fetch_ops', (['main_program', 'fetch_var_names'], {}), '(main_program, fetch_var_names)\n', (10761, 10792), False, 'from paddle2onnx.graph.graph_helper import prepend_feed_ops, append_fetch_ops\n'), ((10835, 10892), 'paddle.fluid.core.save_op_version_info', 'paddle.fluid.core.save_op_version_info', (['main_program.desc'], {}), '(main_program.desc)\n', (10873, 10892), False, 'import paddle\n'), ((5828, 5847), 'paddle.fluid.layers.utils.flatten', 'flatten', (['input_spec'], {}), '(input_spec)\n', (5835, 5847), False, 'from paddle.fluid.layers.utils import flatten, pack_sequence_as\n'), ((9125, 9176), 'paddle.fluid.core.op_proto_and_checker_maker.kOpDeviceAttrName', 'core.op_proto_and_checker_maker.kOpDeviceAttrName', ([], {}), '()\n', (9174, 9176), False, 'from paddle.fluid import core\n'), ((9422, 9449), 'paddle.fluid.framework.program_guard', 'program_guard', (['main_program'], {}), '(main_program)\n', (9435, 9449), False, 'from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, program_guard\n'), ((2014, 2029), 'paddle.fluid.layers.utils.flatten', 'flatten', (['inputs'], {}), '(inputs)\n', (2021, 2029), False, 'from paddle.fluid.layers.utils import flatten, pack_sequence_as\n'), ((7055, 7116), 'paddle.fluid.dygraph.jit.declarative', 'declarative', (['inner_layer.forward'], {'input_spec': 'inner_input_spec'}), '(inner_layer.forward, input_spec=inner_input_spec)\n', (7066, 7116), False, 'from paddle.fluid.dygraph.jit import declarative\n'), ((6927, 6973), 'paddle.fluid.layers.utils.pack_sequence_as', 'pack_sequence_as', (['input_spec', 'inner_input_spec'], {}), '(input_spec, inner_input_spec)\n', (6943, 6973), False, 'from paddle.fluid.layers.utils import flatten, pack_sequence_as\n'), ((6091, 6131), 'paddle.static.InputSpec.from_tensor', 'paddle.static.InputSpec.from_tensor', (['var'], {}), '(var)\n', (6126, 6131), False, 'import paddle\n')] |
import requests
import ujson
# from b2b_app.config import CONFIG
class Hubspot:
def __init__(self, hub_id, refresh_token):
self.hub_id = hub_id
self.refresh_token = refresh_token
self.access_token = self.get_access_token(refresh_token)
self._lists_url = 'https://api.hubapi.com/contacts/v1/lists'
pass
def get_access_token(self, refresh_token):
pass
def update_access_token():
pass
##### ACCOUNT APIS #####
def get_account_by_id():
pass
# read all companies
def get_all_accounts():
pass
# create company in hubspot
def create_account():
pass
# update company
def update_account():
#
pass
def add_contact_to_account():
pass
def get_associated_deals_for_account():
pass
##### CONTACT APIS #####
def get_contact_by_id():
pass
# read all companies
def get_all_contacts(self):
url = self._lists_url + '/all/contacts/all'
querystring = {
'vid-offset': '0',
'count': '10',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
# create contact
def create_contact():
pass
# update contact
def update_contact():
pass
# deleting contact
def delete_contact():
pass
def get_associated_deals_for_contact():
pass
##### LISTS APIS #####
def get_all_static_lists(self):
url = self._lists_url
querystring = {
'offset': '0',
'count': '10',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def get_all_dynamic_lists(self):
url = self._lists_url + '/dynamic'
querystring = {
'offset': '0',
'count': '10',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def get_list_by_id(self, list_id):
url = self._lists_url + '/' + list_id
querystring = {
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def create_static_list(self, list_name):
url = self._lists_url
querystring = {
'hapikey': '<KEY>',
}
payload = ujson.dumps({
'name': list_name,
'dynamic': False,
'portalId': 5225356,
'filters': [],
})
response = requests.request(
'POST',
url,
data=payload,
params=querystring
)
print(response.text)
def create_dynamic_list(self, list_name):
url = self._lists_url
querystring = {
'hapikey': '<KEY>',
}
payload = ujson.dumps({
'name': list_name,
'dynamic': True,
'portalId': 5225356,
'filters': [],
})
response = requests.request(
'POST',
url,
data=payload,
params=querystring
)
print(response.text)
pass
def delete_list():
pass
def get_all_contacts_from_a_list(self, list_id):
url = self._lists_url + '/' + list_id + '/contacts/all'
querystring = {
'vidOffset': '0',
'count': '100',
'hapikey': '<KEY>',
}
response = requests.request(
'GET',
url,
params=querystring
)
print(response.text)
def add_contacts_in_a_static_list(self, list_id, array_of_ids):
url = self._lists_url + '/' + list_id + '/add'
querystring = {
'hapikey': '<KEY>',
}
payload = ujson.dumps({
'vids': array_of_ids
})
response = requests.request(
'POST',
url,
data=payload,
params=querystring
)
print(response.text)
##### DEAL APIS #####
# create deal
def get_deal_owner_by_id():
# check deal id or owner id
pass
def create_deal():
pass
def associate_contact_to_deal():
pass
def associate_account_to_deal():
pass
def dissociate_contact_from_deal():
pass
def find_deal_owner():
# yes
pass
def test():
pass
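# Illustrative usage (ids and tokens are placeholders, not real credentials):
#   hub = Hubspot(hub_id=12345, refresh_token='<REFRESH_TOKEN>')
#   hub.get_all_static_lists()
#   hub.create_static_list('Newsletter subscribers')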
| [
"ujson.dumps",
"requests.request"
] | [((1166, 1214), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring'}), "('GET', url, params=querystring)\n", (1182, 1214), False, 'import requests\n'), ((1766, 1814), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring'}), "('GET', url, params=querystring)\n", (1782, 1814), False, 'import requests\n'), ((2110, 2158), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring'}), "('GET', url, params=querystring)\n", (2126, 2158), False, 'import requests\n'), ((2409, 2457), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring'}), "('GET', url, params=querystring)\n", (2425, 2457), False, 'import requests\n'), ((2697, 2787), 'ujson.dumps', 'ujson.dumps', (["{'name': list_name, 'dynamic': False, 'portalId': 5225356, 'filters': []}"], {}), "({'name': list_name, 'dynamic': False, 'portalId': 5225356,\n 'filters': []})\n", (2708, 2787), False, 'import ujson\n'), ((2862, 2925), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'data': 'payload', 'params': 'querystring'}), "('POST', url, data=payload, params=querystring)\n", (2878, 2925), False, 'import requests\n'), ((3163, 3252), 'ujson.dumps', 'ujson.dumps', (["{'name': list_name, 'dynamic': True, 'portalId': 5225356, 'filters': []}"], {}), "({'name': list_name, 'dynamic': True, 'portalId': 5225356,\n 'filters': []})\n", (3174, 3252), False, 'import ujson\n'), ((3327, 3390), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'data': 'payload', 'params': 'querystring'}), "('POST', url, data=payload, params=querystring)\n", (3343, 3390), False, 'import requests\n'), ((3797, 3845), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'params': 'querystring'}), "('GET', url, params=querystring)\n", (3813, 3845), False, 'import requests\n'), ((4133, 4166), 'ujson.dumps', 'ujson.dumps', (['{vids: array_of_ids}'], {}), '({vids: array_of_ids})\n', (4144, 4166), False, 'import ujson\n'), ((4208, 4271), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'data': 'payload', 'params': 'querystring'}), "('POST', url, data=payload, params=querystring)\n", (4224, 4271), False, 'import requests\n')] |
import os
from .. import FileBuilder
from .file_builder_test import FileBuilderTest
class LambdaTest(FileBuilderTest):
"""Tests that ``FileBuilder`` methods accept lambda arguments.
Tests that ``FileBuilder`` methods accept lambdas for arguments that
must be callables.
"""
def _build_file(self, builder, filename):
"""Build file function for ``LambdaTest``."""
self._write(filename, 'text')
def _subbuild(self, builder, dir_):
"""Subbuild function for ``LambdaTest``."""
builder.build_file(
os.path.join(dir_, 'Output1.txt'), 'build_file', self._build_file)
builder.build_file(
os.path.join(dir_, 'Output2.txt'), 'build_file',
lambda builder, filename: self._write(filename, 'text'))
def _build(self, builder):
"""Build function for ``LambdaTest``."""
builder.subbuild(
'subbuild', self._subbuild, os.path.join(self._temp_dir, 'Dir1'))
builder.subbuild(
'subbuild',
lambda builder, dir_: self._subbuild(builder, dir_),
os.path.join(self._temp_dir, 'Dir2'))
def test_lambda(self):
"""Test that ``FileBuilder`` methods accept lambda arguments.
Test that ``FileBuilder`` methods accept lambdas for arguments
that must be callable.
"""
FileBuilder.build(self._cache_filename, 'lambda_test', self._build)
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output2.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output2.txt'), 'text')
FileBuilder.clean(self._cache_filename, 'lambda_test')
self.assertEqual([], os.listdir(self._temp_dir))
FileBuilder.build(
self._cache_filename, 'lambda_test',
lambda builder: self._build(builder))
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir1', 'Output2.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output1.txt'), 'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Output2.txt'), 'text')
| [
"os.path.join",
"os.listdir"
] | [((566, 599), 'os.path.join', 'os.path.join', (['dir_', '"""Output1.txt"""'], {}), "(dir_, 'Output1.txt')\n", (578, 599), False, 'import os\n'), ((673, 706), 'os.path.join', 'os.path.join', (['dir_', '"""Output2.txt"""'], {}), "(dir_, 'Output2.txt')\n", (685, 706), False, 'import os\n'), ((938, 974), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir1"""'], {}), "(self._temp_dir, 'Dir1')\n", (950, 974), False, 'import os\n'), ((1103, 1139), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir2"""'], {}), "(self._temp_dir, 'Dir2')\n", (1115, 1139), False, 'import os\n'), ((1473, 1524), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir1"""', '"""Output1.txt"""'], {}), "(self._temp_dir, 'Dir1', 'Output1.txt')\n", (1485, 1524), False, 'import os\n'), ((1576, 1627), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir1"""', '"""Output2.txt"""'], {}), "(self._temp_dir, 'Dir1', 'Output2.txt')\n", (1588, 1627), False, 'import os\n'), ((1679, 1730), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir2"""', '"""Output1.txt"""'], {}), "(self._temp_dir, 'Dir2', 'Output1.txt')\n", (1691, 1730), False, 'import os\n'), ((1782, 1833), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir2"""', '"""Output2.txt"""'], {}), "(self._temp_dir, 'Dir2', 'Output2.txt')\n", (1794, 1833), False, 'import os\n'), ((1937, 1963), 'os.listdir', 'os.listdir', (['self._temp_dir'], {}), '(self._temp_dir)\n', (1947, 1963), False, 'import os\n'), ((2135, 2186), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir1"""', '"""Output1.txt"""'], {}), "(self._temp_dir, 'Dir1', 'Output1.txt')\n", (2147, 2186), False, 'import os\n'), ((2238, 2289), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir1"""', '"""Output2.txt"""'], {}), "(self._temp_dir, 'Dir1', 'Output2.txt')\n", (2250, 2289), False, 'import os\n'), ((2341, 2392), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir2"""', '"""Output1.txt"""'], {}), "(self._temp_dir, 'Dir2', 'Output1.txt')\n", (2353, 2392), False, 'import os\n'), ((2444, 2495), 'os.path.join', 'os.path.join', (['self._temp_dir', '"""Dir2"""', '"""Output2.txt"""'], {}), "(self._temp_dir, 'Dir2', 'Output2.txt')\n", (2456, 2495), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
def scrape():
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import time
#dictionary with all data
mars_data={}
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=True, user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')
url_1 = 'https://mars.nasa.gov/news/'
browser.visit(url_1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
search = soup.find('section', class_= 'grid_gallery module list_view')
title_search = search.find_all('div', class_= 'content_title',limit=1)
p_search = search.find_all('div', class_='article_teaser_body',limit=1)
news_title = title_search[0].a.text
news_p = p_search[0].text
#add data to dictionary
mars_data['news_title']=news_title
mars_data['news_p']=news_p
url_2='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url_2)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#click the full image button
click1=browser.find_by_css('a[class="button fancybox"]').click()
##click1=browser.links.find_by_partial_text('FULL IMAGE').click()
#click the more info button
click2=browser.links.find_by_partial_text('more info').click()
#parse the page
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#find the link to the full size image
img_partial = soup.find_all('img',class_='main_image')[0]['src']
featured_img_url = f'https://www.jpl.nasa.gov{img_partial}'
mars_data['featured_img_url']=featured_img_url
featured_img_url
twitter_url = 'https://twitter.com/MarsWxReport?lang=en'
browser.visit(twitter_url)
time.sleep(2)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
tweet_search = soup.find_all('article')
mars_weather=tweet_search[0].find_all('span')[4].text
mars_data['mars_weather']=mars_weather
mars_weather
facts_url = 'https://space-facts.com/mars/'
browser.visit(facts_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
facts_table = pd.read_html(facts_url)
mars_table = facts_table[0]
mars_table = mars_table.rename(columns = {0:'Mars Planet Profile',1:''})
mars_table = mars_table.set_index('Mars Planet Profile', drop=True)
mars_table
mars_table.to_html('mars_html')
mars_data['mars_facts']=mars_table.to_html(justify='left')
hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
base_url = 'https://astrogeology.usgs.gov/'
browser.visit(hemisphere_url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#click the image link to get to the page with the full res image
browser.find_by_css('img[class="thumb"]')[0].click()
#get html again after clicking page
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#find image link and title
img_search = soup.find_all('img',class_='wide-image' )
title_search = soup.find_all('h2',class_='title')
#titles had the word 'enhanced' at the end, just getting rid of that
' '.join(title_search[0].text.split(' ')[:-1])
img_link = base_url + img_search[0]['src']
img_link
#do all of the step above for each hemisphere
img_urls =[]
titles=[]
for i in range(4):
browser.visit(hemisphere_url)
time.sleep(1)
browser.find_by_css('img[class="thumb"]')[i].click()
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
img_search = soup.find_all('img',class_='wide-image' )
title_search = soup.find_all('h2',class_='title')
titles.append(' '.join(title_search[0].text.split(' ')[:-1]))
img_urls.append(base_url + img_search[0]['src'])
img_urls
titles
hemisphere_image_urls = []
urls ={}
for i in range(4):
urls['title']=titles[i]
urls['img_url']=img_urls[i]
hemisphere_image_urls.append(urls)
urls={}
mars_data['hemisphere_image_urls']=hemisphere_image_urls
hemisphere_image_urls
return mars_data
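# Typical invocation (requires chromedriver.exe next to this script or on PATH):
#   if __name__ == '__main__':
#       data = scrape()
#       print(data['news_title'])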
| [
"bs4.BeautifulSoup",
"splinter.Browser",
"pandas.read_html",
"time.sleep"
] | [((290, 402), 'splinter.Browser', 'Browser', (['"""chrome"""'], {'headless': '(True)', 'user_agent': '"""Mozilla/5.0 (Windows NT 10.0; Win64; x64)"""'}), "('chrome', **executable_path, headless=True, user_agent=\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)')\n", (297, 402), False, 'from splinter import Browser\n'), ((506, 540), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (519, 540), False, 'from bs4 import BeautifulSoup\n'), ((1086, 1120), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1099, 1120), False, 'from bs4 import BeautifulSoup\n'), ((1453, 1487), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1466, 1487), False, 'from bs4 import BeautifulSoup\n'), ((1844, 1857), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1854, 1857), False, 'import time\n'), ((1893, 1927), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1906, 1927), False, 'from bs4 import BeautifulSoup\n'), ((2208, 2242), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (2221, 2242), False, 'from bs4 import BeautifulSoup\n'), ((2263, 2286), 'pandas.read_html', 'pd.read_html', (['facts_url'], {}), '(facts_url)\n', (2275, 2286), True, 'import pandas as pd\n'), ((2820, 2854), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (2833, 2854), False, 'from bs4 import BeautifulSoup\n'), ((3058, 3092), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (3071, 3092), False, 'from bs4 import BeautifulSoup\n'), ((3576, 3589), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3586, 3589), False, 'import time\n'), ((3659, 3672), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3669, 3672), False, 'import time\n'), ((3716, 3750), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (3729, 3750), False, 'from bs4 import BeautifulSoup\n')] |
from mpkg.common import Soft
from mpkg.utils import Search
class Package(Soft):
ID = 'python'
def _prepare(self):
data = self.data
links = {'32bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}.exe',
'64bit': 'https://www.python.org/ftp/python/{ver}/python-{ver}-amd64.exe'}
url = 'https://www.python.org/'
data.ver = Search(url, 'Latest: .*Python ([\\d\\.]+)')
data.changelog = f'https://docs.python.org/release/{data.ver}/whatsnew/changelog.html#changelog'
data.arch = Search(links=links, ver=data.ver)
| [
"mpkg.utils.Search"
] | [((387, 430), 'mpkg.utils.Search', 'Search', (['url', '"""Latest: .*Python ([\\\\d\\\\.]+)"""'], {}), "(url, 'Latest: .*Python ([\\\\d\\\\.]+)')\n", (393, 430), False, 'from mpkg.utils import Search\n'), ((556, 589), 'mpkg.utils.Search', 'Search', ([], {'links': 'links', 'ver': 'data.ver'}), '(links=links, ver=data.ver)\n', (562, 589), False, 'from mpkg.utils import Search\n')] |
# Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import socket
from keystoneauth1 import loading
from oslo_config import cfg
import ceilometer.agent.manager
import ceilometer.api.app
import ceilometer.api.controllers.v2.root
import ceilometer.collector
import ceilometer.compute.discovery
import ceilometer.compute.util
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.inspector
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.coordination
import ceilometer.dispatcher
import ceilometer.dispatcher.file
import ceilometer.dispatcher.gnocchi_opts
import ceilometer.dispatcher.http
import ceilometer.energy.kwapi
import ceilometer.event.converter
import ceilometer.exchange_control
import ceilometer.hardware.discovery
import ceilometer.hardware.pollsters.generic
import ceilometer.image.discovery
import ceilometer.ipmi.notifications.ironic
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
import ceilometer.keystone_client
import ceilometer.meter.notifications
import ceilometer.middleware
import ceilometer.neutron_client
import ceilometer.notification
import ceilometer.nova_client
import ceilometer.objectstore.rgw
import ceilometer.objectstore.swift
import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.storage
import ceilometer.utils
import ceilometer.volume.discovery
OPTS = [
cfg.StrOpt('host',
default=socket.gethostname(),
sample_default='<your_hostname>',
help='Name of this node, which must be valid in an AMQP '
'key. Can be an opaque identifier. For ZeroMQ only, must '
'be a valid host name, FQDN, or IP address.'),
cfg.IntOpt('http_timeout',
default=600,
help='Timeout seconds for HTTP requests. Set it to None to '
'disable timeout.'),
]
def list_opts():
# FIXME(sileht): readd pollster namespaces in the generated configfile
# This have been removed due to a recursive import issue
return [
('DEFAULT',
itertools.chain(ceilometer.agent.manager.OPTS,
ceilometer.api.app.OPTS,
ceilometer.compute.util.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.inspector.OPTS,
ceilometer.dispatcher.OPTS,
ceilometer.ipmi.notifications.ironic.OPTS,
ceilometer.middleware.OPTS,
ceilometer.nova_client.OPTS,
ceilometer.objectstore.swift.OPTS,
ceilometer.pipeline.OPTS,
ceilometer.sample.OPTS,
ceilometer.utils.OPTS,
ceilometer.exchange_control.EXCHANGE_OPTS,
OPTS)),
('api', itertools.chain(ceilometer.api.app.API_OPTS,
ceilometer.api.controllers.v2.root.API_OPTS)),
('collector', ceilometer.collector.OPTS),
('compute', ceilometer.compute.discovery.OPTS),
('coordination', ceilometer.coordination.OPTS),
('database', ceilometer.storage.OPTS),
('dispatcher_file', ceilometer.dispatcher.file.OPTS),
('dispatcher_http', ceilometer.dispatcher.http.http_dispatcher_opts),
('dispatcher_gnocchi',
ceilometer.dispatcher.gnocchi_opts.dispatcher_opts),
('event', ceilometer.event.converter.OPTS),
('hardware', itertools.chain(
ceilometer.hardware.discovery.OPTS,
ceilometer.hardware.pollsters.generic.OPTS)),
('ipmi',
itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
ceilometer.ipmi.pollsters.OPTS)),
('meter', ceilometer.meter.notifications.OPTS),
('notification', ceilometer.notification.OPTS),
('polling', ceilometer.agent.manager.POLLING_OPTS),
('publisher', ceilometer.publisher.utils.OPTS),
('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),
('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),
# NOTE(sileht): the configuration file contains only the options
# for the password plugin that handles keystone v2 and v3 API
# with discovery. But other options are possible.
('service_credentials', ceilometer.keystone_client.CLI_OPTS),
('service_types',
itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,
ceilometer.image.discovery.SERVICE_OPTS,
ceilometer.neutron_client.SERVICE_OPTS,
ceilometer.nova_client.SERVICE_OPTS,
ceilometer.objectstore.rgw.SERVICE_OPTS,
ceilometer.objectstore.swift.SERVICE_OPTS,
ceilometer.volume.discovery.SERVICE_OPTS,)),
('storage', ceilometer.dispatcher.STORAGE_OPTS),
('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),
('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),
]
def list_keystoneauth_opts():
# NOTE(sileht): the configuration file contains only the options
# for the password plugin that handles keystone v2 and v3 API
# with discovery. But other options are possible.
return [('service_credentials', (
loading.get_auth_common_conf_options() +
loading.get_auth_plugin_conf_options('password')))]
| [
"keystoneauth1.loading.get_auth_plugin_conf_options",
"keystoneauth1.loading.get_auth_common_conf_options",
"socket.gethostname",
"itertools.chain",
"oslo_config.cfg.IntOpt"
] | [((2378, 2500), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""http_timeout"""'], {'default': '(600)', 'help': '"""Timeout seconds for HTTP requests. Set it to None to disable timeout."""'}), "('http_timeout', default=600, help=\n 'Timeout seconds for HTTP requests. Set it to None to disable timeout.')\n", (2388, 2500), False, 'from oslo_config import cfg\n'), ((2094, 2114), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (2112, 2114), False, 'import socket\n'), ((2749, 3249), 'itertools.chain', 'itertools.chain', (['ceilometer.agent.manager.OPTS', 'ceilometer.api.app.OPTS', 'ceilometer.compute.util.OPTS', 'ceilometer.compute.virt.inspector.OPTS', 'ceilometer.compute.virt.libvirt.inspector.OPTS', 'ceilometer.dispatcher.OPTS', 'ceilometer.ipmi.notifications.ironic.OPTS', 'ceilometer.middleware.OPTS', 'ceilometer.nova_client.OPTS', 'ceilometer.objectstore.swift.OPTS', 'ceilometer.pipeline.OPTS', 'ceilometer.sample.OPTS', 'ceilometer.utils.OPTS', 'ceilometer.exchange_control.EXCHANGE_OPTS', 'OPTS'], {}), '(ceilometer.agent.manager.OPTS, ceilometer.api.app.OPTS,\n ceilometer.compute.util.OPTS, ceilometer.compute.virt.inspector.OPTS,\n ceilometer.compute.virt.libvirt.inspector.OPTS, ceilometer.dispatcher.\n OPTS, ceilometer.ipmi.notifications.ironic.OPTS, ceilometer.middleware.\n OPTS, ceilometer.nova_client.OPTS, ceilometer.objectstore.swift.OPTS,\n ceilometer.pipeline.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS,\n ceilometer.exchange_control.EXCHANGE_OPTS, OPTS)\n', (2764, 3249), False, 'import itertools\n'), ((3592, 3686), 'itertools.chain', 'itertools.chain', (['ceilometer.api.app.API_OPTS', 'ceilometer.api.controllers.v2.root.API_OPTS'], {}), '(ceilometer.api.app.API_OPTS, ceilometer.api.controllers.v2.\n root.API_OPTS)\n', (3607, 3686), False, 'import itertools\n'), ((4231, 4331), 'itertools.chain', 'itertools.chain', (['ceilometer.hardware.discovery.OPTS', 'ceilometer.hardware.pollsters.generic.OPTS'], {}), '(ceilometer.hardware.discovery.OPTS, ceilometer.hardware.\n pollsters.generic.OPTS)\n', (4246, 4331), False, 'import itertools\n'), ((4380, 4481), 'itertools.chain', 'itertools.chain', (['ceilometer.ipmi.platform.intel_node_manager.OPTS', 'ceilometer.ipmi.pollsters.OPTS'], {}), '(ceilometer.ipmi.platform.intel_node_manager.OPTS,\n ceilometer.ipmi.pollsters.OPTS)\n', (4395, 4481), False, 'import itertools\n'), ((5196, 5512), 'itertools.chain', 'itertools.chain', (['ceilometer.energy.kwapi.SERVICE_OPTS', 'ceilometer.image.discovery.SERVICE_OPTS', 'ceilometer.neutron_client.SERVICE_OPTS', 'ceilometer.nova_client.SERVICE_OPTS', 'ceilometer.objectstore.rgw.SERVICE_OPTS', 'ceilometer.objectstore.swift.SERVICE_OPTS', 'ceilometer.volume.discovery.SERVICE_OPTS'], {}), '(ceilometer.energy.kwapi.SERVICE_OPTS, ceilometer.image.\n discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS,\n ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.\n SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.\n volume.discovery.SERVICE_OPTS)\n', (5211, 5512), False, 'import itertools\n'), ((6115, 6153), 'keystoneauth1.loading.get_auth_common_conf_options', 'loading.get_auth_common_conf_options', ([], {}), '()\n', (6151, 6153), False, 'from keystoneauth1 import loading\n'), ((6168, 6216), 'keystoneauth1.loading.get_auth_plugin_conf_options', 'loading.get_auth_plugin_conf_options', (['"""password"""'], {}), "('password')\n", (6204, 6216), False, 'from keystoneauth1 import loading\n')] |
# -*- coding: utf-8 -*-
import json
import os
import random
from shutil import copyfile
from flask import url_for, current_app as app
from flask_login import UserMixin
from sqlalchemy import func, desc
# from vktrainer import db, app, login_manager
from vktrainer import db, login_manager
from vktrainer.utils import get_md5
photos = db.Table('training_set_photos',
db.Column('training_set_id', db.Integer, db.ForeignKey('training_set.id')),
db.Column('photo_id', db.Integer, db.ForeignKey('photo.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
def __repr__(self):
return self.name
@classmethod
def get_or_create(cls, name):
user = cls.query.filter(cls.name == name).first()
if not user:
user = cls(name=name)
db.session.add(user)
db.session.commit()
return user, True
return user, False
@login_manager.user_loader
def load_user(userid):
return User.query.filter(User.id == userid).first()
class Photo(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
picture = db.Column(db.String(128))
md5 = db.Column(db.String(64))
PICTURES_FOLDER = 'pictures/'
@classmethod
def create_from_file(cls, file, check_if_exists=True):
# We check no photo with the same md5 already exists in db
md5 = get_md5(file)
if check_if_exists:
photo = cls.query.filter_by(md5=md5).first()
if photo is not None:
return None
# We copy the file
_, filename = os.path.split(file)
path = os.path.join('vktrainer', cls.PICTURES_FOLDER, md5)
copyfile(file, path)
name, _ = os.path.splitext(filename)
photo = Photo(name=name, md5=md5, picture=path)
db.session.add(photo)
db.session.commit()
return photo
def get_path(self):
return os.path.join(self.PICTURES_FOLDER, self.md5)
def get_absolute_url(self):
return url_for('vktrainer.show_photo', pk=self.id)
class TrainingSet(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
photos = db.dynamic_loader(
'Photo', secondary=photos, backref=db.backref('training_sets', lazy='dynamic'))
def __str__(self):
return self.name
def get_absolute_url(self):
return url_for('vktrainer.training_set', pk=self.id)
def get_results_url(self):
return url_for('vktrainer.training_set_results', pk=self.id)
def get_leaderboard_url(self):
return url_for('vktrainer.training_set_leaderboard', pk=self.id)
def get_results(self):
return [tr.get_pretty_result() for tr in self.training_results.all()]
def get_leaderboard(self):
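        # Count results per user and order from most to fewest contributions.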
count = func.count(TrainingResult.id)
return self.training_results.join(
TrainingResult.user,
).add_column(
count,
).group_by(
TrainingResult.user_id,
).order_by(
desc(count),
).values(
User.name,
count,
)
def get_percentage_done(self):
nb_photos_with_results = self.photos.filter(
Photo.id.in_(self.training_results.with_entities(TrainingResult.photo_id))
).count()
nb_photos = self.photos.count()
return float(nb_photos_with_results) / nb_photos * 100
def get_first_photo(self):
if app.config['SHOW_PICTURES_ORDERING'] == 'linear':
return self.photos.order_by('id').first()
else:
return self._get_next_photo_semi_random(None)
def get_next_photo(self, photo):
if app.config['SHOW_PICTURES_ORDERING'] == 'linear':
return self._get_next_photo_linear(photo)
else:
return self._get_next_photo_semi_random(photo)
def _get_next_photo_linear(self, photo):
next_photo = self.photos.filter(Photo.id > photo.id).order_by('id').first()
if not next_photo:
# We are already at the last photo, we show the first one
next_photo = self.photos.order_by('id').first()
return next_photo
def _get_previous_photo_linear(self, photo):
previous_photo = self.photos.filter(Photo.id < photo.id).order_by('-id').first()
if not previous_photo:
# We are already at the first photo, we show the last one
previous_photo = self.photos.order_by('-id').first()
return previous_photo
def _get_next_photo_semi_random(self, photo):
"""
We serve a random photo without any results
If there aren't any, we serve a random photo
"""
photos_without_results = self.photos.filter(~Photo.id.in_(
self.training_results.with_entities(TrainingResult.photo_id)
))
if photo:
photos_without_results = photos_without_results.filter(Photo.id != photo.id)
nb_photos_without_results = photos_without_results.count()
if nb_photos_without_results:
return photos_without_results.all()[random.randint(0, nb_photos_without_results - 1)]
else:
nb_photos = self.photos.count()
random_nb = random.randint(0, nb_photos - 1)
return self.photos.all()[random_nb]
def _get_previous_photo_semi_random(self, photo):
# Don't want to allow previous photo in semi random mode (breaks UX)
return None
class TrainingPattern(db.Model):
id = db.Column(db.Integer, primary_key=True)
training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))
name = db.Column(db.String(64))
instruction = db.Column(db.Text)
training_set = db.relation('TrainingSet', backref=db.backref('patterns', lazy='dynamic'))
pattern_ref = db.Column(db.String(64))
position = db.Column(db.Integer)
@property
def pattern(self):
from .patterns import REF_TO_PATTERN_CLASS
try:
return REF_TO_PATTERN_CLASS[self.pattern_ref]
except KeyError:
raise KeyError('Unknown pattern: {}'.format(self.pattern_ref))
class TrainingResult(db.Model):
id = db.Column(db.Integer, primary_key=True)
training_set_id = db.Column(db.Integer, db.ForeignKey('training_set.id'))
photo_id = db.Column(db.Integer, db.ForeignKey('photo.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
training_set = db.relation('TrainingSet', backref=db.backref('training_results', lazy='dynamic'))
photo = db.relation('Photo')
user = db.relation('User', lazy='joined', backref=db.backref('training_results'))
result = db.Column(db.Text) # Result stored in JSON
photo_is_incorrect = db.Column(db.Boolean, default=False)
def get_absolute_url(self):
return url_for(
'vktrainer.training_set_result',
training_set_pk=self.training_set.id,
result_pk=self.id,
)
def get_pretty_result(self):
if self.photo_is_incorrect:
result = 'Photo marked as incorrect'
else:
try:
loaded_result = json.loads(self.result)
except ValueError:
# Could not decode JSON
loaded_result = None
if loaded_result:
result = {
'state': 'OK',
'value': loaded_result,
}
else:
result = {
'state': 'KO',
'value': {},
}
return {
'photo': {
'name': self.photo.name,
'id': self.photo.id,
},
'user': self.user.name if self.user else None,
'result': result,
'id': self.id,
'url': self.get_absolute_url(),
}
@classmethod
def create(cls, photo, training_set, user, result, **kwargs):
training_result = cls(
photo=photo,
training_set=training_set,
user=user,
result=result,
**kwargs
)
db.session.add(training_result)
db.session.commit()
return training_result
| [
"vktrainer.db.ForeignKey",
"random.randint",
"vktrainer.db.session.add",
"json.loads",
"vktrainer.db.backref",
"vktrainer.db.relation",
"vktrainer.utils.get_md5",
"flask.url_for",
"vktrainer.db.Column",
"os.path.splitext",
"vktrainer.db.session.commit",
"shutil.copyfile",
"sqlalchemy.func.count",
"sqlalchemy.desc",
"vktrainer.db.String",
"os.path.join",
"os.path.split"
] | [((563, 602), 'vktrainer.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (572, 602), False, 'from vktrainer import db, login_manager\n'), ((1122, 1161), 'vktrainer.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1131, 1161), False, 'from vktrainer import db, login_manager\n'), ((2192, 2231), 'vktrainer.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (2201, 2231), False, 'from vktrainer import db, login_manager\n'), ((5611, 5650), 'vktrainer.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (5620, 5650), False, 'from vktrainer import db, login_manager\n'), ((5784, 5802), 'vktrainer.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (5793, 5802), False, 'from vktrainer import db, login_manager\n'), ((5955, 5976), 'vktrainer.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (5964, 5976), False, 'from vktrainer import db, login_manager\n'), ((6280, 6319), 'vktrainer.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (6289, 6319), False, 'from vktrainer import db, login_manager\n'), ((6639, 6659), 'vktrainer.db.relation', 'db.relation', (['"""Photo"""'], {}), "('Photo')\n", (6650, 6659), False, 'from vktrainer import db, login_manager\n'), ((6759, 6777), 'vktrainer.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (6768, 6777), False, 'from vktrainer import db, login_manager\n'), ((6828, 6864), 'vktrainer.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)'}), '(db.Boolean, default=False)\n', (6837, 6864), False, 'from vktrainer import db, login_manager\n'), ((417, 449), 'vktrainer.db.ForeignKey', 'db.ForeignKey', (['"""training_set.id"""'], {}), "('training_set.id')\n", (430, 449), False, 'from vktrainer import db, login_manager\n'), ((490, 515), 'vktrainer.db.ForeignKey', 'db.ForeignKey', (['"""photo.id"""'], {}), "('photo.id')\n", (503, 515), False, 'from vktrainer import db, login_manager\n'), ((625, 638), 'vktrainer.db.String', 'db.String', (['(64)'], {}), '(64)\n', (634, 638), False, 'from vktrainer import db, login_manager\n'), ((1184, 1197), 'vktrainer.db.String', 'db.String', (['(64)'], {}), '(64)\n', (1193, 1197), False, 'from vktrainer import db, login_manager\n'), ((1223, 1237), 'vktrainer.db.String', 'db.String', (['(128)'], {}), '(128)\n', (1232, 1237), False, 'from vktrainer import db, login_manager\n'), ((1259, 1272), 'vktrainer.db.String', 'db.String', (['(64)'], {}), '(64)\n', (1268, 1272), False, 'from vktrainer import db, login_manager\n'), ((1467, 1480), 'vktrainer.utils.get_md5', 'get_md5', (['file'], {}), '(file)\n', (1474, 1480), False, 'from vktrainer.utils import get_md5\n'), ((1678, 1697), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (1691, 1697), False, 'import os\n'), ((1713, 1764), 'os.path.join', 'os.path.join', (['"""vktrainer"""', 'cls.PICTURES_FOLDER', 'md5'], {}), "('vktrainer', cls.PICTURES_FOLDER, md5)\n", (1725, 1764), False, 'import os\n'), ((1773, 1793), 'shutil.copyfile', 'copyfile', (['file', 'path'], {}), '(file, path)\n', (1781, 1793), False, 'from shutil import copyfile\n'), ((1813, 1839), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1829, 1839), False, 'import os\n'), ((1904, 1925), 'vktrainer.db.session.add', 'db.session.add', (['photo'], {}), '(photo)\n', (1918, 1925), 
False, 'from vktrainer import db, login_manager\n'), ((1934, 1953), 'vktrainer.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1951, 1953), False, 'from vktrainer import db, login_manager\n'), ((2015, 2059), 'os.path.join', 'os.path.join', (['self.PICTURES_FOLDER', 'self.md5'], {}), '(self.PICTURES_FOLDER, self.md5)\n', (2027, 2059), False, 'import os\n'), ((2108, 2151), 'flask.url_for', 'url_for', (['"""vktrainer.show_photo"""'], {'pk': 'self.id'}), "('vktrainer.show_photo', pk=self.id)\n", (2115, 2151), False, 'from flask import url_for, current_app as app\n'), ((2254, 2267), 'vktrainer.db.String', 'db.String', (['(64)'], {}), '(64)\n', (2263, 2267), False, 'from vktrainer import db, login_manager\n'), ((2486, 2531), 'flask.url_for', 'url_for', (['"""vktrainer.training_set"""'], {'pk': 'self.id'}), "('vktrainer.training_set', pk=self.id)\n", (2493, 2531), False, 'from flask import url_for, current_app as app\n'), ((2579, 2632), 'flask.url_for', 'url_for', (['"""vktrainer.training_set_results"""'], {'pk': 'self.id'}), "('vktrainer.training_set_results', pk=self.id)\n", (2586, 2632), False, 'from flask import url_for, current_app as app\n'), ((2684, 2741), 'flask.url_for', 'url_for', (['"""vktrainer.training_set_leaderboard"""'], {'pk': 'self.id'}), "('vktrainer.training_set_leaderboard', pk=self.id)\n", (2691, 2741), False, 'from flask import url_for, current_app as app\n'), ((2896, 2925), 'sqlalchemy.func.count', 'func.count', (['TrainingResult.id'], {}), '(TrainingResult.id)\n', (2906, 2925), False, 'from sqlalchemy import func, desc\n'), ((5695, 5727), 'vktrainer.db.ForeignKey', 'db.ForeignKey', (['"""training_set.id"""'], {}), "('training_set.id')\n", (5708, 5727), False, 'from vktrainer import db, login_manager\n'), ((5751, 5764), 'vktrainer.db.String', 'db.String', (['(64)'], {}), '(64)\n', (5760, 5764), False, 'from vktrainer import db, login_manager\n'), ((5925, 5938), 'vktrainer.db.String', 'db.String', (['(64)'], {}), '(64)\n', (5934, 5938), False, 'from vktrainer import db, login_manager\n'), ((6364, 6396), 'vktrainer.db.ForeignKey', 'db.ForeignKey', (['"""training_set.id"""'], {}), "('training_set.id')\n", (6377, 6396), False, 'from vktrainer import db, login_manager\n'), ((6435, 6460), 'vktrainer.db.ForeignKey', 'db.ForeignKey', (['"""photo.id"""'], {}), "('photo.id')\n", (6448, 6460), False, 'from vktrainer import db, login_manager\n'), ((6498, 6522), 'vktrainer.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (6511, 6522), False, 'from vktrainer import db, login_manager\n'), ((6913, 7015), 'flask.url_for', 'url_for', (['"""vktrainer.training_set_result"""'], {'training_set_pk': 'self.training_set.id', 'result_pk': 'self.id'}), "('vktrainer.training_set_result', training_set_pk=self.training_set.\n id, result_pk=self.id)\n", (6920, 7015), False, 'from flask import url_for, current_app as app\n'), ((8231, 8262), 'vktrainer.db.session.add', 'db.session.add', (['training_result'], {}), '(training_result)\n', (8245, 8262), False, 'from vktrainer import db, login_manager\n'), ((8271, 8290), 'vktrainer.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8288, 8290), False, 'from vktrainer import db, login_manager\n'), ((868, 888), 'vktrainer.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (882, 888), False, 'from vktrainer import db, login_manager\n'), ((901, 920), 'vktrainer.db.session.commit', 'db.session.commit', ([], {}), '()\n', (918, 920), False, 'from vktrainer import db, login_manager\n'), ((2344, 2387), 
'vktrainer.db.backref', 'db.backref', (['"""training_sets"""'], {'lazy': '"""dynamic"""'}), "('training_sets', lazy='dynamic')\n", (2354, 2387), False, 'from vktrainer import db, login_manager\n'), ((5333, 5365), 'random.randint', 'random.randint', (['(0)', '(nb_photos - 1)'], {}), '(0, nb_photos - 1)\n', (5347, 5365), False, 'import random\n'), ((5857, 5895), 'vktrainer.db.backref', 'db.backref', (['"""patterns"""'], {'lazy': '"""dynamic"""'}), "('patterns', lazy='dynamic')\n", (5867, 5895), False, 'from vktrainer import db, login_manager\n'), ((6579, 6625), 'vktrainer.db.backref', 'db.backref', (['"""training_results"""'], {'lazy': '"""dynamic"""'}), "('training_results', lazy='dynamic')\n", (6589, 6625), False, 'from vktrainer import db, login_manager\n'), ((6714, 6744), 'vktrainer.db.backref', 'db.backref', (['"""training_results"""'], {}), "('training_results')\n", (6724, 6744), False, 'from vktrainer import db, login_manager\n'), ((5201, 5249), 'random.randint', 'random.randint', (['(0)', '(nb_photos_without_results - 1)'], {}), '(0, nb_photos_without_results - 1)\n', (5215, 5249), False, 'import random\n'), ((7240, 7263), 'json.loads', 'json.loads', (['self.result'], {}), '(self.result)\n', (7250, 7263), False, 'import json\n'), ((3132, 3143), 'sqlalchemy.desc', 'desc', (['count'], {}), '(count)\n', (3136, 3143), False, 'from sqlalchemy import func, desc\n')] |
import argparse
import os
from scipy.special import erf
from scipy.stats import truncnorm
import numpy as np
import data
def build_vector_cache(glove_filename, vec_cache_filename, vocab):
print("Building vector cache...")
with open(glove_filename) as f, open(vec_cache_filename, "w") as f2:
for line in f:
tok, vec = line.split(" ", 1)
if tok in vocab:
vocab.remove(tok)
f2.write("{} {}".format(tok, vec))
def discrete_tnorm(a, b, tgt_loc, sigma=1, n_steps=100):
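    # Discrete pmf over the integers a..b: shift a truncated normal until its mean
    # sits near tgt_loc, evaluate its pdf at each integer, then normalize.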
def phi(zeta):
return 1 / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * zeta**2)
def Phi(x):
return 0.5 * (1 + erf(x / np.sqrt(2)))
def tgt_loc_update(x):
y1 = phi((a - x) / sigma)
y2 = phi((b - x) / sigma)
x1 = Phi((b - x) / sigma)
x2 = Phi((a - x) / sigma)
denom = x1 - x2 + 1E-4
return y1 / denom - y2 / denom
x = tgt_loc
direction = np.sign(tgt_loc - (b - a))
for _ in range(n_steps):
x = tgt_loc - sigma * tgt_loc_update(x)
tn = truncnorm((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)
rrange = np.arange(a, b + 1)
pmf = tn.pdf(rrange)
pmf /= np.sum(pmf)
return pmf
def discrete_lerp(a, b, ground_truth):
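    # Two-point pmf over the integers a..b: split the mass between the floor and
    # ceiling of ground_truth so the pmf's expectation equals ground_truth.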
pmf = np.zeros(b - a + 1)
c = int(np.ceil(ground_truth + 1E-8))
f = int(np.floor(ground_truth))
pmf[min(c - a, b - a)] = ground_truth - f
pmf[f - a] = c - ground_truth
return pmf
def smoothed_labels(truth, n_labels):
return discrete_lerp(1, n_labels, truth)
def preprocess(filename, output_name="sim_sparse.txt"):
print("Preprocessing {}...".format(filename))
with open(filename) as f:
values = [float(l.strip()) for l in f.readlines()]
values = [" ".join([str(l) for l in smoothed_labels(v, 5)]) for v in values]
with open(os.path.join(os.path.dirname(filename), output_name), "w") as f:
f.write("\n".join(values))
def add_vocab(tok_filename, vocab):
with open(tok_filename) as f:
for line in f:
vocab.update(line.strip().split())
def main():
base_conf = data.Configs.base_config()
sick_conf = data.Configs.sick_config()
sick_folder = sick_conf.sick_data
vocab = set()
for name in ("train", "dev", "test"):
preprocess(os.path.join(sick_folder, name, "sim.txt"))
add_vocab(os.path.join(sick_folder, name, "a.toks"), vocab)
add_vocab(os.path.join(sick_folder, name, "b.toks"), vocab)
build_vector_cache(base_conf.wordvecs_file, sick_conf.sick_cache, vocab)
if __name__ == "__main__":
main()
| [
"numpy.sum",
"data.Configs.base_config",
"numpy.ceil",
"scipy.stats.truncnorm",
"numpy.floor",
"numpy.zeros",
"data.Configs.sick_config",
"os.path.dirname",
"numpy.arange",
"numpy.exp",
"numpy.sign",
"os.path.join",
"numpy.sqrt"
] | [((952, 978), 'numpy.sign', 'np.sign', (['(tgt_loc - (b - a))'], {}), '(tgt_loc - (b - a))\n', (959, 978), True, 'import numpy as np\n'), ((1065, 1128), 'scipy.stats.truncnorm', 'truncnorm', (['((a - x) / sigma)', '((b - x) / sigma)'], {'loc': 'x', 'scale': 'sigma'}), '((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)\n', (1074, 1128), False, 'from scipy.stats import truncnorm\n'), ((1142, 1161), 'numpy.arange', 'np.arange', (['a', '(b + 1)'], {}), '(a, b + 1)\n', (1151, 1161), True, 'import numpy as np\n'), ((1198, 1209), 'numpy.sum', 'np.sum', (['pmf'], {}), '(pmf)\n', (1204, 1209), True, 'import numpy as np\n'), ((1275, 1294), 'numpy.zeros', 'np.zeros', (['(b - a + 1)'], {}), '(b - a + 1)\n', (1283, 1294), True, 'import numpy as np\n'), ((2113, 2139), 'data.Configs.base_config', 'data.Configs.base_config', ([], {}), '()\n', (2137, 2139), False, 'import data\n'), ((2156, 2182), 'data.Configs.sick_config', 'data.Configs.sick_config', ([], {}), '()\n', (2180, 2182), False, 'import data\n'), ((1307, 1336), 'numpy.ceil', 'np.ceil', (['(ground_truth + 1e-08)'], {}), '(ground_truth + 1e-08)\n', (1314, 1336), True, 'import numpy as np\n'), ((1349, 1371), 'numpy.floor', 'np.floor', (['ground_truth'], {}), '(ground_truth)\n', (1357, 1371), True, 'import numpy as np\n'), ((600, 624), 'numpy.exp', 'np.exp', (['(-0.5 * zeta ** 2)'], {}), '(-0.5 * zeta ** 2)\n', (606, 624), True, 'import numpy as np\n'), ((2300, 2342), 'os.path.join', 'os.path.join', (['sick_folder', 'name', '"""sim.txt"""'], {}), "(sick_folder, name, 'sim.txt')\n", (2312, 2342), False, 'import os\n'), ((2362, 2403), 'os.path.join', 'os.path.join', (['sick_folder', 'name', '"""a.toks"""'], {}), "(sick_folder, name, 'a.toks')\n", (2374, 2403), False, 'import os\n'), ((2430, 2471), 'os.path.join', 'os.path.join', (['sick_folder', 'name', '"""b.toks"""'], {}), "(sick_folder, name, 'b.toks')\n", (2442, 2471), False, 'import os\n'), ((578, 596), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (585, 596), True, 'import numpy as np\n'), ((1856, 1881), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1871, 1881), False, 'import os\n'), ((673, 683), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (680, 683), True, 'import numpy as np\n')] |
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
pipelines = pd.read_csv('OntoGasGrid/pipeline_owl_generator/pipeline_split.csv').to_numpy()
offtakes = pd.read_csv('OntoGasGrid/grid_component_owl_generator/grid_component_data.csv').to_numpy()
n_offt = len(offtakes[:,0])
n_cons = len(pipelines[:,0])
closest_connection = np.zeros((n_offt,2),dtype=object)
def connection_name_get(i):
grid_line = pipelines[i,3]
connect_num = pipelines[i,8]
return grid_line + ' ' + str(connect_num) + ' Connection'
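# For every offtake with valid coordinates, find the nearest pipeline connection
# by straight-line distance in lat/lng space and record its name and pipeline attribute.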
for i in tqdm(range(n_offt)):
if offtakes[i,2] != '#VALUE!':
dist_store = []
max_dist = 1000
off_lat = float(offtakes[i,2])
off_lng = float(offtakes[i,1])
for ii in range(n_cons):
con_lat = float(pipelines[ii,0])
con_lng = float(pipelines[ii,1])
dist = np.sqrt((off_lat-con_lat)**2+(off_lng-con_lng)**2)
if dist < max_dist:
closest_connection[i,0] = connection_name_get(ii)
closest_connection[i,1] = pipelines[ii,2]
max_dist = dist
closest_connection = pd.DataFrame(closest_connection).to_csv('OntoGasGrid/grid_component_owl_generator/closest connection.csv',index=False,header=False)
| [
"pandas.read_csv",
"numpy.zeros",
"pandas.DataFrame",
"numpy.sqrt"
] | [((373, 408), 'numpy.zeros', 'np.zeros', (['(n_offt, 2)'], {'dtype': 'object'}), '((n_offt, 2), dtype=object)\n', (381, 408), True, 'import numpy as np\n'), ((110, 178), 'pandas.read_csv', 'pd.read_csv', (['"""OntoGasGrid/pipeline_owl_generator/pipeline_split.csv"""'], {}), "('OntoGasGrid/pipeline_owl_generator/pipeline_split.csv')\n", (121, 178), True, 'import pandas as pd\n'), ((201, 280), 'pandas.read_csv', 'pd.read_csv', (['"""OntoGasGrid/grid_component_owl_generator/grid_component_data.csv"""'], {}), "('OntoGasGrid/grid_component_owl_generator/grid_component_data.csv')\n", (212, 280), True, 'import pandas as pd\n'), ((1182, 1214), 'pandas.DataFrame', 'pd.DataFrame', (['closest_connection'], {}), '(closest_connection)\n', (1194, 1214), True, 'import pandas as pd\n'), ((904, 964), 'numpy.sqrt', 'np.sqrt', (['((off_lat - con_lat) ** 2 + (off_lng - con_lng) ** 2)'], {}), '((off_lat - con_lat) ** 2 + (off_lng - con_lng) ** 2)\n', (911, 964), True, 'import numpy as np\n')] |
import logging
import time
import os
import subprocess as sp
from governor.etcd import Client as Etcd
from governor.postgresql import Postgresql
from governor.ha import Ha
import etcd
class Governor:
INIT_SCRIPT_DIR = '/docker-entrypoint-initdb.d'
def __init__(self, config, psql_config):
self.advertise_url = config.advertise_url
self.loop_time = config.loop_time
self.connect_to_etcd(config)
self.psql = Postgresql(config, psql_config)
self.ha = Ha(self.psql, self.etcd)
self.name = self.psql.name
def run_init_scripts(self):
# run all the scripts /docker-entrypoint-initdb.d/*.sh
if not os.path.isdir(self.INIT_SCRIPT_DIR):
return
for file in os.listdir(self.INIT_SCRIPT_DIR):
file = os.path.join(self.INIT_SCRIPT_DIR, file)
if not file.endswith('.sh') or not os.path.isfile(file):
continue
logging.info('Running init script: %s', file)
if sp.call(['sh', file]) != 0:
logging.warn('Failed to run init script: %s', file)
def connect_to_etcd(self, config):
while True:
logging.info('waiting on etcd')
try:
self.etcd = Etcd(config)
except (ConnectionRefusedError, etcd.EtcdConnectionFailed) as e:
logging.error('Error communicating with etcd: %s', e)
else:
return
time.sleep(5)
def keep_alive(self):
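        # Refresh this member's key in etcd (with a TTL) so the cluster keeps seeing this node as alive.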
value = self.advertise_url
try:
self.etcd.write_scoped(self.name, value, ttl=self.etcd.ttl, prevValue=value)
except etcd.EtcdKeyNotFound:
self.etcd.write_scoped(self.name, value, ttl=self.etcd.ttl, prevExist=False)
def initialize(self, force_leader=False):
self.keep_alive()
# is data directory empty?
if not self.psql.data_directory_empty():
self.load_psql()
elif not self.init_cluster(force_leader):
self.sync_from_leader()
self.run_init_scripts()
def init_cluster(self, force_leader=False):
try:
self.etcd.init_cluster(self.name)
except etcd.EtcdAlreadyExist:
if not force_leader:
return False
self.psql.initialize()
self.etcd.take_leadership(self.name, first = not force_leader)
self.psql.start()
self.psql.create_users()
return True
def sync_from_leader(self):
while True:
logging.info('resolving leader')
try:
cluster = self.etcd.get_cluster()
except etcd.EtcdKeyNotFound:
cluster = None
if cluster and cluster.leader:
logging.info('syncing with leader')
if self.psql.sync_from_leader(cluster.leader):
self.psql.write_recovery_conf(cluster.leader)
self.psql.start()
return True
time.sleep(5)
def load_psql(self):
self.psql.start()
if self.psql.is_running():
self.psql.load_replication_slots()
def run(self):
while True:
self.keep_alive()
logging.info(self.ha.run_cycle())
self.ha.sync_replication_slots()
time.sleep(self.loop_time)
def cleanup(self):
self.psql.stop()
self.etcd.delete(os.path.join(self.etcd.scope, self.name))
try:
self.etcd.vacate_leadership(self.name)
except (etcd.EtcdCompareFailed, etcd.EtcdKeyNotFound):
pass
| [
"logging.error",
"governor.etcd.Client",
"os.path.isdir",
"logging.warn",
"governor.ha.Ha",
"time.sleep",
"governor.postgresql.Postgresql",
"logging.info",
"os.path.isfile",
"subprocess.call",
"os.path.join",
"os.listdir"
] | [((451, 482), 'governor.postgresql.Postgresql', 'Postgresql', (['config', 'psql_config'], {}), '(config, psql_config)\n', (461, 482), False, 'from governor.postgresql import Postgresql\n'), ((501, 525), 'governor.ha.Ha', 'Ha', (['self.psql', 'self.etcd'], {}), '(self.psql, self.etcd)\n', (503, 525), False, 'from governor.ha import Ha\n'), ((749, 781), 'os.listdir', 'os.listdir', (['self.INIT_SCRIPT_DIR'], {}), '(self.INIT_SCRIPT_DIR)\n', (759, 781), False, 'import os\n'), ((673, 708), 'os.path.isdir', 'os.path.isdir', (['self.INIT_SCRIPT_DIR'], {}), '(self.INIT_SCRIPT_DIR)\n', (686, 708), False, 'import os\n'), ((802, 842), 'os.path.join', 'os.path.join', (['self.INIT_SCRIPT_DIR', 'file'], {}), '(self.INIT_SCRIPT_DIR, file)\n', (814, 842), False, 'import os\n'), ((949, 994), 'logging.info', 'logging.info', (['"""Running init script: %s"""', 'file'], {}), "('Running init script: %s', file)\n", (961, 994), False, 'import logging\n'), ((1178, 1209), 'logging.info', 'logging.info', (['"""waiting on etcd"""'], {}), "('waiting on etcd')\n", (1190, 1209), False, 'import logging\n'), ((1468, 1481), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1478, 1481), False, 'import time\n'), ((2531, 2563), 'logging.info', 'logging.info', (['"""resolving leader"""'], {}), "('resolving leader')\n", (2543, 2563), False, 'import logging\n'), ((3010, 3023), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3020, 3023), False, 'import time\n'), ((3331, 3357), 'time.sleep', 'time.sleep', (['self.loop_time'], {}), '(self.loop_time)\n', (3341, 3357), False, 'import time\n'), ((3432, 3472), 'os.path.join', 'os.path.join', (['self.etcd.scope', 'self.name'], {}), '(self.etcd.scope, self.name)\n', (3444, 3472), False, 'import os\n'), ((1010, 1031), 'subprocess.call', 'sp.call', (["['sh', file]"], {}), "(['sh', file])\n", (1017, 1031), True, 'import subprocess as sp\n'), ((1054, 1105), 'logging.warn', 'logging.warn', (['"""Failed to run init script: %s"""', 'file'], {}), "('Failed to run init script: %s', file)\n", (1066, 1105), False, 'import logging\n'), ((1255, 1267), 'governor.etcd.Client', 'Etcd', (['config'], {}), '(config)\n', (1259, 1267), True, 'from governor.etcd import Client as Etcd\n'), ((2763, 2798), 'logging.info', 'logging.info', (['"""syncing with leader"""'], {}), "('syncing with leader')\n", (2775, 2798), False, 'import logging\n'), ((890, 910), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (904, 910), False, 'import os\n'), ((1361, 1414), 'logging.error', 'logging.error', (['"""Error communicating with etcd: %s"""', 'e'], {}), "('Error communicating with etcd: %s', e)\n", (1374, 1414), False, 'import logging\n')] |
#!/usr/bin/env python3
import random
import unittest
import networkit as nk
class TestMatchingAlgorithms(unittest.TestCase):
def generateRandomWeights(self, g):
if not g.isWeighted():
g = nk.graphtools.toWeighted(g)
for e in g.iterEdges():
g.setWeight(e[0], e[1], random.random())
return g
def setUp(self):
self.g = nk.readGraph("input/PGPgiantcompo.graph", nk.Format.METIS)
self.gw = self.generateRandomWeights(self.g)
def hasUnmatchedNeighbors(self, g, m):
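        # True iff some edge still has both endpoints unmatched, i.e. the matching is not maximal.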
for e in g.iterEdges():
if not m.isMatched(e[0]) and not m.isMatched(e[1]):
return True
return False
def testPathGrowingMatcher(self):
def runAlgo(g):
pgm = nk.matching.PathGrowingMatcher(self.g)
pgm.run()
m = pgm.getMatching()
runAlgo(self.g)
runAlgo(self.gw)
def testSuitorMatcher(self):
def doTest(g):
m1 = nk.matching.SuitorMatcher(g, False).run().getMatching()
nk.graphtools.sortEdgesByWeight(g, True)
self.assertTrue(m1.isProper(g))
self.assertFalse(self.hasUnmatchedNeighbors(g, m1))
m2 = nk.matching.SuitorMatcher(g, True).run().getMatching()
self.assertTrue(m2.isProper(g))
self.assertFalse(self.hasUnmatchedNeighbors(g, m2))
for u in g.iterNodes():
self.assertEqual(m1.mate(u), m2.mate(u))
doTest(self.g)
doTest(self.gw)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"networkit.graphtools.sortEdgesByWeight",
"networkit.matching.PathGrowingMatcher",
"random.random",
"networkit.graphtools.toWeighted",
"networkit.readGraph",
"networkit.matching.SuitorMatcher"
] | [((1312, 1327), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1325, 1327), False, 'import unittest\n'), ((337, 395), 'networkit.readGraph', 'nk.readGraph', (['"""input/PGPgiantcompo.graph"""', 'nk.Format.METIS'], {}), "('input/PGPgiantcompo.graph', nk.Format.METIS)\n", (349, 395), True, 'import networkit as nk\n'), ((198, 225), 'networkit.graphtools.toWeighted', 'nk.graphtools.toWeighted', (['g'], {}), '(g)\n', (222, 225), True, 'import networkit as nk\n'), ((659, 697), 'networkit.matching.PathGrowingMatcher', 'nk.matching.PathGrowingMatcher', (['self.g'], {}), '(self.g)\n', (689, 697), True, 'import networkit as nk\n'), ((890, 930), 'networkit.graphtools.sortEdgesByWeight', 'nk.graphtools.sortEdgesByWeight', (['g', '(True)'], {}), '(g, True)\n', (921, 930), True, 'import networkit as nk\n'), ((279, 294), 'random.random', 'random.random', ([], {}), '()\n', (292, 294), False, 'import random\n'), ((831, 866), 'networkit.matching.SuitorMatcher', 'nk.matching.SuitorMatcher', (['g', '(False)'], {}), '(g, False)\n', (856, 866), True, 'import networkit as nk\n'), ((1030, 1064), 'networkit.matching.SuitorMatcher', 'nk.matching.SuitorMatcher', (['g', '(True)'], {}), '(g, True)\n', (1055, 1064), True, 'import networkit as nk\n')] |
#The heart of the bot.
#--------- Libraries ---------#
import discord, os, settings
from tools.logging import ABLog
from discord.ext import commands
#--------- Variables ---------#
INTENTS = discord.Intents.all()
client = commands.Bot(command_prefix = settings.ABPrefixes, intents = INTENTS, help_command=None)
client.remove_command('help')
#--------- Code ---------#
@client.command() #Loads specified cog.
async def load(ctx, extension):
if ctx.message.author.id == settings.AdminID:
client.load_extension(f'cogs.{extension}')
ABLog(f"Loading {extension}")
await ctx.send(f'The cog {extension} was loaded')
@client.command() #Reloads specified cog.
async def reload(ctx, extension):
if ctx.message.author.id == settings.AdminID:
client.reload_extension(f'cogs.{extension}')
ABLog(f"Reloading {extension}")
await ctx.send(f'The cog {extension} was reloaded')
@client.command() #Unloads specified cog.
async def unload(ctx, extension):
if ctx.message.author.id == settings.AdminID:
client.unload_extension(f'cogs.{extension}')
ABLog(f"Unloading {extension}")
await ctx.send(f'The cog {extension} was unloaded')
for filename in os.listdir('./cogs'): #Initial load of all cogs.
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
print(f"Loaded {filename}")
@client.event #Changes the custom status.
async def on_ready():
activity = discord.Activity(name=settings.Custom_Status, type=discord.ActivityType.playing)
ABLog("Bot loaded.")
print(f'{client.user} has connected to Discord!')
await client.change_presence(activity=activity)
client.run(settings.Token) #Gets the bot online!
| [
"discord.Activity",
"tools.logging.ABLog",
"discord.ext.commands.Bot",
"os.listdir",
"discord.Intents.all"
] | [((194, 215), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (213, 215), False, 'import discord, os, settings\n'), ((225, 313), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': 'settings.ABPrefixes', 'intents': 'INTENTS', 'help_command': 'None'}), '(command_prefix=settings.ABPrefixes, intents=INTENTS,\n help_command=None)\n', (237, 313), False, 'from discord.ext import commands\n'), ((1161, 1181), 'os.listdir', 'os.listdir', (['"""./cogs"""'], {}), "('./cogs')\n", (1171, 1181), False, 'import discord, os, settings\n'), ((1400, 1485), 'discord.Activity', 'discord.Activity', ([], {'name': 'settings.Custom_Status', 'type': 'discord.ActivityType.playing'}), '(name=settings.Custom_Status, type=discord.ActivityType.playing\n )\n', (1416, 1485), False, 'import discord, os, settings\n'), ((1485, 1505), 'tools.logging.ABLog', 'ABLog', (['"""Bot loaded."""'], {}), "('Bot loaded.')\n", (1490, 1505), False, 'from tools.logging import ABLog\n'), ((541, 570), 'tools.logging.ABLog', 'ABLog', (['f"""Loading {extension}"""'], {}), "(f'Loading {extension}')\n", (546, 570), False, 'from tools.logging import ABLog\n'), ((797, 828), 'tools.logging.ABLog', 'ABLog', (['f"""Reloading {extension}"""'], {}), "(f'Reloading {extension}')\n", (802, 828), False, 'from tools.logging import ABLog\n'), ((1057, 1088), 'tools.logging.ABLog', 'ABLog', (['f"""Unloading {extension}"""'], {}), "(f'Unloading {extension}')\n", (1062, 1088), False, 'from tools.logging import ABLog\n')] |
# -*- coding: utf-8 -*-
from frontera.contrib.backends.remote.codecs.json import Encoder as JsonEncoder, Decoder as JsonDecoder
from frontera.contrib.backends.remote.codecs.msgpack import Encoder as MsgPackEncoder, Decoder as MsgPackDecoder
from frontera.core.models import Request, Response
import pytest
@pytest.mark.parametrize(
('encoder', 'decoder'), [
(MsgPackEncoder, MsgPackDecoder),
(JsonEncoder, JsonDecoder)
]
)
def test_codec(encoder, decoder):
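    # Round-trip every message type through the encoder/decoder pair and check the decoded fields.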
def check_request(req1, req2):
assert req1.url == req2.url and req1.meta == req2.meta and req1.headers == req2.headers
enc = encoder(Request, send_body=True)
dec = decoder(Request, Response)
req = Request(url="http://www.yandex.ru", meta={"test": "shmest"}, headers={'reqhdr': 'value'})
req2 = Request(url="http://www.yandex.ru/search")
msgs = [
enc.encode_add_seeds([req]),
enc.encode_page_crawled(Response(url="http://www.yandex.ru", body='SOME CONTENT', headers={'hdr': 'value'},
request=req), [req2]),
enc.encode_request_error(req, "Host not found"),
enc.encode_update_score("1be68ff556fd0bbe5802d1a100850da29f7f15b1", 0.51, "http://yandex.ru", True),
enc.encode_new_job_id(1),
enc.encode_offset(0, 28796),
enc.encode_request(req)
]
it = iter(msgs)
o = dec.decode(it.next())
assert o[0] == 'add_seeds'
assert type(o[1]) == list
req_d = o[1][0]
check_request(req_d, req)
assert type(req_d) == Request
o = dec.decode(it.next())
assert o[0] == 'page_crawled'
assert type(o[1]) == Response
assert o[1].url == req.url and o[1].body == 'SOME CONTENT' and o[1].meta == req.meta
assert type(o[2]) == list
req_d = o[2][0]
assert type(req_d) == Request
assert req_d.url == req2.url
o_type, o_req, o_error = dec.decode(it.next())
assert o_type == 'request_error'
check_request(o_req, req)
assert o_error == "Host not found"
o_type, fprint, score, url, schedule = dec.decode(it.next())
assert o_type == 'update_score'
assert fprint == "1be68ff556fd0bbe5802d1a100850da29f7f15b1"
assert score == 0.51
assert url == "http://yandex.ru"
assert schedule is True
o_type, job_id = dec.decode(it.next())
assert o_type == 'new_job_id'
assert job_id == 1
o_type, partition_id, offset = dec.decode(it.next())
assert o_type == 'offset'
assert partition_id == 0
assert offset == 28796
o = dec.decode_request(it.next())
check_request(o, req) | [
"frontera.core.models.Request",
"pytest.mark.parametrize",
"frontera.core.models.Response"
] | [((310, 425), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('encoder', 'decoder')", '[(MsgPackEncoder, MsgPackDecoder), (JsonEncoder, JsonDecoder)]'], {}), "(('encoder', 'decoder'), [(MsgPackEncoder,\n MsgPackDecoder), (JsonEncoder, JsonDecoder)])\n", (333, 425), False, 'import pytest\n'), ((706, 800), 'frontera.core.models.Request', 'Request', ([], {'url': '"""http://www.yandex.ru"""', 'meta': "{'test': 'shmest'}", 'headers': "{'reqhdr': 'value'}"}), "(url='http://www.yandex.ru', meta={'test': 'shmest'}, headers={\n 'reqhdr': 'value'})\n", (713, 800), False, 'from frontera.core.models import Request, Response\n'), ((807, 849), 'frontera.core.models.Request', 'Request', ([], {'url': '"""http://www.yandex.ru/search"""'}), "(url='http://www.yandex.ru/search')\n", (814, 849), False, 'from frontera.core.models import Request, Response\n'), ((932, 1032), 'frontera.core.models.Response', 'Response', ([], {'url': '"""http://www.yandex.ru"""', 'body': '"""SOME CONTENT"""', 'headers': "{'hdr': 'value'}", 'request': 'req'}), "(url='http://www.yandex.ru', body='SOME CONTENT', headers={'hdr':\n 'value'}, request=req)\n", (940, 1032), False, 'from frontera.core.models import Request, Response\n')] |
import keras
'''
Helper methods and variables for mnist models and manifolds
'''
color_list = [
"red",
"orange",
"yellow",
"lime",
"green",
"cyan",
"blue",
"purple",
"fuchsia",
"peru",
]
# # Returns 4D np array (1, HEIGHT, WIDTH, 1)
# def tensor_to_numpy(t):
# sess = K.get_session()
# t_np = sess.run(t)
# # Get rid of the extra dimension
# t_np = t_np.reshape(1, HEIGHT, WIDTH, 1)
# return t_np
def convert_to_model(seq_model):
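    # Rebuild a Sequential model as an equivalent functional keras Model over the same layers.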
# From https://github.com/keras-team/keras/issues/10386
input_layer = keras.layers.Input(batch_shape=seq_model.layers[0].input_shape)
prev_layer = input_layer
for layer in seq_model.layers:
layer._inbound_nodes = []
prev_layer = layer(prev_layer)
funcmodel = keras.models.Model([input_layer], [prev_layer])
return funcmodel | [
"keras.layers.Input",
"keras.models.Model"
] | [((575, 638), 'keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': 'seq_model.layers[0].input_shape'}), '(batch_shape=seq_model.layers[0].input_shape)\n', (593, 638), False, 'import keras\n'), ((792, 839), 'keras.models.Model', 'keras.models.Model', (['[input_layer]', '[prev_layer]'], {}), '([input_layer], [prev_layer])\n', (810, 839), False, 'import keras\n')] |
# package imports
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
from dash import no_update
from flask import session
# local imports
from auth import authenticate_user, validate_login_session
from server import app, server, ui
# login layout content
def login_layout():
return html.Div(
[
dcc.Location(id='login-url', pathname='/login', refresh=False),
dbc.Container(
[
dbc.Row(
dbc.Col(
dbc.Card(
[
html.H4('Login', className='card-title'),
dbc.Input(id='login-email', placeholder='User', autoFocus=True),
dbc.Input(id='login-password', placeholder='Password', type='password'),
dbc.Button('Submit', id='login-button', color='success', block=True),
html.Br(),
html.Div(id='login-alert')
],
body=True
),
width=6
),
justify='center'
)
]
)
]
)
# home layout content
are_sure = dbc.Modal(
[
dbc.ModalHeader("Logout"),
dbc.ModalBody("Are you sure?"),
dbc.ModalFooter(
dbc.Row(
[
dbc.Col(dbc.Button("Yes", id="yes-are_sure")),
dbc.Col(dbc.Button("Close", id="close-are_sure")),
],
justify="center",
)
),
],
id="modal-are_sure",
centered=True,
)
test_page = html.Div([
html.H1("DashUI test!"),
html.Br(),
html.H6("Change the value in the text box to see callbacks in action!"),
dbc.Input(id='my-input', value='initial value', type='text', autoFocus=True),
html.Br(),
html.Div(id='my-output'),
are_sure,
])
@validate_login_session
def app_layout():
return \
html.Div([
dcc.Location(id='home-url',pathname='/home'),
dbc.Container(
[
dbc.Row(
dbc.Col(
test_page,
),
justify='center'
),
html.Br(),
dbc.Row(
dbc.Col(
dbc.Button('Logout', id='logout-button', color='danger', block=True, size='sm'),
width=4
),
justify='center'
),
html.Br()
],
)
]
)
# main app layout
app.layout = html.Div(
[
dcc.Location(id='url', refresh=False),
html.Div(
login_layout(),
id='page-content'
),
]
)
###############################################################################
# utilities
###############################################################################
# router
@app.callback(
Output('page-content', 'children'),
[Input('url', 'pathname')]
)
def router(url):
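    # Map the current pathname to a layout; unknown paths fall back to the login page.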
if url=='/home':
return app_layout()
elif url=='/login':
return login_layout()
else:
return login_layout()
# authenticate
@app.callback(
[Output('url', 'pathname'),
Output('login-alert', 'children')],
[Input('login-button', 'n_clicks'),
Input('login-email',' n_submit'),
Input('login-password', '<PASSWORD>'),
],
[State('login-email', 'value'),
State('login-password', 'value')])
def login_auth(n_clicks, n_submit_email, n_submit_password, email ,pw):
'''
check credentials
if correct, authenticate the session
    otherwise, mark the session as unauthenticated and show an error alert
'''
if n_clicks is None \
and n_submit_email is None \
and n_submit_password is None:
return no_update, no_update
credentials = {'user':email, "password":pw}
if authenticate_user(credentials):
session['authed'] = True
return '/home', ''
session['authed'] = False
return no_update, dbc.Alert('Incorrect credentials.', color='danger', dismissable=True)
@app.callback(
Output('home-url', 'pathname'),
[Input('yes-are_sure', 'n_clicks')]
)
def logout_(n_clicks):
'''clear the session and send user to login'''
if n_clicks is None:
return no_update
session['authed'] = False
return '/login'
@app.callback(
[Output('modal-are_sure', 'is_open'),
Output('close-are_sure', 'n_clicks')],
[Input('logout-button', 'n_clicks'),
Input('close-are_sure', 'n_clicks')],
[State('modal-are_sure', 'is_open')],
)
def logout_modal(logout_click, close_click, is_open):
if close_click is not None:
return False, None
elif logout_click is not None:
return True, None
else:
return is_open, close_click
###############################################################################
# callbacks
###############################################################################
# @app.callback(
# Output('...'),
# [Input('...')]
# )
# def func(...):
# ...
@app.callback(
Output(component_id='my-output', component_property='children'),
[Input(component_id='my-input', component_property='value')]
)
def update_output_div(input_value):
return f'Output: {input_value}'
###############################################################################
# run app
###############################################################################
if __name__ == "__main__":
ui.run()
| [
"dash_html_components.H6",
"dash_bootstrap_components.Input",
"auth.authenticate_user",
"dash_html_components.Br",
"dash_core_components.Location",
"server.ui.run",
"dash_bootstrap_components.ModalBody",
"dash_html_components.Div",
"dash_bootstrap_components.Alert",
"dash.dependencies.State",
"dash_bootstrap_components.Button",
"dash.dependencies.Input",
"dash_bootstrap_components.Col",
"dash_html_components.H4",
"dash_bootstrap_components.ModalHeader",
"dash_html_components.H1",
"dash.dependencies.Output"
] | [((3416, 3450), 'dash.dependencies.Output', 'Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (3422, 3450), False, 'from dash.dependencies import Input, Output, State\n'), ((4363, 4393), 'auth.authenticate_user', 'authenticate_user', (['credentials'], {}), '(credentials)\n', (4380, 4393), False, 'from auth import authenticate_user, validate_login_session\n'), ((4597, 4627), 'dash.dependencies.Output', 'Output', (['"""home-url"""', '"""pathname"""'], {}), "('home-url', 'pathname')\n", (4603, 4627), False, 'from dash.dependencies import Input, Output, State\n'), ((5579, 5642), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""my-output"""', 'component_property': '"""children"""'}), "(component_id='my-output', component_property='children')\n", (5585, 5642), False, 'from dash.dependencies import Input, Output, State\n'), ((5986, 5994), 'server.ui.run', 'ui.run', ([], {}), '()\n', (5992, 5994), False, 'from server import app, server, ui\n'), ((1532, 1557), 'dash_bootstrap_components.ModalHeader', 'dbc.ModalHeader', (['"""Logout"""'], {}), "('Logout')\n", (1547, 1557), True, 'import dash_bootstrap_components as dbc\n'), ((1567, 1597), 'dash_bootstrap_components.ModalBody', 'dbc.ModalBody', (['"""Are you sure?"""'], {}), "('Are you sure?')\n", (1580, 1597), True, 'import dash_bootstrap_components as dbc\n'), ((1960, 1983), 'dash_html_components.H1', 'html.H1', (['"""DashUI test!"""'], {}), "('DashUI test!')\n", (1967, 1983), True, 'import dash_html_components as html\n'), ((1989, 1998), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1996, 1998), True, 'import dash_html_components as html\n'), ((2004, 2075), 'dash_html_components.H6', 'html.H6', (['"""Change the value in the text box to see callbacks in action!"""'], {}), "('Change the value in the text box to see callbacks in action!')\n", (2011, 2075), True, 'import dash_html_components as html\n'), ((2081, 2157), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'id': '"""my-input"""', 'value': '"""initial value"""', 'type': '"""text"""', 'autoFocus': '(True)'}), "(id='my-input', value='initial value', type='text', autoFocus=True)\n", (2090, 2157), True, 'import dash_bootstrap_components as dbc\n'), ((2163, 2172), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2170, 2172), True, 'import dash_html_components as html\n'), ((2178, 2202), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""my-output"""'}), "(id='my-output')\n", (2186, 2202), True, 'import dash_html_components as html\n'), ((3079, 3116), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (3091, 3116), True, 'import dash_core_components as dcc\n'), ((3457, 3481), 'dash.dependencies.Input', 'Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (3462, 3481), False, 'from dash.dependencies import Input, Output, State\n'), ((4507, 4576), 'dash_bootstrap_components.Alert', 'dbc.Alert', (['"""Incorrect credentials."""'], {'color': '"""danger"""', 'dismissable': '(True)'}), "('Incorrect credentials.', color='danger', dismissable=True)\n", (4516, 4576), True, 'import dash_bootstrap_components as dbc\n'), ((3682, 3707), 'dash.dependencies.Output', 'Output', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (3688, 3707), False, 'from dash.dependencies import Input, Output, State\n'), ((3714, 3747), 'dash.dependencies.Output', 'Output', (['"""login-alert"""', '"""children"""'], {}), 
"('login-alert', 'children')\n", (3720, 3747), False, 'from dash.dependencies import Input, Output, State\n'), ((3755, 3788), 'dash.dependencies.Input', 'Input', (['"""login-button"""', '"""n_clicks"""'], {}), "('login-button', 'n_clicks')\n", (3760, 3788), False, 'from dash.dependencies import Input, Output, State\n'), ((3795, 3828), 'dash.dependencies.Input', 'Input', (['"""login-email"""', '""" n_submit"""'], {}), "('login-email', ' n_submit')\n", (3800, 3828), False, 'from dash.dependencies import Input, Output, State\n'), ((3834, 3871), 'dash.dependencies.Input', 'Input', (['"""login-password"""', '"""<PASSWORD>"""'], {}), "('login-password', '<PASSWORD>')\n", (3839, 3871), False, 'from dash.dependencies import Input, Output, State\n'), ((3885, 3914), 'dash.dependencies.State', 'State', (['"""login-email"""', '"""value"""'], {}), "('login-email', 'value')\n", (3890, 3914), False, 'from dash.dependencies import Input, Output, State\n'), ((3921, 3953), 'dash.dependencies.State', 'State', (['"""login-password"""', '"""value"""'], {}), "('login-password', 'value')\n", (3926, 3953), False, 'from dash.dependencies import Input, Output, State\n'), ((4634, 4667), 'dash.dependencies.Input', 'Input', (['"""yes-are_sure"""', '"""n_clicks"""'], {}), "('yes-are_sure', 'n_clicks')\n", (4639, 4667), False, 'from dash.dependencies import Input, Output, State\n'), ((4866, 4901), 'dash.dependencies.Output', 'Output', (['"""modal-are_sure"""', '"""is_open"""'], {}), "('modal-are_sure', 'is_open')\n", (4872, 4901), False, 'from dash.dependencies import Input, Output, State\n'), ((4908, 4944), 'dash.dependencies.Output', 'Output', (['"""close-are_sure"""', '"""n_clicks"""'], {}), "('close-are_sure', 'n_clicks')\n", (4914, 4944), False, 'from dash.dependencies import Input, Output, State\n'), ((4952, 4986), 'dash.dependencies.Input', 'Input', (['"""logout-button"""', '"""n_clicks"""'], {}), "('logout-button', 'n_clicks')\n", (4957, 4986), False, 'from dash.dependencies import Input, Output, State\n'), ((4993, 5028), 'dash.dependencies.Input', 'Input', (['"""close-are_sure"""', '"""n_clicks"""'], {}), "('close-are_sure', 'n_clicks')\n", (4998, 5028), False, 'from dash.dependencies import Input, Output, State\n'), ((5036, 5070), 'dash.dependencies.State', 'State', (['"""modal-are_sure"""', '"""is_open"""'], {}), "('modal-are_sure', 'is_open')\n", (5041, 5070), False, 'from dash.dependencies import Input, Output, State\n'), ((5649, 5707), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""my-input"""', 'component_property': '"""value"""'}), "(component_id='my-input', component_property='value')\n", (5654, 5707), False, 'from dash.dependencies import Input, Output, State\n'), ((443, 505), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""login-url"""', 'pathname': '"""/login"""', 'refresh': '(False)'}), "(id='login-url', pathname='/login', refresh=False)\n", (455, 505), True, 'import dash_core_components as dcc\n'), ((2308, 2353), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""home-url"""', 'pathname': '"""/home"""'}), "(id='home-url', pathname='/home')\n", (2320, 2353), True, 'import dash_core_components as dcc\n'), ((2612, 2621), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2619, 2621), True, 'import dash_html_components as html\n'), ((2964, 2973), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2971, 2973), True, 'import dash_html_components as html\n'), ((1691, 1727), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Yes"""'], {'id': 
'"""yes-are_sure"""'}), "('Yes', id='yes-are_sure')\n", (1701, 1727), True, 'import dash_bootstrap_components as dbc\n'), ((1758, 1798), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Close"""'], {'id': '"""close-are_sure"""'}), "('Close', id='close-are_sure')\n", (1768, 1798), True, 'import dash_bootstrap_components as dbc\n'), ((2452, 2470), 'dash_bootstrap_components.Col', 'dbc.Col', (['test_page'], {}), '(test_page)\n', (2459, 2470), True, 'import dash_bootstrap_components as dbc\n'), ((2714, 2793), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Logout"""'], {'id': '"""logout-button"""', 'color': '"""danger"""', 'block': '(True)', 'size': '"""sm"""'}), "('Logout', id='logout-button', color='danger', block=True, size='sm')\n", (2724, 2793), True, 'import dash_bootstrap_components as dbc\n'), ((722, 762), 'dash_html_components.H4', 'html.H4', (['"""Login"""'], {'className': '"""card-title"""'}), "('Login', className='card-title')\n", (729, 762), True, 'import dash_html_components as html\n'), ((800, 863), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'id': '"""login-email"""', 'placeholder': '"""User"""', 'autoFocus': '(True)'}), "(id='login-email', placeholder='User', autoFocus=True)\n", (809, 863), True, 'import dash_bootstrap_components as dbc\n'), ((901, 972), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'id': '"""login-password"""', 'placeholder': '"""Password"""', 'type': '"""password"""'}), "(id='login-password', placeholder='Password', type='password')\n", (910, 972), True, 'import dash_bootstrap_components as dbc\n'), ((1010, 1078), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Submit"""'], {'id': '"""login-button"""', 'color': '"""success"""', 'block': '(True)'}), "('Submit', id='login-button', color='success', block=True)\n", (1020, 1078), True, 'import dash_bootstrap_components as dbc\n'), ((1116, 1125), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1123, 1125), True, 'import dash_html_components as html\n'), ((1163, 1189), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""login-alert"""'}), "(id='login-alert')\n", (1171, 1189), True, 'import dash_html_components as html\n')] |
from random import shuffle
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_iris
import numpy as np
iris = load_iris()
print(type(iris), len(iris.data))
def test1():
XY = np.array(zip(iris.data, iris.target))
np.random.shuffle(XY)
X, Y = XY[:, :1][:100], XY[:, 1:][:100]
X_test, Y_test = XY[:, :1][100:], XY[:, 1:][100:]
X.shape, Y.shape = -1, -1
X_test.shape, Y_test.shape = -1, -1
X = [list(i) for i in X]
X_test = [list(i) for i in X_test]
print('X:', X)
print('Y:', Y)
# Train model
rf = RandomForestRegressor()
rf.fit(X, Y)
# Predict new sample
Y_pre = rf.predict(X_test)
print('Y_test:', Y_test)
print('Y_pre:', Y_pre)
def test2():
from sklearn.cross_validation import cross_val_score, ShuffleSplit
X, Y, names = iris.data, iris.target, iris['feature_names']
rf = RandomForestRegressor()
scores = []
for i in range(X.shape[1]):
score = cross_val_score(rf, X[:, i:i + 1], Y,
scoring='r2',
cv=ShuffleSplit(len(X), 3, .3))
scores.append((round(np.mean(score), 3), names[i]))
print(sorted(scores, reverse=True))
if __name__ == '__main__':
test1()
test2()
| [
"sklearn.datasets.load_iris",
"numpy.mean",
"numpy.random.shuffle",
"sklearn.ensemble.RandomForestRegressor"
] | [((192, 203), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (201, 203), False, 'from sklearn.datasets import load_iris\n'), ((304, 325), 'numpy.random.shuffle', 'np.random.shuffle', (['XY'], {}), '(XY)\n', (321, 325), True, 'import numpy as np\n'), ((628, 651), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (649, 651), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((942, 965), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (963, 965), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1207, 1221), 'numpy.mean', 'np.mean', (['score'], {}), '(score)\n', (1214, 1221), True, 'import numpy as np\n')] |
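test2() above still targets the removed sklearn.cross_validation module and the old ShuffleSplit(n, n_iter, test_size) signature. A minimal sketch of the same per-feature cross-validated R^2 ranking against the current sklearn.model_selection API (illustrative only, not part of the record):

# Sketch: per-feature cross-validated R^2 with the modern sklearn API.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import ShuffleSplit, cross_val_score

iris = load_iris()
X, Y, names = iris.data, iris.target, iris['feature_names']
rf = RandomForestRegressor(n_estimators=100)
cv = ShuffleSplit(n_splits=3, test_size=0.3)  # replaces ShuffleSplit(len(X), 3, .3)

scores = []
for i in range(X.shape[1]):
    score = cross_val_score(rf, X[:, i:i + 1], Y, scoring='r2', cv=cv)
    scores.append((round(np.mean(score), 3), names[i]))
print(sorted(scores, reverse=True))
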
# -*- coding:utf-8 -*-
from __future__ import absolute_import
import codecs
from setuptools import setup
with codecs.open('README.rst') as readme_file:
readme = readme_file.read()
with codecs.open('HISTORY.rst') as history_file:
history = history_file.read()
setup(
name='cfn-resource-timeout',
version='1.2.0',
description=(
'Wrapper decorators for building CloudFormation custom resources'
),
long_description=readme + '\n\n' + history,
url='https://github.com/timeoutdigital/cfn-resource-timeout',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
keywords='cloudformation aws cloud custom resource amazon',
py_modules=["cfn_resource"],
install_requires=["requests"],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
package_data={},
data_files=[],
entry_points={},
)
| [
"codecs.open",
"setuptools.setup"
] | [((273, 1214), 'setuptools.setup', 'setup', ([], {'name': '"""cfn-resource-timeout"""', 'version': '"""1.2.0"""', 'description': '"""Wrapper decorators for building CloudFormation custom resources"""', 'long_description': "(readme + '\\n\\n' + history)", 'url': '"""https://github.com/timeoutdigital/cfn-resource-timeout"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6']", 'keywords': '"""cloudformation aws cloud custom resource amazon"""', 'py_modules': "['cfn_resource']", 'install_requires': "['requests']", 'python_requires': '""">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"""', 'package_data': '{}', 'data_files': '[]', 'entry_points': '{}'}), "(name='cfn-resource-timeout', version='1.2.0', description=\n 'Wrapper decorators for building CloudFormation custom resources',\n long_description=readme + '\\n\\n' + history, url=\n 'https://github.com/timeoutdigital/cfn-resource-timeout', author=\n '<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email\n ='<EMAIL>', license='MIT', classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6'], keywords=\n 'cloudformation aws cloud custom resource amazon', py_modules=[\n 'cfn_resource'], install_requires=['requests'], python_requires=\n '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', package_data={},\n data_files=[], entry_points={})\n", (278, 1214), False, 'from setuptools import setup\n'), ((113, 138), 'codecs.open', 'codecs.open', (['"""README.rst"""'], {}), "('README.rst')\n", (124, 138), False, 'import codecs\n'), ((193, 219), 'codecs.open', 'codecs.open', (['"""HISTORY.rst"""'], {}), "('HISTORY.rst')\n", (204, 219), False, 'import codecs\n')] |
import os
import argparse
import datetime
import numpy as np
from glob import glob
from typing import List, Set, Tuple
"""
Author: <NAME> (<EMAIL>)
Computes character-level Cohen's kappa and percentage
agreement for a set of brat annotated files from two
annotators for a sequence labeling task (e.g. NER).
"""
class BratANN(object):
"""
A brat annotation.
>>> ann = "T1\tent 1 4\tcat"
>>> b1 = BratANN("T3", "ent", 1, 4, "cat")
>>> b2 = BratANN.from_string(ann)
>>> b1 == b2
True
>>> b3 = BratANN("T3", "ent", 1, 5, "cat ")
>>> b1 == b3
False
"""
def __init__(self, num: str, label: str, start: int, end: int, text: str):
self.num = num
self.label = label
self.start = int(start)
self.end = int(end)
self.text = text
@classmethod
def from_string(cls, string: str):
(n, l, s, e, t) = string.split(maxsplit=4)
return cls(n, l, int(s), int(e), t)
def __str__(self) -> str:
return f"{self.num}\t{self.label} {self.start} {self.end}\t{self.text}" # noqa
def __repr__(self) -> str:
return f"<ira.BratANN '{self.num}, {self.label}, {self.start}, {self.end}, {self.text}'>" # noqa
def __eq__(self, other) -> bool:
"""
Overrides the default implementation
        Two BratANNs are considered equal iff they have the same label,
offset, and text.
Equality does not consider the annotation number, e.g. T1
"""
if isinstance(other, BratANN):
return all([self.label == other.label,
self.start == other.start,
self.end == other.end,
self.text == other.text])
else:
return False
def parse_args():
def usage():
return """ira.py
[--help, Show this help message and exit]
[--test, Test the ira function]
[--docdir, Directory containing the documents that were annotated.
If not specified, looks in indir1.]
--indir1, Directory containing first annotators annotations
--indir2, Directory containing second annotators annotations
--annotation_conf, The brat annotation.conf that was used
for this annotation task
--disagreements, Whether to suppress, print, or log files
in which annotators disagree. Possible values
are "suppress", "print", "log". Default is
"suppress". If "log", writes file names to
"disagreements.log" in the current working
directory.
"""
desc = """Computes Cohen's kappa at the token
level for a sequence labeling task."""
parser = argparse.ArgumentParser(description=desc, usage=usage())
parser.add_argument("--test", action="store_true", default=False,
help="""Test the ira function.""")
args, remainder = parser.parse_known_args()
if args.test is True:
return args
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument("--indir1", type=str, required=True)
parser.add_argument("--indir2", type=str, required=True)
parser.add_argument("--annotation_conf", type=str, required=True)
parser.add_argument("--docdir", type=str, required=False, default=None)
parser.add_argument("--disagreements", type=str,
required=False,
default="suppress",
choices=["suppress", "print", "log"])
args = parser.parse_args(remainder)
args.test = False
return args
def main(indir1: str, indir2: str, ann_conf: str,
docdir: str = None, disagreements: str = "suppress"):
"""
param indir{1,2}: Input directories containing the first and second
annotators .ann files, respectively.
param ann_conf: Path to the annotation.conf file.
param docdir: Directory containing the .txt files which were annotated.
If None, uses indir1.
param disagreements: How disagreements are logged. Possible values are
"suppress", "print" and "log". If "suppress",
do nothing. If "print", prints files that disagree
to the console. If "log", files that disagree
will be written to "disagreements.log" in the current
working directory.
"""
# Read in the documents.
if docdir is not None:
doc_fnames = glob(f"{docdir}/*.txt")
else:
doc_fnames = glob(f"{indir1}/*.txt")
docs = read_docs(doc_fnames)
# Read in the annotations.
basenames = [os.path.splitext(os.path.basename(fn))[0]
for fn in doc_fnames]
ann_fnames1 = [os.path.join(indir1, f"{bn}.ann") for bn in basenames]
ann_fnames2 = [os.path.join(indir2, f"{bn}.ann") for bn in basenames]
anns1 = read_anns(ann_fnames1)
anns2 = read_anns(ann_fnames2)
if not len(docs) == len(anns1) == len(anns2):
raise ValueError("Different numbers of documents and annotations.")
# Read the entity labels.
labels = read_labels(ann_conf)
# Compute inter rater agreement.
kappa, agreement, disagree_idxs = ira(docs, anns1, anns2, labels)
summary(kappa, "Cohen's Kappa")
summary(agreement, "Percentage Agreement")
# Do something with disagreements.
if disagreements == "print":
print("=== Disagreements ===")
for (idx, p_o) in disagree_idxs:
bn = os.path.basename(doc_fnames[idx])
print(f"{bn}: Agreement={p_o:.3f}")
if disagreements == "log":
with open("disagreements.log", 'w') as outF:
            outF.write(str(datetime.datetime.now()) + '\n')
for (idx, p_o) in disagree_idxs:
bn = os.path.basename(doc_fnames[idx])
outF.write(f"{bn}: Agreement={p_o:.3f}\n")
def read_docs(fnames: List[str]) -> List[str]:
"""
Reads in the documents.
param fnames: List of paths to .txt files to read.
returns: List of input documents.
"""
all_docs = []
for docfile in fnames:
doc = open(docfile, 'r').read()
all_docs.append(doc)
return all_docs
def read_anns(fnames: List[str]) -> List[List[BratANN]]:
"""
Reads all .ann files and converts their
annotations to BratANN objects.
param fnames: List of paths to .ann files to read.
returns: List of annotations.
"""
all_anns = []
for annfile in fnames:
anns = [BratANN.from_string(a.strip()) for a in open(annfile, 'r')]
all_anns.append(anns)
return all_anns
def read_labels(ann_conf: str) -> Set[str]:
"""
Reads the entity labels from annotation.conf.
param ann_conf: Path to annotation.conf
returns: set of entity labels.
"""
labels = set()
with open(ann_conf, 'r') as infile:
copy = False
for line in infile:
# Skip blank lines and comments.
if not line.strip() or line.strip().startswith('#'):
continue
if line.strip() == "[entities]":
copy = True
elif line.strip() == "[relations]":
copy = False
elif copy is True:
labels.add(line.strip())
return labels
def ira(docs: List[str],
anns1: List[List[BratANN]],
anns2: List[List[BratANN]],
labels: Set[str]) -> Tuple[np.array, np.array, List[Tuple[int, float]]]: # noqa
"""
Computes Cohen's kappa and percentage agreement between two annotators.
param docs: List of documents, output of read_docs().
param anns1: List of first annotators annotations, output of read_anns().
param anns2: List of second annotators annotations, output of read_anns().
param labels: Set of labels annotated, output of read_labels().
returns: Kappa and percentage agreement for each document.
"""
n_docs = len(docs)
p_os = np.zeros(n_docs)
kappas = np.zeros(n_docs)
disagree_idxs_po = []
for i in range(n_docs):
denom = len(docs[i])
v1 = label_vector(docs[i], anns1[i], labels)
v2 = label_vector(docs[i], anns2[i], labels)
# Observed agreement: How often the two annotators actually agreed.
# Equivalent to accuracy.
p_o = np.sum(v1 == v2) / denom
if p_o != 1.0:
disagree_idxs_po.append((i, p_o))
# Expected agreement: How often the two annotators are expected to
# agree. For number of items N, labels k, and the number of times
# rater j predicted label k, n_j_k:
# p_e = (1/N^2) * sum_k (n_1_k * n_2_k)
p_e = (1/denom**2) * np.sum([np.sum(v1 == k) * np.sum(v2 == k)
for k in range(len(labels)+1)])
if p_e == 1:
k = 0.0
else:
k = (p_o - p_e) / (1 - p_e)
p_os[i] = p_o
kappas[i] = k
return (kappas, p_os, disagree_idxs_po)
def label_vector(doc: List[str],
anns: List[List[BratANN]],
labels: Set[str]) -> np.array:
"""
Converts the document into an integer vector. The value
of each element corresponds to the entity type of the
annotation at that character position, with 0 indicating
no annotation. So an annotation task with 3 annotation types
would have a vector of 0s, 1s, 2s, and 3s.
param doc: Document that was annotated.
param anns: Annotations for each document.
param labels: Set of entity labels for this task.
returns: Vector of character level annotations.
"""
v = np.zeros(len(doc)) # For each character
for (i, lab) in enumerate(labels):
i += 1 # 0 is reserved for no label
idxs = [np.arange(a.start, a.end) for a in anns if a.label == lab]
idxs = [j for mask in idxs for j in mask]
v[idxs] = i
return v
def summary(results: np.array, varname: str = None):
"""
Prints summary statistics for the supplied results.
param results: Numeric array of results (e.g. kappas).
param varname: (Optional) Name of the variable being summarized.
"""
if varname is not None:
print(varname)
if len(results) == 1:
print(f"{results[0]:.3f}")
else:
rmean = np.mean(results)
rmax = np.max(results)
rmin = np.min(results)
rstd = np.std(results)
print(f"""Mean: {rmean:.3f} +/-{rstd:.3f}\nRange: ({rmin:.3f}, {rmax:.3f})""") # noqa
def test():
"""
A small example to test ira().
"""
docs = ["The cats sat on the mat"]
ann_strs1 = ["T1\tent 4 8\tcats",
"T2\tent 9 12\tsat",
"T3\tent 20 23\tmat"]
anns1 = [[BratANN.from_string(s) for s in ann_strs1]]
ann_strs2 = ["T1\tent 4 7\tcat", "T2\tent 20 23 mat"]
anns2 = [[BratANN.from_string(s) for s in ann_strs2]]
labels = ["ent"]
kappas, agreements, disagreements = ira(docs, anns1, anns2, labels)
assert(np.isclose(kappas[0], 0.629, atol=1e-03))
assert(np.isclose(agreements[0], 0.826, atol=1e-03))
print("All tests passed.")
if __name__ == "__main__":
args = parse_args()
if args.test is True:
import doctest
doctest.testmod()
test()
else:
main(args.indir1, args.indir2, args.annotation_conf,
docdir=args.docdir, disagreements=args.disagreements)
| [
"numpy.sum",
"os.path.basename",
"numpy.std",
"numpy.zeros",
"datetime.datetime.now",
"numpy.isclose",
"numpy.mean",
"numpy.max",
"numpy.min",
"numpy.arange",
"glob.glob",
"os.path.join",
"doctest.testmod"
] | [((8150, 8166), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (8158, 8166), True, 'import numpy as np\n'), ((8180, 8196), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (8188, 8196), True, 'import numpy as np\n'), ((11195, 11235), 'numpy.isclose', 'np.isclose', (['kappas[0]', '(0.629)'], {'atol': '(0.001)'}), '(kappas[0], 0.629, atol=0.001)\n', (11205, 11235), True, 'import numpy as np\n'), ((11248, 11292), 'numpy.isclose', 'np.isclose', (['agreements[0]', '(0.826)'], {'atol': '(0.001)'}), '(agreements[0], 0.826, atol=0.001)\n', (11258, 11292), True, 'import numpy as np\n'), ((4683, 4706), 'glob.glob', 'glob', (['f"""{docdir}/*.txt"""'], {}), "(f'{docdir}/*.txt')\n", (4687, 4706), False, 'from glob import glob\n'), ((4738, 4761), 'glob.glob', 'glob', (['f"""{indir1}/*.txt"""'], {}), "(f'{indir1}/*.txt')\n", (4742, 4761), False, 'from glob import glob\n'), ((4943, 4976), 'os.path.join', 'os.path.join', (['indir1', 'f"""{bn}.ann"""'], {}), "(indir1, f'{bn}.ann')\n", (4955, 4976), False, 'import os\n'), ((5017, 5050), 'os.path.join', 'os.path.join', (['indir2', 'f"""{bn}.ann"""'], {}), "(indir2, f'{bn}.ann')\n", (5029, 5050), False, 'import os\n'), ((10493, 10509), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (10500, 10509), True, 'import numpy as np\n'), ((10525, 10540), 'numpy.max', 'np.max', (['results'], {}), '(results)\n', (10531, 10540), True, 'import numpy as np\n'), ((10556, 10571), 'numpy.min', 'np.min', (['results'], {}), '(results)\n', (10562, 10571), True, 'import numpy as np\n'), ((10587, 10602), 'numpy.std', 'np.std', (['results'], {}), '(results)\n', (10593, 10602), True, 'import numpy as np\n'), ((11435, 11452), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (11450, 11452), False, 'import doctest\n'), ((5692, 5725), 'os.path.basename', 'os.path.basename', (['doc_fnames[idx]'], {}), '(doc_fnames[idx])\n', (5708, 5725), False, 'import os\n'), ((8512, 8528), 'numpy.sum', 'np.sum', (['(v1 == v2)'], {}), '(v1 == v2)\n', (8518, 8528), True, 'import numpy as np\n'), ((9957, 9982), 'numpy.arange', 'np.arange', (['a.start', 'a.end'], {}), '(a.start, a.end)\n', (9966, 9982), True, 'import numpy as np\n'), ((4860, 4880), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (4876, 4880), False, 'import os\n'), ((5984, 6017), 'os.path.basename', 'os.path.basename', (['doc_fnames[idx]'], {}), '(doc_fnames[idx])\n', (6000, 6017), False, 'import os\n'), ((5885, 5908), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5906, 5908), False, 'import datetime\n'), ((8890, 8905), 'numpy.sum', 'np.sum', (['(v1 == k)'], {}), '(v1 == k)\n', (8896, 8905), True, 'import numpy as np\n'), ((8908, 8923), 'numpy.sum', 'np.sum', (['(v2 == k)'], {}), '(v2 == k)\n', (8914, 8923), True, 'import numpy as np\n')] |
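The comments inside ira() give the expected-agreement formula only in prose; a small worked check on two invented character-label vectors runs the same arithmetic end to end (the vectors are made up for illustration, not taken from the dataset):

# Illustrative check of the kappa arithmetic used in ira(), on made-up vectors.
import numpy as np

v1 = np.array([0, 0, 1, 1, 1, 0])  # annotator 1, one label per character
v2 = np.array([0, 1, 1, 1, 0, 0])  # annotator 2, one label per character
N = len(v1)

p_o = np.sum(v1 == v2) / N  # observed agreement: 4 of 6 characters match
p_e = (1 / N**2) * np.sum([np.sum(v1 == k) * np.sum(v2 == k) for k in (0, 1)])
kappa = (p_o - p_e) / (1 - p_e)
print(round(p_o, 3), round(p_e, 3), round(kappa, 3))  # 0.667 0.5 0.333
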
import datetime
import os
import keras
import numpy as np
import pandas as pd
from base_model import BaseModel
from multivariate_container import MultivariateContainer
from typing import Union
class MultivariateLSTM(BaseModel):
def __init__(
self,
container: MultivariateContainer,
config: bool=None,
create_empty: bool=False) -> None:
"""
Initialization method.
"""
_, self.time_steps, self.num_fea = container.train_X.shape
print(f"MultivariateLSTM Initialized: \
\n\tTime Step: {self.time_steps}\
\n\tFeature: {self.num_fea}")
self.config = config
self.container = container
self.hist = None
if create_empty:
self.core = None
else:
self.core = self._construct_lstm_model(self.config)
self._gen_file_name()
print(
            f"\tMultivariateLSTM: Current model will be saved to ./saved_models/{self.file_name}/")
def _construct_lstm_model(
self,
config: dict,
verbose: bool=True
) -> keras.Model:
"""
Construct the Stacked lstm model,
Note: Modify this method to change model configurations.
        # TODO: Add arbitrary layer support.
"""
print("MultivariateLSTM: Generating LSTM model using Model API.")
input_sequence = keras.layers.Input(
shape=(self.time_steps, self.num_fea),
dtype="float32",
name="input_sequence")
normalization = keras.layers.BatchNormalization()(input_sequence)
lstm = keras.layers.LSTM(
units=config["nn.lstm1"],
return_sequences=False
)(normalization)
dense1 = keras.layers.Dense(
units=config["nn.dense1"],
name="Dense1"
)(lstm)
predictions = keras.layers.Dense(
1,
name="Prediction"
)(dense1)
model = keras.Model(inputs=input_sequence, outputs=predictions)
model.compile(loss="mse", optimizer="adam")
if verbose:
print("\tMultivariateLSTM: LSTM model constructed with configuration: ")
keras.utils.print_summary(model)
return model
def _construct_lstm_sequential(
self,
config: dict,
verbose: bool=True
) -> keras.Sequential:
"""
Construct the Stacked lstm model,
Note: Modify this method to change model configurations.
        # TODO: Add arbitrary layer support.
"""
print("MultivariateLSTM: Generating LSTM model with Keras Sequential API")
model = keras.Sequential()
model.add(keras.layers.LSTM(
units=config["nn.lstm1"],
input_shape=(self.time_steps, self.num_fea),
return_sequences=True,
name="LSTM1"
))
model.add(
keras.layers.LSTM(
units=config["nn.lstm2"],
name="LSTM2"
))
model.add(
keras.layers.Dense(
units=config["nn.dense1"],
name="Dense1"
))
model.add(
keras.layers.Dense(
units=1,
name="Dense_output"
))
model.compile(loss="mse", optimizer="adam")
if verbose:
print("\tMultivariateLSTM: LSTM model constructed with configuration: ")
keras.utils.print_summary(model)
return model
def update_config(
self,
new_config: dict
) -> None:
"""
Update the neural network configuration, and re-construct, re-compile the core.
"""
# TODO: add check configuration method here.
print("MultivariateLSTM: Updating neural network configuration...")
self.prev_config = self.config
self.config = new_config
self.core = self._construct_lstm_model(self.config, verbose=False)
print("\tDone.")
def fit_model(
self,
epochs: int=10
) -> None:
start_time = datetime.datetime.now()
print("MultivariateLSTM: Start fitting.")
self.hist = self.core.fit(
self.container.train_X,
self.container.train_y,
epochs=epochs,
batch_size=32 if self.config is None else self.config["batch_size"],
validation_split=0.1 if self.config is None else self.config["validation_split"]
)
finish_time = datetime.datetime.now()
time_taken = finish_time - start_time
print(f"\tFitting finished, {epochs} epochs for {str(time_taken)}")
def predict(
self,
X_feed: np.ndarray
) -> np.ndarray:
y_hat = self.core.predict(X_feed, verbose=1)
# y_hat = self.container.scaler_y.inverse_transform(y_hat)
# y_hat returned used to compare with self.container.*_X directly.
return y_hat
def save_model(
self,
file_dir: str=None
) -> None:
if file_dir is None:
# If no file directory specified, use the default one.
file_dir = self.file_name
# Try to create record folder.
try:
folder = f"./saved_models/{file_dir}/"
os.system(f"mkdir {folder}")
print(f"Experiment record directory created: {folder}")
except:
print("Current directory: ")
_ = os.system("pwd")
raise FileNotFoundError(
"Failed to create directory, please create directory ./saved_models/")
# Save model structure to JSON
print("Saving model structure...")
model_json = self.core.to_json()
with open(f"{folder}model_structure.json", "w") as json_file:
json_file.write(model_json)
print("Done.")
# Save model weight to h5
print("Saving model weights...")
self.core.save_weights(f"{folder}model_weights.h5")
print("Done")
# Save model illustration to png file.
print("Saving model visualization...")
try:
keras.utils.plot_model(
self.core,
to_file=f"{folder}model.png",
show_shapes=True,
show_layer_names=True)
except:
print("Model illustration cannot be saved.")
# Save training history (if any)
if self.hist is not None:
hist_loss = np.squeeze(np.array(self.hist.history["loss"]))
hist_val_loss = np.squeeze(np.array(self.hist.history["val_loss"]))
combined = np.stack([hist_loss, hist_val_loss])
combined = np.transpose(combined)
df = pd.DataFrame(combined, dtype=np.float32)
df.columns = ["loss", "val_loss"]
df.to_csv(f"{folder}hist.csv", sep=",")
print(f"Training history is saved to {folder}hist.csv...")
else:
print("No training history found.")
print("Done.")
def load_model(
self,
folder_dir: str
) -> None:
"""
#TODO: doc
"""
if not folder_dir.endswith("/"):
# Assert the correct format, folder_dir should be
folder_dir += "/"
print(f"Load model from folder {folder_dir}")
# construct model from json
print("Reconstruct model from Json file...")
try:
json_file = open(f"{folder_dir}model_structure.json", "r")
except FileNotFoundError:
raise Warning(
f"Json file not found. Expected: {folder_dir}model_structure.json"
)
model_file = json_file.read()
json_file.close()
self.core = keras.models.model_from_json(model_file)
print("Done.")
# load weights from h5
print("Loading model weights...")
try:
self.core.load_weights(
f"{folder_dir}model_weights.h5", by_name=True)
except FileNotFoundError:
raise Warning(
f"h5 file not found. Expected: {folder_dir}model_weights.h5"
)
print("Done.")
self.core.compile(loss="mse", optimizer="adam")
def summarize_training(self):
"""
Summarize training result to string file.
- Loss
- Epochs
- Time taken
"""
raise NotImplementedError
def visualize_training(self):
"""
Visualize the training result:
- Plot training set loss and validation set loss.
"""
# TODO: move visualize training to general methods.
raise NotImplementedError
| [
"numpy.stack",
"pandas.DataFrame",
"keras.Model",
"keras.Sequential",
"keras.layers.LSTM",
"numpy.transpose",
"os.system",
"keras.utils.plot_model",
"keras.models.model_from_json",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Input",
"keras.utils.print_summary",
"datetime.datetime.now",
"keras.layers.BatchNormalization"
] | [((1417, 1518), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.time_steps, self.num_fea)', 'dtype': '"""float32"""', 'name': '"""input_sequence"""'}), "(shape=(self.time_steps, self.num_fea), dtype='float32',\n name='input_sequence')\n", (1435, 1518), False, 'import keras\n'), ((2002, 2057), 'keras.Model', 'keras.Model', ([], {'inputs': 'input_sequence', 'outputs': 'predictions'}), '(inputs=input_sequence, outputs=predictions)\n', (2013, 2057), False, 'import keras\n'), ((2698, 2716), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (2714, 2716), False, 'import keras\n'), ((4146, 4169), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4167, 4169), False, 'import datetime\n'), ((4560, 4583), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4581, 4583), False, 'import datetime\n'), ((7834, 7874), 'keras.models.model_from_json', 'keras.models.model_from_json', (['model_file'], {}), '(model_file)\n', (7862, 7874), False, 'import keras\n'), ((1577, 1610), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1608, 1610), False, 'import keras\n'), ((1643, 1710), 'keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': "config['nn.lstm1']", 'return_sequences': '(False)'}), "(units=config['nn.lstm1'], return_sequences=False)\n", (1660, 1710), False, 'import keras\n'), ((1778, 1838), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "config['nn.dense1']", 'name': '"""Dense1"""'}), "(units=config['nn.dense1'], name='Dense1')\n", (1796, 1838), False, 'import keras\n'), ((1902, 1942), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'name': '"""Prediction"""'}), "(1, name='Prediction')\n", (1920, 1942), False, 'import keras\n'), ((2229, 2261), 'keras.utils.print_summary', 'keras.utils.print_summary', (['model'], {}), '(model)\n', (2254, 2261), False, 'import keras\n'), ((2735, 2864), 'keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': "config['nn.lstm1']", 'input_shape': '(self.time_steps, self.num_fea)', 'return_sequences': '(True)', 'name': '"""LSTM1"""'}), "(units=config['nn.lstm1'], input_shape=(self.time_steps,\n self.num_fea), return_sequences=True, name='LSTM1')\n", (2752, 2864), False, 'import keras\n'), ((2951, 3008), 'keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': "config['nn.lstm2']", 'name': '"""LSTM2"""'}), "(units=config['nn.lstm2'], name='LSTM2')\n", (2968, 3008), False, 'import keras\n'), ((3087, 3147), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': "config['nn.dense1']", 'name': '"""Dense1"""'}), "(units=config['nn.dense1'], name='Dense1')\n", (3105, 3147), False, 'import keras\n'), ((3226, 3274), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'name': '"""Dense_output"""'}), "(units=1, name='Dense_output')\n", (3244, 3274), False, 'import keras\n'), ((3492, 3524), 'keras.utils.print_summary', 'keras.utils.print_summary', (['model'], {}), '(model)\n', (3517, 3524), False, 'import keras\n'), ((5346, 5374), 'os.system', 'os.system', (['f"""mkdir {folder}"""'], {}), "(f'mkdir {folder}')\n", (5355, 5374), False, 'import os\n'), ((6192, 6301), 'keras.utils.plot_model', 'keras.utils.plot_model', (['self.core'], {'to_file': 'f"""{folder}model.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(self.core, to_file=f'{folder}model.png', show_shapes\n =True, show_layer_names=True)\n", (6214, 6301), False, 'import keras\n'), ((6694, 6730), 'numpy.stack', 'np.stack', (['[hist_loss, hist_val_loss]'], {}), '([hist_loss, 
hist_val_loss])\n', (6702, 6730), True, 'import numpy as np\n'), ((6754, 6776), 'numpy.transpose', 'np.transpose', (['combined'], {}), '(combined)\n', (6766, 6776), True, 'import numpy as np\n'), ((6794, 6834), 'pandas.DataFrame', 'pd.DataFrame', (['combined'], {'dtype': 'np.float32'}), '(combined, dtype=np.float32)\n', (6806, 6834), True, 'import pandas as pd\n'), ((5516, 5532), 'os.system', 'os.system', (['"""pwd"""'], {}), "('pwd')\n", (5525, 5532), False, 'import os\n'), ((6554, 6589), 'numpy.array', 'np.array', (["self.hist.history['loss']"], {}), "(self.hist.history['loss'])\n", (6562, 6589), True, 'import numpy as np\n'), ((6630, 6669), 'numpy.array', 'np.array', (["self.hist.history['val_loss']"], {}), "(self.hist.history['val_loss'])\n", (6638, 6669), True, 'import numpy as np\n')] |
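MultivariateLSTM reads its hyperparameters from a config dict that never appears in the record. An illustrative config covering every key the class actually reads (the numeric values are placeholders, not the original project's settings):

# Placeholder configuration for MultivariateLSTM; values are illustrative.
config = {
    "nn.lstm1": 64,           # LSTM units in _construct_lstm_model / first layer of the Sequential variant
    "nn.lstm2": 32,           # second LSTM layer, only used by _construct_lstm_sequential
    "nn.dense1": 16,          # hidden Dense layer size
    "batch_size": 32,         # used by fit_model
    "validation_split": 0.1,  # used by fit_model
}
# model = MultivariateLSTM(container, config=config)  # container: a MultivariateContainer instance
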
from django.contrib import admin
from .models import Post
from .models import Query
from .models import Solution
# Register your models here.
admin.site.register(Post)
admin.site.register(Query)
# admin.site.register(Services)
# admin.site.register(Contact)
admin.site.register(Solution) | [
"django.contrib.admin.site.register"
] | [((150, 175), 'django.contrib.admin.site.register', 'admin.site.register', (['Post'], {}), '(Post)\n', (169, 175), False, 'from django.contrib import admin\n'), ((177, 203), 'django.contrib.admin.site.register', 'admin.site.register', (['Query'], {}), '(Query)\n', (196, 203), False, 'from django.contrib import admin\n'), ((270, 299), 'django.contrib.admin.site.register', 'admin.site.register', (['Solution'], {}), '(Solution)\n', (289, 299), False, 'from django.contrib import admin\n')] |
# ----------------------------------------------------------------------------
# board.py
# Pin definitions
#
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
# 2020-11-21, v1
# ----------------------------------------------------------------------------
from micropython import const
# Spectrometer (CM12880MA)
TRG = const(14)
STA = const(15)
CLK = const(21)
VID = const(36)
# I2C for compass etc.
SDA = const(23)
SCL = const(22)
I2C_FRQ = const(400000)
# Serial for extensions
TX = const(17)
RX = const(16)
# Servos
SERVO_PAN = const(27)
PAN_RANGE_US = [1010, 1931]
PAN_RANGE_DEG = [-45, 45]
SERVO_TLT = const(33)
TLT_RANGE_US = [1033, 1916]
TLT_RANGE_DEG = [-45, 45]
# NeoPixel
NEOPIX = const(32)
# ----------------------------------------------------------------------------
| [
"micropython.const"
] | [((333, 342), 'micropython.const', 'const', (['(14)'], {}), '(14)\n', (338, 342), False, 'from micropython import const\n'), ((360, 369), 'micropython.const', 'const', (['(15)'], {}), '(15)\n', (365, 369), False, 'from micropython import const\n'), ((387, 396), 'micropython.const', 'const', (['(21)'], {}), '(21)\n', (392, 396), False, 'from micropython import const\n'), ((414, 423), 'micropython.const', 'const', (['(36)'], {}), '(36)\n', (419, 423), False, 'from micropython import const\n'), ((465, 474), 'micropython.const', 'const', (['(23)'], {}), '(23)\n', (470, 474), False, 'from micropython import const\n'), ((492, 501), 'micropython.const', 'const', (['(22)'], {}), '(22)\n', (497, 501), False, 'from micropython import const\n'), ((519, 532), 'micropython.const', 'const', (['(400000)'], {}), '(400000)\n', (524, 532), False, 'from micropython import const\n'), ((575, 584), 'micropython.const', 'const', (['(17)'], {}), '(17)\n', (580, 584), False, 'from micropython import const\n'), ((602, 611), 'micropython.const', 'const', (['(16)'], {}), '(16)\n', (607, 611), False, 'from micropython import const\n'), ((639, 648), 'micropython.const', 'const', (['(27)'], {}), '(27)\n', (644, 648), False, 'from micropython import const\n'), ((723, 732), 'micropython.const', 'const', (['(33)'], {}), '(33)\n', (728, 732), False, 'from micropython import const\n'), ((819, 828), 'micropython.const', 'const', (['(32)'], {}), '(32)\n', (824, 828), False, 'from micropython import const\n')] |
from application import app
# Starts the application
if __name__ == "__main__":
app.run() | [
"application.app.run"
] | [((85, 94), 'application.app.run', 'app.run', ([], {}), '()\n', (92, 94), False, 'from application import app\n')] |
import re
import nltk
from string import punctuation
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
#nltk.download('rslp')
#nltk.download('stopwords')
#nltk.download('punkt')
class PreProcessor(object):
stemmer = nltk.stem.RSLPStemmer()
tokenizer = TweetTokenizer(reduce_len=True, preserve_case=False)
special_char = ['$', '%', '&', '*', '(', ')', '_', '-', '+', '=', '{', '[', '}', ']', '~', '.', ',', ';', 'º', 'ª', '°', '¹', '²', '³']
# UniLex: Método Léxico para Análise de Sentimentos Textuais sobre Conteúdo de Tweets em Português Brasileiro*
stoplist_uniLex = ['a', 'agora', 'ainda', 'alguem', 'algum', 'alguma', 'algumas', 'alguns', 'ampla', 'amplas', 'amplo', 'amplos',
'ante', 'antes', 'ao', 'aos', 'apos', 'aquela', 'aquelas', 'aquele', 'aqueles', 'aquilo', 'as', 'ate', 'atraves',
'cada', 'coisa', 'coisas', 'com', 'como', 'contra', 'contudo', 'da', 'daquele', 'daqueles', 'das', 'de', 'dela',
'delas', 'dele', 'deles', 'depois', 'dessa', 'dessas', 'desse', 'desses', 'desta', 'destas', 'deste', 'deste',
'destes', 'deve', 'devem', 'devendo', 'dever', 'devera', 'deverao', 'deveria', 'deveriam', 'devia', 'deviam',
'disse', 'disso', 'disto', 'dito', 'diz', 'dizem', 'do', 'dos', 'e', 'ela', 'elas', 'ele', 'eles', 'em',
'enquanto', 'entre', 'era', 'essa', 'essas', 'esse', 'esses', 'esta', 'estamos', 'estao', 'estas', 'estava',
'estavam', 'estavamos', 'este', 'estes', 'estou', 'eu', 'fazendo', 'fazer', 'feita', 'feitas', 'feito', 'feitos',
'foi', 'for', 'foram', 'fosse', 'fossem', 'grande', 'grandes', 'ha', 'isso', 'isto', 'ja', 'la', 'lhe', 'lhes',
'lo', 'mas', 'me', 'mesma', 'mesmas', 'mesmo', 'mesmos', 'meu', 'meus', 'minha', 'minhas', 'muita', 'muitas',
'muito', 'muitos', 'na', 'nao', 'nas', 'nem', 'nenhum', 'nessa', 'nessas', 'nesta', 'nestas', 'ninguem', 'no',
'nos', 'nossa', 'nossas', 'nosso', 'nossos', 'num', 'numa', 'nunca', 'o', 'os', 'ou', 'outra', 'outras', 'outro',
'outros', 'para', 'pela', 'pelas', 'pelo', 'pelos', 'pequena', 'pequenas', 'pequeno', 'pequenos', 'per', 'perante',
'pode', 'podendo', 'poder', 'poderia', 'poderiam', 'podia', 'podiam', 'pois', 'por', 'porem', 'porque', 'posso',
'pouca', 'poucas', 'pouco', 'poucos', 'primeiro', 'primeiros', 'propria', 'proprias', 'proprio', 'proprios',
'quais', 'qual', 'quando', 'quanto', 'quantos', 'que', 'quem', 'sao', 'se', 'seja', 'sejam', 'sem', 'sempre',
'sendo', 'sera', 'serao', 'seu', 'seus', 'si', 'sido', 'so', 'sob', 'sobre', 'sua', 'suas', 'talvez', 'tambem',
'tampouco', 'te', 'tem', 'tendo', 'tenha', 'ter', 'teu', 'teus', 'ti', 'tido', 'tinha', 'tinham', 'toda', 'todas',
'todavia', 'todo', 'todos', 'tu', 'tua', 'tuas', 'tudo', 'ultima', 'ultimas', 'ultimo', 'ultimos', 'um', 'uma',
'umas', 'uns', 'vendo', 'ver', 'vez', 'vindo', 'vir', 'vos', 'vos']
# Stopwords do nltk + stopwords do UniLex
stoplist = sorted(set(stoplist_uniLex + stopwords.words('portuguese')))
def process(self, tweet):
tweet = self.to_lower(tweet)
tweet = self.remove_links(tweet)
tweet = self.remove_mentions(tweet)
tweet = self.remove_hashtags(tweet)
tweet = self.remove_numbers(tweet)
tweet = self.replace_three_or_more(tweet)
palavras = self.tokenizer.tokenize(tweet)
palavras = self.remove_punctuation(palavras)
palavras = self.remove_stopwords(palavras)
palavras_processadas = []
for palavra in palavras:
# Replace emoji
if len(palavra) <= 3:
# replace good emoticons
palavra = re.sub('[:;=8][\-=^*\']?[)\]Dpb}]|[cCqd{(\[][\-=^*\']?[:;=8]', 'bom', palavra)
# replace bad emoticons
palavra = re.sub('[:;=8][\-=^*\']?[(\[<{cC]|[D>)\]}][\-=^*\']?[:;=8]', 'ruim', palavra)
# Stemming
# palavra = self.stemmer.stem(palavra)
# Remove small words
if len(palavra) <= 2:
palavra = ''
for s in self.special_char:
palavra = palavra.replace(s, '')
palavras_processadas.append(palavra)
tweet = ' '.join(palavras_processadas)
tweet = self.remove_duplicated_spaces(tweet)
return tweet
def to_lower(self, tweet):
return tweet.lower()
def remove_links(self, tweet):
# http matches literal characters and \S+ matches all non-whitespace characters (the end of the url)
return re.sub("http\S+", "", tweet)
def remove_mentions(self, tweet):
return re.sub("@\S+", "", tweet)
def remove_hashtags(self, tweet):
return re.sub("#", "", tweet)
def remove_numbers(self, tweet):
return re.sub("\d+", "", tweet)
def replace_three_or_more(self, tweet):
# pattern to look for three or more repetitions of any character, including newlines
pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
return pattern.sub(r"\1\1", tweet)
def remove_duplicated_spaces(self, tweet):
tweet = tweet.strip() # Remove spaces before and after string
return re.sub(" +", " ", tweet)
def remove_stopwords(self, palavras):
return [palavra for palavra in palavras if palavra not in self.stoplist]
def remove_punctuation(self, palavras):
return [palavra for palavra in palavras if palavra not in list(punctuation)]
| [
"nltk.stem.RSLPStemmer",
"nltk.tokenize.TweetTokenizer",
"nltk.corpus.stopwords.words",
"re.sub",
"re.compile"
] | [((249, 272), 'nltk.stem.RSLPStemmer', 'nltk.stem.RSLPStemmer', ([], {}), '()\n', (270, 272), False, 'import nltk\n'), ((289, 341), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {'reduce_len': '(True)', 'preserve_case': '(False)'}), '(reduce_len=True, preserve_case=False)\n', (303, 341), False, 'from nltk.tokenize import TweetTokenizer\n'), ((4548, 4577), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'tweet'], {}), "('http\\\\S+', '', tweet)\n", (4554, 4577), False, 'import re\n'), ((4631, 4657), 're.sub', 're.sub', (['"""@\\\\S+"""', '""""""', 'tweet'], {}), "('@\\\\S+', '', tweet)\n", (4637, 4657), False, 'import re\n'), ((4711, 4733), 're.sub', 're.sub', (['"""#"""', '""""""', 'tweet'], {}), "('#', '', tweet)\n", (4717, 4733), False, 'import re\n'), ((4787, 4812), 're.sub', 're.sub', (['"""\\\\d+"""', '""""""', 'tweet'], {}), "('\\\\d+', '', tweet)\n", (4793, 4812), False, 'import re\n'), ((4968, 5003), 're.compile', 're.compile', (['"""(.)\\\\1{2,}"""', 're.DOTALL'], {}), "('(.)\\\\1{2,}', re.DOTALL)\n", (4978, 5003), False, 'import re\n'), ((5181, 5205), 're.sub', 're.sub', (['""" +"""', '""" """', 'tweet'], {}), "(' +', ' ', tweet)\n", (5187, 5205), False, 'import re\n'), ((2996, 3025), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""portuguese"""'], {}), "('portuguese')\n", (3011, 3025), False, 'from nltk.corpus import stopwords\n'), ((3670, 3755), 're.sub', 're.sub', (['"""[:;=8][\\\\-=^*\']?[)\\\\]Dpb}]|[cCqd{(\\\\[][\\\\-=^*\']?[:;=8]"""', '"""bom"""', 'palavra'], {}), '("[:;=8][\\\\-=^*\']?[)\\\\]Dpb}]|[cCqd{(\\\\[][\\\\-=^*\']?[:;=8]", \'bom\', palavra\n )\n', (3676, 3755), False, 'import re\n'), ((3815, 3894), 're.sub', 're.sub', (['"""[:;=8][\\\\-=^*\']?[(\\\\[<{cC]|[D>)\\\\]}][\\\\-=^*\']?[:;=8]"""', '"""ruim"""', 'palavra'], {}), '("[:;=8][\\\\-=^*\']?[(\\\\[<{cC]|[D>)\\\\]}][\\\\-=^*\']?[:;=8]", \'ruim\', palavra)\n', (3821, 3894), False, 'import re\n')] |
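A short usage sketch of PreProcessor, assuming the class above is importable; the module name and sample tweet are invented for illustration:

# Usage sketch; module name and sample text are illustrative only.
from preprocessor import PreProcessor

pp = PreProcessor()
tweet = "Adoreiiii o atendimento :) http://t.co/abc @loja #promo 2021"
print(pp.process(tweet))
# Lowercases the text, strips the URL/mention/digits, drops the '#', maps ':)'
# to 'bom', removes stopwords/punctuation and collapses repeated characters.
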
import os
from static import SQLITE_DIR_PATH, USE_MYSQL, MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_DATABASE_NAME
def db_path_validate():
assert os.path.exists(SQLITE_DIR_PATH), "{path} is not exists.".format(path=SQLITE_DIR_PATH)
if USE_MYSQL:
assert MYSQL_USERNAME is not None, "MYSQL_USERNAME is not given."
assert MYSQL_PASSWORD is not None, "MYSQL_PASSWORD is not given."
assert MYSQL_HOST is not None, "MYSQL_HOST is not given."
assert MYSQL_DATABASE_NAME is not None, "MYSQL_DATABASE_NAME is not given."
| [
"os.path.exists"
] | [((158, 189), 'os.path.exists', 'os.path.exists', (['SQLITE_DIR_PATH'], {}), '(SQLITE_DIR_PATH)\n', (172, 189), False, 'import os\n')] |
# Before running, make sure avspeech_train.csv and avspeech_test.csv are in catalog.
# if not, see the requirement.txt
# download and preprocess the data from AVspeech dataset
import sys
sys.path.append("../lib")
import AVHandler as avh
import pandas as pd
import multiprocessing
from multiprocessing import Process
def m_link(youtube_id):
# return the youtube actual link
link = 'https://www.youtube.com/watch?v='+youtube_id
return link
def m_audio(loc,name,cat,start_idx,end_idx):
# make concatenated audio following by the catalog from AVSpeech
# loc | the location for file to store
# name | name for the wav mix file
# cat | the catalog with audio link and time
# start_idx | the starting index of the audio to download and concatenate
# end_idx | the ending index of the audio to download and concatenate
for i in range(start_idx,end_idx):
#f_name = name+str(i)
f_name = f'{name}_{cat.loc[i, "link"]}_{i}' # auio_train_id_indexofaudio
link = m_link(cat.loc[i,'link'])
start_time = cat.loc[i,'start_time']
end_time = start_time + 3.0
avh.download(loc,f_name,link)
avh.cut(loc,f_name,start_time,end_time)
cat_train = pd.read_csv('catalog/avspeech_train.csv')
cat_train.columns = ['link', 'start_time', 'end_time', 'x','y']
#cat_test = pd.read_csv('catalog/avspeech_test.csv')
# create 80000-90000 audios data from 290K
avh.mkdir('audio_train')
# Multiprocess
processes = []
n_process = 10
sample_per_process = 2000
for i in range(n_process):
proc = Process(target=m_audio, kwargs={'loc':'audio_train', 'name': 'audio_train','cat':cat_train, 'start_idx':i*sample_per_process, 'end_idx':(i+1)*sample_per_process})
processes.append(proc)
proc.start()
print("Start process: ", i)
for proc in processes:
proc.join()
#m_audio('audio_train','audio_train',cat_train,80000,80500)
| [
"sys.path.append",
"AVHandler.mkdir",
"pandas.read_csv",
"AVHandler.download",
"multiprocessing.Process",
"AVHandler.cut"
] | [((187, 212), 'sys.path.append', 'sys.path.append', (['"""../lib"""'], {}), "('../lib')\n", (202, 212), False, 'import sys\n'), ((1250, 1291), 'pandas.read_csv', 'pd.read_csv', (['"""catalog/avspeech_train.csv"""'], {}), "('catalog/avspeech_train.csv')\n", (1261, 1291), True, 'import pandas as pd\n'), ((1454, 1478), 'AVHandler.mkdir', 'avh.mkdir', (['"""audio_train"""'], {}), "('audio_train')\n", (1463, 1478), True, 'import AVHandler as avh\n'), ((1590, 1772), 'multiprocessing.Process', 'Process', ([], {'target': 'm_audio', 'kwargs': "{'loc': 'audio_train', 'name': 'audio_train', 'cat': cat_train, 'start_idx':\n i * sample_per_process, 'end_idx': (i + 1) * sample_per_process}"}), "(target=m_audio, kwargs={'loc': 'audio_train', 'name': 'audio_train',\n 'cat': cat_train, 'start_idx': i * sample_per_process, 'end_idx': (i + \n 1) * sample_per_process})\n", (1597, 1772), False, 'from multiprocessing import Process\n'), ((1150, 1181), 'AVHandler.download', 'avh.download', (['loc', 'f_name', 'link'], {}), '(loc, f_name, link)\n', (1162, 1181), True, 'import AVHandler as avh\n'), ((1188, 1230), 'AVHandler.cut', 'avh.cut', (['loc', 'f_name', 'start_time', 'end_time'], {}), '(loc, f_name, start_time, end_time)\n', (1195, 1230), True, 'import AVHandler as avh\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('medals_data.csv')
df[['Gold','Silver','Bronze']].plot(kind='bar',stacked=True)
plt.title('India Olympics Medal')
plt.xlabel('Years')
plt.ylabel('Medals')
n = len(df['Games'])
labels = df.Games.str.slice(0,4)
plt.xticks(np.arange(n),labels,rotation='horizontal')
plt.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((78, 108), 'pandas.read_csv', 'pd.read_csv', (['"""medals_data.csv"""'], {}), "('medals_data.csv')\n", (89, 108), True, 'import pandas as pd\n'), ((171, 204), 'matplotlib.pyplot.title', 'plt.title', (['"""India Olympics Medal"""'], {}), "('India Olympics Medal')\n", (180, 204), True, 'import matplotlib.pyplot as plt\n'), ((205, 224), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years"""'], {}), "('Years')\n", (215, 224), True, 'import matplotlib.pyplot as plt\n'), ((225, 245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals"""'], {}), "('Medals')\n", (235, 245), True, 'import matplotlib.pyplot as plt\n'), ((354, 364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (362, 364), True, 'import matplotlib.pyplot as plt\n'), ((311, 323), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (320, 323), True, 'import numpy as np\n')] |
import requests
import urllib3
import webbrowser
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
cookies = {
}
headers = {
}
sites_200 = []
sites_403 = []
def printPage(response):
with open('test.html', "w") as output:
badchars = ['\\n', '\\t', 'b\'']
responseContent = str(response.content).strip()
for elm in badchars:
responseContent = responseContent.replace(elm, "")
output.write(responseContent)
def test_html_code(response_code, page_url):
if response_code == 200:
print(f"{page_url} : Success : {response_code}")
sites_200.append(page_url)
elif response_code == 403:
print(f"{page_url}: Success : {response_code}")
sites_403.append(page_url)
elif response_code == 404:
print(f"{page_url}: Failed : {response_code}")
def write_report():
with open('success.txt', "w") as output:
output.write("PAGES THAT 200:\n")
for elm in sites_200:
# webbrowser.open(elm)
output.write(f"{elm}\n")
output.write("\n\nPAGES THAT 403:\n")
for elm in sites_403:
output.write(f"{elm}\n")
def main():
with open('test.txt', "r") as sites:
lines = sites.readlines()
for line in lines:
page_url = line.strip()
response = requests.get(page_url, headers=headers, cookies=cookies, verify=False)
test_html_code(response.status_code, page_url)
write_report()
if __name__ == '__main__':
main() | [
"urllib3.disable_warnings",
"requests.get"
] | [((71, 138), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (95, 138), False, 'import urllib3\n'), ((1408, 1478), 'requests.get', 'requests.get', (['page_url'], {'headers': 'headers', 'cookies': 'cookies', 'verify': '(False)'}), '(page_url, headers=headers, cookies=cookies, verify=False)\n', (1420, 1478), False, 'import requests\n')] |
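Every request in the checker repeats the same headers, cookies, and verify=False; an equivalent sketch built on requests.Session sets them once per run (function and variable names here are illustrative):

# Sketch: run the same checks through one Session so defaults are set once.
import requests
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def check_pages(urls, headers=None, cookies=None):
    results = {}
    with requests.Session() as session:
        session.headers.update(headers or {})
        session.cookies.update(cookies or {})
        session.verify = False
        for url in urls:
            results[url] = session.get(url).status_code
    return results
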
# set keyboard mode for ios device
#from kivy.config import Config
#Config.set('kivy', 'keyboard_mode', 'dock')
from kivy.lang.builder import Builder
from kivymd.uix.bottomnavigation import MDBottomNavigation
from kivy.clock import Clock
from functools import partial
import SecondScreen
import FirstScreen
import ThirdScreen
from class_mydb import Mydb
from storage import Storage
from kivy.properties import StringProperty
from kivymd.app import MDApp
from Mdialog import GraphDialog
""" set test window and input android keyboard"""
# from kivy.core.window import Window
# Window.size = (375, 667)
# Window.softinput_mode = "resize"
kv = '''
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:include FirstScreen.kv
#:include SecondScreen.kv
#:include ThirdScreen.kv
<Content>:
orientation: "vertical"
spacing: "12dp"
size_hint_y: None
width: "500dp"
height: "300dp"
BoxLayout:
id: graph
BoxLayout:
id: view
ScrollView:
MDList:
id: list
MDScreen:
Manager:
id: manager
#panel_color: get_color_from_hex("#eeeaea")
#selected_color_background: get_color_from_hex("#97ecf8")
#text_color_active: 0, 0, 0, 1
FirstScreen:
id: screen1
name: 'screen1'
text: 'Kasa'
icon: 'account-cash'
on_leave:
screen2.ids.general_view.populate_view()
screen2.ids.costs_view.populate_view()
SecondScreen:
id: screen2
name: 'screen2'
text: 'Portfel'
icon: 'format-list-bulleted-type'
ThirdScreen:
name: 'screen3'
text: 'Ustawienia'
icon: 'table-settings'
on_leave:
screen1.ids.catpro_view.populate_view()
screen2.ids.general_view.populate_view()
screen2.ids.costs_view.populate_view()
'''
class Manager(MDBottomNavigation):
pass
class Budget(MDApp):
costs_sum = StringProperty('0')
# store = ''
def __init__(self, **kwargs):
super().__init__(**kwargs)
# init DICTstorage from class Storage() in storage.py for ios device
self.storage = Storage(self.user_data_dir)
# self.storage = Storage('') local env
self.store = self.storage.store
self.db = Mydb(self.user_data_dir)
#self.db = Mydb('') local env
def build(self):
self.icon = 'logo.png'
self.theme_cls.primary_palette = "Orange"
self.theme_cls.primary_hue = "500"
return Builder.load_string(kv)
def on_start(self):
self.update_store_cat_pro('category', 'project')
self.update_all()
def on_pause(self):
self.db.conn.close()
def on_stop(self):
self.db.conn.close()
""" fetch db methods """
def update_store_cat_pro(self, *args):
for i in args:
rows = self.db.fetch_col(i)
items = [i for item in rows for i in item if i is not None]
if i == 'category':
self.store['category']['cat'] = list(dict.fromkeys(items))
else:
self.store['project']['pro'] = list(dict.fromkeys(items))
def update_procat_costs(self):
self.db.procat('project', self.store['project']['pro'])
self.db.procat('category', self.store['category']['cat'])
def update_gen_cost(self):
self.fetch_costs()
self.fetch_general_costs()
def update_all(self):
#todo: TEST: fetch from zero db ?
self.fetch_costs()
self.fetch_general_costs()
self.db.procat('project', self.store['project']['pro'])
self.db.procat('category', self.store['category']['cat'])
def fetch_costs(self):
""" all costs for pro, cat and datas source in mydb class"""
rows = self.db.fetch_col(col='cost')
self.store['costs']['RAZEM'] = f'{sum([i for item in rows for i in item if i is not None]):.2f}'
self.costs_sum = f'{(sum([i for item in rows for i in item if i is not None])):.2f}'
def fetch_general_costs(self):
""" fetch and pass into localstore all today costs """
self.fetch_items(self.db.fetch_by_date, 'dzisiaj')
""" fetch and pass into localstore from curent week """
self.fetch_items(self.db.fetch_week, 'w tym tygodniu')
""" fetch and pass into localstore all costs from - current month """
self.fetch_items(self.db.fetch_current_month, 'w tym miesiącu')
""" fetch and pass into localstore all costs from - last month """
self.fetch_items(self.db.fetch_last_mont, 'miesiąc wcześniej')
""" fetch and pass into localstore all costs from - current year """
self.fetch_items(self.db.all_year, 'w tym roku')
""" fetch and pass into local store all cost from last year """
self.fetch_items(self.db.last_year, 'w poprzednim roku')
def fetch_items(self, f, ar1):
""" fetch method"""
r_ = f()
self.store['costs'][ar1] = f'{sum([i for item in r_ for i in item]):.2f}'
return ar1
def storage(self):
#app = MDApp.get_running_app()
#ddir = app.user_data_dir
self.ddir = self.user_data_dir
print('with app:', self.ddir)
print('ddir:', self.user_data_dir + 'STORE')
# return self.user_data_dir + 'STORE'
""" section graph dialog """
def open_graph_dialog(self, text):
item = text[:(text.find(':') - 1)]
if item in self.store['category']['cat']:
r = self.db.fetch_cost_and_data('category', item)
else:
r = self.db.fetch_cost_and_data('project', item)
time = [r[i][1] for i in range(len(r))] #if r[i][0] != 0]
cost = [r[i][0] for i in range(len(r))] #if r[i][0] != 0]
"pass param as a graph attr"
GraphDialog(cost, time, item).show_graph()
Budget().run() | [
"Mdialog.GraphDialog",
"storage.Storage",
"kivy.properties.StringProperty",
"kivy.lang.builder.Builder.load_string",
"class_mydb.Mydb"
] | [((2063, 2082), 'kivy.properties.StringProperty', 'StringProperty', (['"""0"""'], {}), "('0')\n", (2077, 2082), False, 'from kivy.properties import StringProperty\n'), ((2270, 2297), 'storage.Storage', 'Storage', (['self.user_data_dir'], {}), '(self.user_data_dir)\n', (2277, 2297), False, 'from storage import Storage\n'), ((2403, 2427), 'class_mydb.Mydb', 'Mydb', (['self.user_data_dir'], {}), '(self.user_data_dir)\n', (2407, 2427), False, 'from class_mydb import Mydb\n'), ((2627, 2650), 'kivy.lang.builder.Builder.load_string', 'Builder.load_string', (['kv'], {}), '(kv)\n', (2646, 2650), False, 'from kivy.lang.builder import Builder\n'), ((5922, 5951), 'Mdialog.GraphDialog', 'GraphDialog', (['cost', 'time', 'item'], {}), '(cost, time, item)\n', (5933, 5951), False, 'from Mdialog import GraphDialog\n')] |
import os
import pickle
import logging
from src.jets.data_ops.DataLoader import DataLoader
from src.jets.data_ops.Dataset import Dataset
import numpy as np
from .io import load_jets_from_pickle
w_vs_qcd = 'w-vs-qcd'
quark_gluon = 'quark-gluon'
DATASETS = {
'w':(w_vs_qcd,'antikt-kt'),
'wp':(w_vs_qcd + '/pileup','pileup'),
'pp': (quark_gluon,'pp'),
'pbpb': (quark_gluon,'pbpb'),
#'protein': ('proteins', 'casp11')
}
def load_jets(data_dir, filename, do_preprocessing=False):
if 'w-vs-qcd' in data_dir:
from .w_vs_qcd import preprocess
elif 'quark-gluon' in data_dir:
from .quark_gluon import preprocess
else:
raise ValueError('Unrecognized data_dir!')
#from problem_module import preprocess, crop_dataset
#preprocessed_dir = os.path.join(data_dir, 'preprocessed')
raw_data_dir = os.path.join(data_dir, 'raw')
preprocessed_dir = os.path.join(data_dir, 'preprocessed')
path_to_preprocessed = os.path.join(preprocessed_dir, filename)
if not os.path.exists(path_to_preprocessed) or do_preprocessing:
if not os.path.exists(preprocessed_dir):
os.makedirs(preprocessed_dir)
logging.warning("Preprocessing...")
        preprocess(raw_data_dir, preprocessed_dir, filename)
logging.warning("Preprocessed the data and saved it to {}".format(path_to_preprocessed))
else:
logging.warning("Data at {} and already preprocessed".format(path_to_preprocessed))
jets = load_jets_from_pickle(path_to_preprocessed)
logging.warning("\tSuccessfully loaded data")
logging.warning("\tFound {} jets in total".format(len(jets)))
return jets
def training_and_validation_dataset(data_dir, dataset, n_train, n_valid, preprocess):
intermediate_dir, filename = DATASETS[dataset]
data_dir = os.path.join(data_dir, intermediate_dir)
jets = load_jets(data_dir,"{}-train.pickle".format(filename), preprocess)
problem = data_dir.split('/')[-1]
subproblem = filename
train_jets = jets[n_valid:n_valid + n_train] if n_train > 0 else jets[n_valid:]
train_dataset = Dataset(train_jets, problem=problem,subproblem=subproblem)
#
valid_jets = jets[:n_valid]
valid_dataset = Dataset(valid_jets, problem=problem,subproblem=subproblem)
if 'w-vs-qcd' in data_dir:
from .w_vs_qcd import crop_dataset
elif 'quark-gluon' in data_dir:
from .quark_gluon import crop_dataset
else:
raise ValueError('Unrecognized data_dir!')
valid_dataset, cropped_dataset = crop_dataset(valid_dataset)
train_dataset.extend(cropped_dataset)
train_dataset.shuffle()
##
logging.warning("Building normalizing transform from training set...")
train_dataset.transform()
valid_dataset.transform(train_dataset.tf)
# add cropped indices to training data
logging.warning("\tfinal train size = %d" % len(train_dataset))
logging.warning("\tfinal valid size = %d" % len(valid_dataset))
return train_dataset, valid_dataset
def test_dataset(data_dir, dataset, n_test, preprocess):
train_dataset, _ = training_and_validation_dataset(data_dir, dataset, -1, 27000, False)
intermediate_dir, filename = DATASETS[dataset]
data_dir = os.path.join(data_dir, intermediate_dir)
logging.warning("Loading test data...")
filename = "{}-test.pickle".format(filename)
jets = load_jets(data_dir, filename, preprocess)
jets = jets[:n_test]
dataset = Dataset(jets)
dataset.transform(train_dataset.tf)
# crop validation set and add the excluded data to the training set
if 'w-vs-qcd' in data_dir:
from .w_vs_qcd import crop_dataset
elif 'quark-gluon' in data_dir:
from .quark_gluon import crop_dataset
else:
raise ValueError('Unrecognized data_dir!')
dataset, _ = crop_dataset(dataset)
# add cropped indices to training data
logging.warning("\tfinal test size = %d" % len(dataset))
return dataset
def get_train_data_loader(data_dir, dataset, n_train, n_valid, batch_size, leaves=None,preprocess=None,**kwargs):
train_dataset, valid_dataset = training_and_validation_dataset(data_dir, dataset, n_train, n_valid, preprocess)
train_data_loader = DataLoader(train_dataset, batch_size, leaves=leaves)
valid_data_loader = DataLoader(valid_dataset, batch_size, leaves=leaves)
return train_data_loader, valid_data_loader
def get_test_data_loader(data_dir, dataset, n_test, batch_size, leaves=None,preprocess=None,**kwargs):
dataset = test_dataset(data_dir, dataset, n_test, preprocess)
test_data_loader = DataLoader(dataset, batch_size, leaves=leaves)
return test_data_loader
| [
"os.makedirs",
"logging.warning",
"os.path.exists",
"src.jets.data_ops.Dataset.Dataset",
"os.path.join",
"src.jets.data_ops.DataLoader.DataLoader"
] | [((854, 883), 'os.path.join', 'os.path.join', (['data_dir', '"""raw"""'], {}), "(data_dir, 'raw')\n", (866, 883), False, 'import os\n'), ((907, 945), 'os.path.join', 'os.path.join', (['data_dir', '"""preprocessed"""'], {}), "(data_dir, 'preprocessed')\n", (919, 945), False, 'import os\n'), ((973, 1013), 'os.path.join', 'os.path.join', (['preprocessed_dir', 'filename'], {}), '(preprocessed_dir, filename)\n', (985, 1013), False, 'import os\n'), ((1545, 1590), 'logging.warning', 'logging.warning', (['"""\tSuccessfully loaded data"""'], {}), "('\\tSuccessfully loaded data')\n", (1560, 1590), False, 'import logging\n'), ((1827, 1867), 'os.path.join', 'os.path.join', (['data_dir', 'intermediate_dir'], {}), '(data_dir, intermediate_dir)\n', (1839, 1867), False, 'import os\n'), ((2117, 2176), 'src.jets.data_ops.Dataset.Dataset', 'Dataset', (['train_jets'], {'problem': 'problem', 'subproblem': 'subproblem'}), '(train_jets, problem=problem, subproblem=subproblem)\n', (2124, 2176), False, 'from src.jets.data_ops.Dataset import Dataset\n'), ((2234, 2293), 'src.jets.data_ops.Dataset.Dataset', 'Dataset', (['valid_jets'], {'problem': 'problem', 'subproblem': 'subproblem'}), '(valid_jets, problem=problem, subproblem=subproblem)\n', (2241, 2293), False, 'from src.jets.data_ops.Dataset import Dataset\n'), ((2660, 2730), 'logging.warning', 'logging.warning', (['"""Building normalizing transform from training set..."""'], {}), "('Building normalizing transform from training set...')\n", (2675, 2730), False, 'import logging\n'), ((3246, 3286), 'os.path.join', 'os.path.join', (['data_dir', 'intermediate_dir'], {}), '(data_dir, intermediate_dir)\n', (3258, 3286), False, 'import os\n'), ((3292, 3331), 'logging.warning', 'logging.warning', (['"""Loading test data..."""'], {}), "('Loading test data...')\n", (3307, 3331), False, 'import logging\n'), ((3474, 3487), 'src.jets.data_ops.Dataset.Dataset', 'Dataset', (['jets'], {}), '(jets)\n', (3481, 3487), False, 'from src.jets.data_ops.Dataset import Dataset\n'), ((4237, 4289), 'src.jets.data_ops.DataLoader.DataLoader', 'DataLoader', (['train_dataset', 'batch_size'], {'leaves': 'leaves'}), '(train_dataset, batch_size, leaves=leaves)\n', (4247, 4289), False, 'from src.jets.data_ops.DataLoader import DataLoader\n'), ((4314, 4366), 'src.jets.data_ops.DataLoader.DataLoader', 'DataLoader', (['valid_dataset', 'batch_size'], {'leaves': 'leaves'}), '(valid_dataset, batch_size, leaves=leaves)\n', (4324, 4366), False, 'from src.jets.data_ops.DataLoader import DataLoader\n'), ((4608, 4654), 'src.jets.data_ops.DataLoader.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'leaves': 'leaves'}), '(dataset, batch_size, leaves=leaves)\n', (4618, 4654), False, 'from src.jets.data_ops.DataLoader import DataLoader\n'), ((1184, 1219), 'logging.warning', 'logging.warning', (['"""Preprocessing..."""'], {}), "('Preprocessing...')\n", (1199, 1219), False, 'import logging\n'), ((1026, 1062), 'os.path.exists', 'os.path.exists', (['path_to_preprocessed'], {}), '(path_to_preprocessed)\n', (1040, 1062), False, 'import os\n'), ((1099, 1131), 'os.path.exists', 'os.path.exists', (['preprocessed_dir'], {}), '(preprocessed_dir)\n', (1113, 1131), False, 'import os\n'), ((1145, 1174), 'os.makedirs', 'os.makedirs', (['preprocessed_dir'], {}), '(preprocessed_dir)\n', (1156, 1174), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('corpus', '0002_data_migration_dont_know_skip_merge'),
]
operations = [
migrations.AlterField(
model_name='evidencelabel',
name='label',
preserve_default=True,
field=models.CharField(
default='SK',
null=True,
max_length=2,
choices=[
('YE', 'Yes, relation is present'),
('NO', 'No relation present'),
('NS', 'Evidence is nonsense'),
('SK', 'Skipped labeling of this evidence')
]
),
),
]
| [
"django.db.models.CharField"
] | [((409, 626), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""SK"""', 'null': '(True)', 'max_length': '(2)', 'choices': "[('YE', 'Yes, relation is present'), ('NO', 'No relation present'), ('NS',\n 'Evidence is nonsense'), ('SK', 'Skipped labeling of this evidence')]"}), "(default='SK', null=True, max_length=2, choices=[('YE',\n 'Yes, relation is present'), ('NO', 'No relation present'), ('NS',\n 'Evidence is nonsense'), ('SK', 'Skipped labeling of this evidence')])\n", (425, 626), False, 'from django.db import models, migrations\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'response_info.ui',
# licensing of 'response_info.ui' applies.
#
# Created: Sun Feb 17 10:16:18 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_ResponseInfo(object):
def setupUi(self, ResponseInfo):
ResponseInfo.setObjectName("ResponseInfo")
ResponseInfo.resize(102, 28)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ResponseInfo.sizePolicy().hasHeightForWidth())
ResponseInfo.setSizePolicy(sizePolicy)
self.reqStatusLayout = QtWidgets.QHBoxLayout(ResponseInfo)
self.reqStatusLayout.setSpacing(5)
self.reqStatusLayout.setContentsMargins(6, 0, -1, 0)
self.reqStatusLayout.setObjectName("reqStatusLayout")
self.statusCode = QtWidgets.QLabel(ResponseInfo)
self.statusCode.setStyleSheet("")
self.statusCode.setText("")
self.statusCode.setObjectName("statusCode")
self.reqStatusLayout.addWidget(self.statusCode)
self.time = QtWidgets.QLabel(ResponseInfo)
self.time.setText("")
self.time.setMargin(5)
self.time.setObjectName("time")
self.reqStatusLayout.addWidget(self.time)
self.contentType = QtWidgets.QLabel(ResponseInfo)
self.contentType.setText("")
self.contentType.setObjectName("contentType")
self.reqStatusLayout.addWidget(self.contentType)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.reqStatusLayout.addItem(spacerItem)
self.retranslateUi(ResponseInfo)
QtCore.QMetaObject.connectSlotsByName(ResponseInfo)
def retranslateUi(self, ResponseInfo):
pass
| [
"PySide2.QtCore.QMetaObject.connectSlotsByName",
"PySide2.QtWidgets.QLabel",
"PySide2.QtWidgets.QSpacerItem",
"PySide2.QtWidgets.QSizePolicy",
"PySide2.QtWidgets.QHBoxLayout"
] | [((510, 598), 'PySide2.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Minimum'], {}), '(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.\n Minimum)\n', (531, 598), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((840, 875), 'PySide2.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['ResponseInfo'], {}), '(ResponseInfo)\n', (861, 875), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1068, 1098), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['ResponseInfo'], {}), '(ResponseInfo)\n', (1084, 1098), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1305, 1335), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['ResponseInfo'], {}), '(ResponseInfo)\n', (1321, 1335), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1514, 1544), 'PySide2.QtWidgets.QLabel', 'QtWidgets.QLabel', (['ResponseInfo'], {}), '(ResponseInfo)\n', (1530, 1544), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1714, 1812), 'PySide2.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (1735, 1812), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n'), ((1907, 1958), 'PySide2.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['ResponseInfo'], {}), '(ResponseInfo)\n', (1944, 1958), False, 'from PySide2 import QtCore, QtGui, QtWidgets\n')] |
# -*- coding: utf-8 -*-
""" DSFP modifications spy, looks for save file modifications
.. module:: watcher
:platform: Linux, Windows, MacOS X
:synopsis: watches for dark souls save file modifications and prints
any modified data in console
.. moduleauthor:: Tarvitz <<EMAIL>>
"""
from __future__ import unicode_literals
import os
import sys
import six
import struct
import json
import argparse
from time import sleep
from datetime import datetime
from textwrap import wrap
from struct import pack, unpack
PROJECT_ROOT = os.path.pardir
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'dsfp'))
from dsfp.utils import chunks
def rel(path):
return os.path.join(PROJECT_ROOT, path)
class Node(object):
def __init__(self):
self.children = []
def add(self, element):
self.children.append(element)
class Leaf(Node):
__slots__ = ['start', 'size']
def __init__(self, start, size, old, new):
super(Leaf, self).__init__()
self.start = start
self.size = size
self.old = old
self.new = new
def add(self, element):
"""
nothing to do as this is leaf
:param element:
:return: None
"""
@staticmethod
def unpack(value, fmt='I'):
return struct.unpack(fmt, value)[0]
def __str__(self):
new = self.unpack(self.new)
old = self.unpack(self.old)
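        # Render as: address (hex and decimal), the new value (decimal and hex),
        # an arrow, then the previous value (decimal and hex).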
fmt = (
"0x%(addr)08x[%(saddr)10s] %(value)10s 0x%(hex)08x "
"%(follow)5s %(old)10s 0x%(old_hex)08x" % {
'addr': self.start,
'saddr': self.start,
'value': new,
'hex': new,
'old': old,
'old_hex': old,
'follow': '<-'
}
)
return fmt
def __repr__(self):
return "<Leaf: 0x%08x>" % self.start
def _wrap(source, parts):
"""
    wrap source into a list of equal parts (Python 3+ only)
:param str source: source to wrap
:param int parts: N equal parts
:rtype: list[str]
:return: list of str with N or equal length
"""
return list(chunks(source, parts))
def text_wrap(source, parts):
"""
    wrap source into a list of equal parts
:param str source: source to wrap
:param int parts: N equal parts
:rtype: list[str]
:return: list of str with N or equal length
"""
if six.PY2:
return wrap(source, parts)
return _wrap(source, parts)
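# For instance, text_wrap(b'abcdefgh', 4) is expected to yield two 4-byte chunks
# (via textwrap.wrap on Python 2 and dsfp.utils.chunks on Python 3+).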
class NewDiff(object):
"""
"""
def __init__(self, new_stream, old_stream, watchers):
self.new_stream = new_stream
self.old_stream = old_stream
self.watchers = watchers
def read_stream(self, stream, block):
"""
read stream withing given block
:param stream: stream to read
:type stream: six.BytesIO
:param dict block: start offset, size to read
:rtype: str
:return: raw data
"""
start = int(block['start'], 16)
size = int(block['size'], 16)
stream.seek(start)
return stream.read(size)
def process_diff(self, word_size=4):
"""
processes diff
:param int word_size: word size for diff processing
:rtype: list[Leaf]
:return: diffs
"""
nodes = []
for table in self.watchers:
for block in table.get('WATCH', []):
old_data = self.read_stream(self.old_stream, block)
new_data = self.read_stream(self.new_stream, block)
for idx, (old, new) in enumerate(
zip(text_wrap(old_data, word_size),
text_wrap(new_data, word_size))
):
size = int(block['size'], 16) + idx * word_size
start = int(block['start'], 16) + idx * word_size
if old == new:
continue
                    #: TODO: decide which Python version should take priority.
                    #: textwrap.wrap does not work with bytestrings, and iterating
                    #: over a bytestring converts each char back to an int; the only
                    #: option on Python 3+ is to pack them back into bytes, which
                    #: may cost some performance.
processed_old = old
processed_new = new
if isinstance(old, list) and isinstance(new, list):
processed_old = pack('B' * word_size, *old)
processed_new = pack('B' * word_size, *new)
nodes.append(
Leaf(start, size, processed_old, processed_new)
)
return nodes
class Spy(object):
""" Changes spy
:param str filename: path inspected filename
:keyword int slot: character slot
:keyword dict skip_table: skip some data which is represented in table
stored in dict
:keyword bool use_curses: use curses interface instead of standard cli
:keyword int start_offset: start inspections with given offset
    :keyword int end_offset: end inspections with given offset
"""
def __init__(self, filename, watchers=None):
self.filename = filename
self.watchers = watchers
def read(self):
fo = open(self.filename, 'rb')
return six.BytesIO(fo.read())
@staticmethod
def log(out):
"""
log into the main window
:keyword bool refresh: True if should be refreshed
"""
print(out)
def run(self):
modified = 0
old_stat = os.lstat(self.filename)
old_stream = self.read()
while 1:
sleep(1)
stat = os.lstat(self.filename)
if stat.st_mtime == old_stat.st_mtime:
continue
now = datetime.now()
print("modified: %s [%s]" % (modified, now.strftime('%H:%M:%S')))
old_stat = stat
new_stream = self.read()
diff = NewDiff(old_stream=old_stream,
new_stream=new_stream,
watchers=self.watchers)
for node in diff.process_diff():
print(node)
modified += 1
def get_default_file_name():
"""
    On Windows, locate the default draks0005.sl2 save file under the
    user's Documents/NBGI/DarkSouls directory.
:rtype: str
:return: draks0005.sl2 file location
"""
prefix = os.path.join(
os.getenv('HOME'), 'Documents/NBGI/DarkSouls/'
)
path = ''
default_file = 'draks0005.sl2'
if sys.version_info[:2] >= (3, 5):
path = next(x for x in os.scandir(prefix) if x.is_dir()).path
else:
for root, directory, files in os.walk(prefix):
for filename in files:
if filename == default_file:
path = os.path.join(prefix, root)
break
return os.path.join(path, default_file)
def main(ns):
filename = ns.filename or get_default_file_name()
watchers = []
if ns.watch_table:
for stream in ns.watch_table:
watchers.append(json.loads(stream.read()))
watcher = Spy(filename=filename, watchers=watchers)
try:
watcher.run()
except KeyboardInterrupt:
print("\nCatch Ctrl+C, exiting ..")
finally:
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Prints changes for darksouls save file.'
)
parser.add_argument('-f', '--filename', metavar='draks0005.sl2',
type=str, dest='filename',
help='save file', required=False)
parser.add_argument('-w', '--watch-table',
dest='watch_table',
metavar='table.json,table2.json',
nargs='+',
type=argparse.FileType('r'),
help=(
'use data inside of json file for choosing what to'
' diff check inside of block with given offsets'),
required=True)
arguments = parser.parse_args(sys.argv[1:])
main(arguments)
| [
"argparse.ArgumentParser",
"textwrap.wrap",
"os.walk",
"dsfp.utils.chunks",
"struct.unpack",
"datetime.datetime.now",
"time.sleep",
"struct.pack",
"os.scandir",
"sys.exit",
"os.path.join",
"os.getenv",
"os.lstat",
"argparse.FileType"
] | [((571, 605), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""dsfp"""'], {}), "(PROJECT_ROOT, 'dsfp')\n", (583, 605), False, 'import os\n'), ((666, 698), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', 'path'], {}), '(PROJECT_ROOT, path)\n', (678, 698), False, 'import os\n'), ((6932, 6964), 'os.path.join', 'os.path.join', (['path', 'default_file'], {}), '(path, default_file)\n', (6944, 6964), False, 'import os\n'), ((7405, 7483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Prints changes for darksouls save file."""'}), "(description='Prints changes for darksouls save file.')\n", (7428, 7483), False, 'import argparse\n'), ((2135, 2156), 'dsfp.utils.chunks', 'chunks', (['source', 'parts'], {}), '(source, parts)\n', (2141, 2156), False, 'from dsfp.utils import chunks\n'), ((2421, 2440), 'textwrap.wrap', 'wrap', (['source', 'parts'], {}), '(source, parts)\n', (2425, 2440), False, 'from textwrap import wrap\n'), ((5632, 5655), 'os.lstat', 'os.lstat', (['self.filename'], {}), '(self.filename)\n', (5640, 5655), False, 'import os\n'), ((6485, 6502), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (6494, 6502), False, 'import os\n'), ((6744, 6759), 'os.walk', 'os.walk', (['prefix'], {}), '(prefix)\n', (6751, 6759), False, 'import os\n'), ((7351, 7362), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7359, 7362), False, 'import sys\n'), ((1279, 1304), 'struct.unpack', 'struct.unpack', (['fmt', 'value'], {}), '(fmt, value)\n', (1292, 1304), False, 'import struct\n'), ((5719, 5727), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (5724, 5727), False, 'from time import sleep\n'), ((5747, 5770), 'os.lstat', 'os.lstat', (['self.filename'], {}), '(self.filename)\n', (5755, 5770), False, 'import os\n'), ((5865, 5879), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5877, 5879), False, 'from datetime import datetime\n'), ((7889, 7911), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (7906, 7911), False, 'import argparse\n'), ((6868, 6894), 'os.path.join', 'os.path.join', (['prefix', 'root'], {}), '(prefix, root)\n', (6880, 6894), False, 'import os\n'), ((4500, 4527), 'struct.pack', 'pack', (["('B' * word_size)", '*old'], {}), "('B' * word_size, *old)\n", (4504, 4527), False, 'from struct import pack, unpack\n'), ((4568, 4595), 'struct.pack', 'pack', (["('B' * word_size)", '*new'], {}), "('B' * word_size, *new)\n", (4572, 4595), False, 'from struct import pack, unpack\n'), ((6657, 6675), 'os.scandir', 'os.scandir', (['prefix'], {}), '(prefix)\n', (6667, 6675), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os, sys, pkgutil, json, glob
from distutils.command.clean import clean as CleanCommand
from setuptools import setup, find_packages, Command
#from setuptools import Extension # for Swig extension
from builtins import open, dict
PROJECT = 'intro_py.foreignc'
HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.extend([os.path.join(HERE, '..')])
## for cffi, swig
#if 'java' in sys.platform.lower():
# raise Exception('This package can not be used with Jython.')
## for jna
#if 'java' not in sys.platform.lower():
# raise Exception('This package can only be used with Jython.')
## jip install <groupId>:<artifactId>:<version> --> javalib/*.jar
## java -jar ivy.jar -dependency <groupId> <artifactId> '[<version>,)' -types jar -retrieve 'javalib/[artifact]-[revision](-[classifier]).[ext]'
#sys.path.extend(glob.glob('javalib/*.jar'))
def disable_commands(*blacklist):
bad_cmds = [arg for cmd in blacklist for arg in sys.argv if cmd in arg]
if [] != bad_cmds:
print('Command(s) {0} have been disabled; exiting'.format(bad_cmds))
raise SystemExit(2)
disable_commands('register', 'upload')
def _matches_filepatterns(filepats, paths):
import fnmatch
matches_pats = [os.path.join(root, file1) for path in paths
for root, dirs, files in os.walk(path) for filepat in filepats
for file1 in fnmatch.filter(dirs + files, filepat)]
return matches_pats
def _remove_pathlist(pathlist):
import shutil
for path in pathlist:
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
class Clean0(CleanCommand):
description = CleanCommand.description + ' (modified)'
def run(self):
import shutil
CleanCommand.run(self)
if 1 != self.all:
return
_remove_pathlist(_matches_filepatterns(['build', 'dist', '*.egg*',
'.cache', '__pycache__', '.hypothesis', 'htmlcov', '.tox', '*.so',
'*.pyc', '*.pyo', '*~', '.coverage*', '*.log', '*.class'], ['.']))
class Test0(Command):
## nose2 cmd description
#description = 'run nose2 [DEBUG=1] (* addon *)'
description = 'run unittest discover [DEBUG=1] (* addon *)'
user_options = [('opts=', 'o', 'Test options (default: -s {0})'.format(
'/'.join(PROJECT.split('.')[:-1])))]
def initialize_options(self):
self.cwd, self.opts = None, ''
def finalize_options(self):
self.cwd = os.getcwd()
def run(self):
import subprocess
assert os.getcwd() == self.cwd, 'Must be in pkg root: {0}'.format(
self.cwd)
## use nose2
#errno = subprocess.call('{0} -m nose2 {1}'.format(
# sys.executable, self.opts), shell = True)
errno = subprocess.call('{0} -m unittest discover {1}'.format(
sys.executable, self.opts), shell = True)
raise SystemExit(errno)
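# Example usage (options shown are illustrative): DEBUG=1 python setup.py test -o "-s intro_py"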
## for ffi_lib
#PREFIX = os.environ.get('PREFIX', '/usr/local')
#os.environ['LD_LIBRARY_PATH'] = ':'.join([
# os.environ.get('LD_LIBRARY_PATH', '.'), '{}/lib'.format(PREFIX)])
#os.environ['LDFLAGS'] = ' '.join([
# os.environ.get('LDFLAGS', '-Lbuild/lib'), '-L{}/lib'.format(PREFIX)])
#os.environ['CPPFLAGS'] = ' '.join([
# os.environ.get('CPPFLAGS', '-Ibuild/include'),
# '-I{}/include'.format(PREFIX)])
## for Swig extension
#extension_mod = Extension(name='{0}._classic_c'.format(PROJECT),
# # sources=['{0}/classic_c_wrap.c'.format('build')],
# sources=['{0}/classic_c.i'.format(PROJECT.replace('.', '/'))],
# include_dirs=['.', PROJECT.replace('.', '/'), '{}/include'.format(PREFIX)],
# library_dirs=os.environ.get('LD_LIBRARY_PATH', 'build/lib').split(':'),
# libraries=[PROJECT],
# runtime_library_dirs=['$ORIGIN/', '{}/lib'.format(PREFIX)],
# extra_compile_args=os.environ.get('CPPFLAGS', '-Ibuild/include').split(' '),
# extra_link_args=os.environ.get('LDFLAGS', '-Lbuild/lib').split(' '),
# swig_opts=['-modern', '-I.']
# )
cmds_addon = {}
if '1' == os.environ.get('DEBUG', '0').lower():
sys.executable = '{0} -m coverage run'.format(sys.executable)
# setuptools add-on cmds
try:
import setup_addcmds
cmds_addon.update(setup_addcmds.cmdclass)
except ImportError as exc:
print(repr(exc))
with open('README.rst') as f_in:
readme = f_in.read()
with open('HISTORY.rst') as f_in:
history = f_in.read()
json_bytes = pkgutil.get_data(PROJECT, 'resources/pkginfo.json')
pkginfo = json.loads(json_bytes.decode(encoding='utf-8')) if json_bytes is not None else {}
licenseclassifiers = {
"Apache-2.0": "License :: OSI Approved :: Apache Software License",
"MIT": "License :: OSI Approved :: MIT License",
"BSD-3-Clause": "License :: OSI Approved :: BSD License",
"GPL-3.0+": "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"ISC": "License :: OSI Approved :: ISC License (ISCL)",
"Unlicense": "License :: Public Domain"
}
setup(
long_description=readme + '\n\n' + history,
classifiers=[
"Natural Language :: English",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
licenseclassifiers.get('Apache-2.0', "License :: OSI Approved :: Apache Software License"),
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: Jython",
"Topic :: Software Development"
],
#package_dir={'': '.'},
#packages=find_packages(include=[PROJECT, '{0}.tests'.format(PROJECT.replace('.', '/'))]),
packages=find_packages(),
# py_modules=[splitext(basename(path))[0] for path in glob.glob('{0}/*.py'.format('/'.join(PROJECT.split('.')[:-1])))],
#data_files=[('', ['{0}/tests/__main__.py'.format(PROJECT.replace('.', '/'))])], # DON'T USE
#package_data={'': ['{0}/tests/__main__.py'.format(PROJECT.replace('.', '/'))]}, # DON'T USE
#test_suite='{0}.tests'.format(PROJECT),
## for cffi
#cffi_modules=['{0}/classic_build.py:ffibuilder'.format(
# PROJECT.replace('.', '/'))],
## for Swig extension
#ext_modules=[extension_mod],
cmdclass=dict(dict({'clean': Clean0, 'test': Test0}).items()
## setuptools add-on cmds
| cmds_addon.items()
),
**pkginfo
)
| [
"pkgutil.get_data",
"fnmatch.filter",
"os.remove",
"os.getcwd",
"builtins.open",
"os.path.dirname",
"distutils.command.clean.clean.run",
"os.walk",
"os.path.exists",
"os.path.isdir",
"builtins.dict",
"os.environ.get",
"shutil.rmtree",
"os.path.join",
"setuptools.find_packages"
] | [((4509, 4560), 'pkgutil.get_data', 'pkgutil.get_data', (['PROJECT', '"""resources/pkginfo.json"""'], {}), "(PROJECT, 'resources/pkginfo.json')\n", (4525, 4560), False, 'import os, sys, pkgutil, json, glob\n'), ((368, 393), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (383, 393), False, 'import os, sys, pkgutil, json, glob\n'), ((4381, 4399), 'builtins.open', 'open', (['"""README.rst"""'], {}), "('README.rst')\n", (4385, 4399), False, 'from builtins import open, dict\n'), ((4440, 4459), 'builtins.open', 'open', (['"""HISTORY.rst"""'], {}), "('HISTORY.rst')\n", (4444, 4459), False, 'from builtins import open, dict\n'), ((412, 436), 'os.path.join', 'os.path.join', (['HERE', '""".."""'], {}), "(HERE, '..')\n", (424, 436), False, 'import os, sys, pkgutil, json, glob\n'), ((1298, 1323), 'os.path.join', 'os.path.join', (['root', 'file1'], {}), '(root, file1)\n', (1310, 1323), False, 'import os, sys, pkgutil, json, glob\n'), ((1863, 1885), 'distutils.command.clean.clean.run', 'CleanCommand.run', (['self'], {}), '(self)\n', (1879, 1885), True, 'from distutils.command.clean import clean as CleanCommand\n'), ((2578, 2589), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2587, 2589), False, 'import os, sys, pkgutil, json, glob\n'), ((5751, 5766), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (5764, 5766), False, 'from setuptools import setup, find_packages, Command\n'), ((1375, 1388), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1382, 1388), False, 'import os, sys, pkgutil, json, glob\n'), ((1434, 1471), 'fnmatch.filter', 'fnmatch.filter', (['(dirs + files)', 'filepat'], {}), '(dirs + files, filepat)\n', (1448, 1471), False, 'import fnmatch\n'), ((1585, 1605), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1599, 1605), False, 'import os, sys, pkgutil, json, glob\n'), ((1610, 1629), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1623, 1629), False, 'import os, sys, pkgutil, json, glob\n'), ((1643, 1662), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (1656, 1662), False, 'import shutil\n'), ((1676, 1696), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1690, 1696), False, 'import os, sys, pkgutil, json, glob\n'), ((2650, 2661), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2659, 2661), False, 'import os, sys, pkgutil, json, glob\n'), ((4130, 4158), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""', '"""0"""'], {}), "('DEBUG', '0')\n", (4144, 4158), False, 'import os, sys, pkgutil, json, glob\n'), ((1710, 1725), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1719, 1725), False, 'import os, sys, pkgutil, json, glob\n'), ((6329, 6367), 'builtins.dict', 'dict', (["{'clean': Clean0, 'test': Test0}"], {}), "({'clean': Clean0, 'test': Test0})\n", (6333, 6367), False, 'from builtins import open, dict\n')] |
# This file is loaded by py.test to discover API tests
import pytest
from apitest import APITest
from loader import yaml_load
def pytest_collect_file(parent, path):
if path.ext == ".yaml" and path.basename.startswith("test"):
return APITestFile(path, parent)
class APITestFile(pytest.File):
def collect(self):
doc = yaml_load(self.fspath.open())
if doc:
config = doc.get('config', {})
for test in doc.get('tests', []):
yield APITestItem(test['name'], self, test, config)
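# A test file is expected to be named test*.yaml and to look roughly like this
# (field values are hypothetical):
#   config: {}
#   tests:
#     - name: example request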
class APITestItem(pytest.Item):
def __init__(self, name, parent, api_test, api_config):
super(APITestItem, self).__init__(name, parent)
self.api_test = api_test
self.api_config = api_config
def runtest(self):
test = APITest(self.api_test, self.api_config)
test.runtest()
def reportinfo(self):
return self.fspath, 0, "API Test: %s" % self.name
class YamlException(Exception):
""" custom exception for error reporting. """
| [
"apitest.APITest"
] | [((808, 847), 'apitest.APITest', 'APITest', (['self.api_test', 'self.api_config'], {}), '(self.api_test, self.api_config)\n', (815, 847), False, 'from apitest import APITest\n')] |
#Import the libraries
#Pygame
import pygame
pygame.init()
#os to access files
import os
#Inits
#import win | [
"pygame.init"
] | [((44, 57), 'pygame.init', 'pygame.init', ([], {}), '()\n', (55, 57), False, 'import pygame\n')] |
'''
This code was written by following this tutorial:
Link: https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76
This script processes and generates GloVe embeddings
'''
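# Inputs used below: glove.6B/glove.6B.300d.txt (400k 300-d vectors) and data/vocab.pkl;
# outputs: a bcolz vector store, word/index pickles, and a vocab-aligned weights matrix.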
# coding: utf-8
import pickle
from preprocess import Vocabulary
import numpy as np
import json
from scipy import misc
import bcolz
words = []
idx = 0
word2idx = {}
vectors = bcolz.carray(np.zeros(1), rootdir='glove.6B/6B.300.dat', mode='w')
with open('glove.6B/glove.6B.300d.txt', 'rb') as f:
for l in f:
line = l.decode().split()
word = line[0]
words.append(word)
word2idx[word] = idx
idx += 1
vect = np.array(line[1:]).astype(np.float)
vectors.append(vect)
vectors = bcolz.carray(vectors[1:].reshape((400000, 300)), rootdir='glove.6B/6B.300.dat', mode='w')
vectors.flush()
pickle.dump(words, open('glove.6B/6B.300_words.pkl', 'wb'))
pickle.dump(word2idx, open('glove.6B/6B.300_idx.pkl', 'wb'))
with open('data/vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
print('Loading vocab...')
vectors = bcolz.open('glove.6B/6B.300.dat')[:]
words = pickle.load(open('glove.6B/6B.300_words.pkl', 'rb'))
word2idx = pickle.load(open('glove.6B/6B.300_idx.pkl', 'rb'))
print('glove is loaded...')
glove = {w: vectors[word2idx[w]] for w in words}
matrix_len = len(vocab)
weights_matrix = np.zeros((matrix_len, 300))
words_found = 0
for i, word in enumerate(vocab.idx2word):
try:
weights_matrix[i] = glove[word]
words_found += 1
except KeyError:
weights_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
pickle.dump(weights_matrix, open('glove.6B/glove_words.pkl', 'wb'), protocol=2)
print('weights_matrix is created')
| [
"numpy.zeros",
"pickle.load",
"numpy.array",
"numpy.random.normal",
"bcolz.open"
] | [((1372, 1399), 'numpy.zeros', 'np.zeros', (['(matrix_len, 300)'], {}), '((matrix_len, 300))\n', (1380, 1399), True, 'import numpy as np\n'), ((411, 422), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (419, 422), True, 'import numpy as np\n'), ((1039, 1053), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1050, 1053), False, 'import pickle\n'), ((1092, 1125), 'bcolz.open', 'bcolz.open', (['"""glove.6B/6B.300.dat"""'], {}), "('glove.6B/6B.300.dat')\n", (1102, 1125), False, 'import bcolz\n'), ((1583, 1623), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.6)', 'size': '(300,)'}), '(scale=0.6, size=(300,))\n', (1599, 1623), True, 'import numpy as np\n'), ((679, 697), 'numpy.array', 'np.array', (['line[1:]'], {}), '(line[1:])\n', (687, 697), True, 'import numpy as np\n')] |
import sys
import sqlite3
from tableinfo import TableInfo
class DbInfo(object):
def __init__(self, name):
self.name = name
self.conn = sqlite3.connect(name)
self.tables = {}
self.conn.text_factory = lambda x: str(x, 'utf-8', 'ignore')
cursor = self.conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type =\'table\' AND name NOT LIKE \'sqlite_%\'")
rows = cursor.fetchall()
if len(rows) > 0:
tableNames = []
for row in rows:
tableNames.append(row[0])
for tableName in tableNames:
self.tables[tableName] = TableInfo(self.conn, tableName)
def compareTables(self, tableName, numColumns, db):
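        # Compare the named table's rows column by column against the same table
        # in the other database; returns a (matched, row_index) pair, where
        # row_index marks the first differing row when matched is False.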
cursor = self.conn.cursor()
sql = "SELECT * FROM '" + tableName + "'"
cursor.execute(sql)
rows = cursor.fetchall()
if len(rows) > 0:
cursor2 = db.conn.cursor()
cursor2.execute(sql)
for rowNum, row in enumerate(rows):
row2 = cursor2.fetchone()
if (row is None) or (row2 is None):
return False, rowNum
for col in range(numColumns):
if row[col] != row2[col]:
return False, rowNum
return True, 0
return False, 0
def compare(self, db):
matches = True
matched = []
onlyOne = []
onlyTwo = []
for tableName, tableInfo in self.tables.items():
tableInfo2 = db.tables.get(tableName)
if tableInfo2 is not None:
if tableInfo.compare(tableInfo2):
if tableInfo.numRows < 1000:
dataMatched, rowNum = self.compareTables(tableName, len(tableInfo.columns), db)
if not dataMatched:
matches = False
sys.stdout.write('Different Data for Table: {} in row {}\n'.format(tableName, rowNum + 1))
else:
matched.append(tableInfo)
else:
matched.append(tableInfo)
else:
matches = False
sys.stdout.write('Different Table: {} {} {} but in {} {}\n'.format(tableName, self.name, tableInfo.toStr(False),
db.name, tableInfo2.toStr(False)))
else:
matches = False
onlyOne.append(tableName)
for tableName, tableInfo in db.tables.items():
if tableName not in self.tables:
matches = False
onlyTwo.append(tableName)
if len(matched) > 0:
sys.stdout.write("*************** {} matched tables ****************\n".format(len(matched)))
for table in matched:
sys.stdout.write("Table: {}\n".format(table.toStr(True)))
if len(onlyOne) > 0:
sys.stdout.write("*************** {} tables only in {} ****************\n".format(len(onlyOne), self.name))
for table in onlyOne:
sys.stdout.write("Table: {}\n".format(table))
if len(onlyTwo) > 0:
sys.stdout.write("*************** {} tables only in {} ****************\n".format(len(onlyTwo), db.name))
for table in onlyTwo:
sys.stdout.write("Table: {}\n".format(table))
return matches
| [
"tableinfo.TableInfo",
"sqlite3.connect"
] | [((156, 177), 'sqlite3.connect', 'sqlite3.connect', (['name'], {}), '(name)\n', (171, 177), False, 'import sqlite3\n'), ((659, 690), 'tableinfo.TableInfo', 'TableInfo', (['self.conn', 'tableName'], {}), '(self.conn, tableName)\n', (668, 690), False, 'from tableinfo import TableInfo\n')] |
#!/usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
import tf
def all_close(goal, actual, tolerance):
"""
Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
@param: goal A list of floats, a Pose or a PoseStamped
@param: actual A list of floats, a Pose or a PoseStamped
@param: tolerance A float
@returns: bool
"""
all_equal = True
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - goal[index]) > tolerance:
return False
elif type(goal) is geometry_msgs.msg.PoseStamped:
return all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is geometry_msgs.msg.Pose:
return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
return True
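# For example, all_close([0.1, 0.2], [0.101, 0.199], 0.01) returns True, while any
# element differing by more than the tolerance makes it return False.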
def callback(data,args):
print("hello there")
ur_robot = args[0]
ur_scene = args[1]
ur_move_group = args[2]
ur_planning_frame = args[3]
ur_eef_link = args[4]
ur_group_names = args[5]
move_group = ur_move_group
print("elo grab")
print(data)
data.position.x = data.position.x - 0.05
data.position.y = data.position.y - 0.03
data.position.z = 0.15
data.orientation.x = -0.0
data.orientation.y = 1.0
data.orientation.z = 0.0
data.orientation.w = -0.0
move_group.set_pose_target(data)
plan = move_group.go(wait=True)
move_group.stop()
move_group.clear_pose_targets()
current_pose = move_group.get_current_pose().pose
return all_close(data, current_pose, 0.01)
def main():
try:
print("Grab")
moveit_commander.roscpp_initialize(sys.argv)
robot = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
group_name = "manipulator"
move_group = moveit_commander.MoveGroupCommander(group_name)
planning_frame = move_group.get_planning_frame()
eef_link = move_group.get_end_effector_link()
group_names = robot.get_group_names()
robot.get_current_state()
# Misc variables
ur_robot = robot
ur_scene = scene
ur_move_group = move_group
ur_planning_frame = planning_frame
ur_eef_link = eef_link
ur_group_names = group_names
rospy.init_node('move_ur_python_interface', anonymous=True)
rospy.Subscriber("/aruco_pose",geometry_msgs.msg.Pose,callback,(ur_robot,
ur_scene,
ur_move_group,
ur_planning_frame,
ur_eef_link,
ur_group_names))
rospy.spin()
except rospy.ROSInterruptException:
return
except KeyboardInterrupt:
return
if __name__ == '__main__':
main() | [
"moveit_commander.RobotCommander",
"rospy.Subscriber",
"moveit_commander.PlanningSceneInterface",
"moveit_commander.MoveGroupCommander",
"rospy.init_node",
"moveit_commander.conversions.pose_to_list",
"rospy.spin",
"moveit_commander.roscpp_initialize"
] | [((1774, 1818), 'moveit_commander.roscpp_initialize', 'moveit_commander.roscpp_initialize', (['sys.argv'], {}), '(sys.argv)\n', (1808, 1818), False, 'import moveit_commander\n'), ((1831, 1864), 'moveit_commander.RobotCommander', 'moveit_commander.RobotCommander', ([], {}), '()\n', (1862, 1864), False, 'import moveit_commander\n'), ((1877, 1918), 'moveit_commander.PlanningSceneInterface', 'moveit_commander.PlanningSceneInterface', ([], {}), '()\n', (1916, 1918), False, 'import moveit_commander\n'), ((1967, 2014), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['group_name'], {}), '(group_name)\n', (2002, 2014), False, 'import moveit_commander\n'), ((2400, 2459), 'rospy.init_node', 'rospy.init_node', (['"""move_ur_python_interface"""'], {'anonymous': '(True)'}), "('move_ur_python_interface', anonymous=True)\n", (2415, 2459), False, 'import rospy\n'), ((2464, 2618), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/aruco_pose"""', 'geometry_msgs.msg.Pose', 'callback', '(ur_robot, ur_scene, ur_move_group, ur_planning_frame, ur_eef_link,\n ur_group_names)'], {}), "('/aruco_pose', geometry_msgs.msg.Pose, callback, (ur_robot,\n ur_scene, ur_move_group, ur_planning_frame, ur_eef_link, ur_group_names))\n", (2480, 2618), False, 'import rospy\n'), ((2961, 2973), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2971, 2973), False, 'import rospy\n'), ((951, 969), 'moveit_commander.conversions.pose_to_list', 'pose_to_list', (['goal'], {}), '(goal)\n', (963, 969), False, 'from moveit_commander.conversions import pose_to_list\n'), ((971, 991), 'moveit_commander.conversions.pose_to_list', 'pose_to_list', (['actual'], {}), '(actual)\n', (983, 991), False, 'from moveit_commander.conversions import pose_to_list\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import functools
import re
import shlex
import threading
import time
import traceback
import kdpserver
import lldb
import lldbagilityutils
import stubvm
vm = None
def _exec_cmd(debugger, command, capture_output=False):
if capture_output:
cmdretobj = lldb.SBCommandReturnObject()
debugger.GetCommandInterpreter().HandleCommand(command, cmdretobj)
return cmdretobj
else:
debugger.HandleCommand(command)
return None
def _evaluate_expression(exe_ctx, expression):
res = exe_ctx.frame.EvaluateExpression(expression)
try:
vaddr = int(res.GetValue(), 0)
except (TypeError, ValueError):
return None
else:
return vaddr
def fdp_attach(debugger, command, exe_ctx, result, internal_dict):
"""
Connect to a macOS VM via FDP.
The VM must have already been started.
Existing breakpoints are deleted on attaching.
Re-execute this command every time the VM is rebooted.
"""
parser = argparse.ArgumentParser(prog="fdp-attach")
parser.add_argument("vm_name")
args = parser.parse_args(shlex.split(command))
_attach(debugger, exe_ctx, stubvm.FDPSTUB, args.vm_name)
def vmsn_attach(debugger, command, exe_ctx, result, internal_dict):
"""
Connect to a macOS VM via VMSN. Currently not maintained!
Existing breakpoints are deleted on attaching.
"""
parser = argparse.ArgumentParser(prog="vmsn-attach")
parser.add_argument("vm_name")
args = parser.parse_args(shlex.split(command))
_attach(debugger, exe_ctx, stubvm.VMSNSTUB, args.vm_name)
def _attach(debugger, exe_ctx, vm_stub, vm_name):
global vm
print(lldbagilityutils.LLDBAGILITY)
print("* Attaching to the VM")
try:
vm = stubvm.STUBVM(vm_stub, vm_name)
except Exception as exc:
print("* Could not attach! {}".format(str(exc)))
return
print("* Resuming the VM execution until reaching kernel code")
vm.complete_attach()
print("* Kernel load address: 0x{:016x}".format(vm.kernel_load_vaddr))
print("* Kernel slide: 0x{:x}".format(vm.kernel_slide))
print("* Kernel cr3: 0x{:x}".format(vm.kernel_cr3))
print("* Kernel version: {}".format(vm.kernel_version))
print("* VM breakpoints deleted")
# detach the previous process (if any)
exe_ctx.process.Detach()
# remove all LLDB breakpoints
exe_ctx.target.DeleteAllBreakpoints()
print("* LLDB breakpoints deleted")
# start the fake KDP server
kdpsv = kdpserver.KDPServer()
th = threading.Thread(target=kdpsv.debug, args=(vm,))
th.daemon = True
th.start()
# connect LLDB to the fake KDP server
kdpsv_addr, kdpsv_port = kdpsv.sv_sock.getsockname()
_exec_cmd(debugger, "kdp-remote '{}:{}'".format(kdpsv_addr, kdpsv_port))
# trigger a memory write to find out the address of the kdp struct
vm.store_kdp_at_next_write_virtual_memory()
if _exec_cmd(debugger, "memory write &kdp 41", capture_output=True).GetError():
print("* Unable to find the 'kdp' symbol. Did you specify the target to debug?")
vm.abort_store_kdp_at_next_write_virtual_memory()
def _attached(f):
@functools.wraps(f)
def _wrapper(*args, **kwargs):
global vm
if not vm:
print("* Not attached to a VM!")
return
return f(*args, **kwargs)
return _wrapper
@_attached
def fdp_save(debugger, command, exe_ctx, result, internal_dict):
"""
Save the current state of the attached macOS VM.
Breakpoints are not saved (but retained for the current session).
"""
# saving the state causes all breakpoints (soft and hard) to be unset, but
# we can preserve them at least for the current session
# we disable soft breakpoints before saving and then re-enable them once the state
# has been saved, so that LLDB sends again the KDP requests for setting them
exe_ctx.target.DisableAllBreakpoints()
# similarly, for hard breakpoints we save the state of the debug registers before saving,
# and restore it afterwards
dbgregs = vm.read_registers(("dr0", "dr1", "dr2", "dr3", "dr6", "dr7"))
# interrupt and save the VM state
process_was_stopped = exe_ctx.process.is_stopped
print("* Saving the VM state")
vm.interrupt_and_take_snapshot()
print("* State saved")
# restore soft breakpoints
exe_ctx.target.EnableAllBreakpoints()
# restore hard breakpoints
vm.write_registers(dbgregs)
if not process_was_stopped:
# display stop info
_exec_cmd(debugger, "process status")
@_attached
def fdp_restore(debugger, command, exe_ctx, result, internal_dict):
"""
Restore the attached macOS VM to the last saved state.
Breakpoints are deleted on restoring.
"""
# interrupt and restore the VM state
print("* Restoring the last saved VM state")
if vm.interrupt_and_restore_last_snapshot():
print("* State restored")
# do a full reattach (the kernel load address may differ)
fdp_attach(debugger, vm.name, exe_ctx, result, internal_dict)
else:
print("* No saved state found")
@_attached
def fdp_interrupt(debugger, command, exe_ctx, result, internal_dict):
"""
Interrupt (pause) the execution of the attached macOS VM.
"""
vm.interrupt()
@_attached
def fdp_hbreakpoint(debugger, command, exe_ctx, result, internal_dict):
"""
Set or unset hardware breakpoints.
Hardware breakpoints are implemented using the debug registers DR0, DR1, DR2 and DR3.
Consequently, a maximum of four hardware breakpoints can be active simultaneously.
"""
parser = argparse.ArgumentParser(prog="fdp-hbreakpoint")
subparsers = parser.add_subparsers(dest="action")
set_parser = subparsers.add_parser("set")
set_parser.add_argument(
"trigger",
choices={"e", "rw", "w"},
help="Type of memory access to trap on: execute, read/write, or write only.",
)
set_parser.add_argument(
"nreg",
type=lambda i: int(i, 0),
choices={0, 1, 2, 3},
help="Breakpoint slot to use (corresponding to registers ).",
)
set_parser.add_argument(
"expression", help="Breakpoint address or expression to be evaluated as such."
)
unset_parser = subparsers.add_parser("unset")
unset_parser.add_argument(
"nreg",
type=lambda i: int(i, 0),
choices={0, 1, 2, 3},
help="Breakpoint slot to free (corresponding to registers DR0, DR1, DR2 and DR3).",
)
args = parser.parse_args(shlex.split(command))
if args.action == "set":
vaddr = _evaluate_expression(exe_ctx, args.expression)
if vaddr:
vm.set_hard_breakpoint(args.trigger, args.nreg, vaddr)
print("* Hardware breakpoint set: address = 0x{:016x}".format(vaddr))
else:
print("* Invalid expression")
elif args.action == "unset":
vm.unset_hard_breakpoint(args.nreg)
print("* Hardware breakpoint unset")
else:
raise AssertionError
@_attached
def fdp_test(debugger, command, exe_ctx, result, internal_dict):
"""
Run some tests.
Warning: tests change the state of the machine and modify the last saved state!
"""
regs = {
"rax",
"rbx",
"rcx",
"rdx",
"rdi",
"rsi",
"rbp",
"rsp",
"r8",
"r9",
"r10",
"r11",
"r12",
"r13",
"r14",
"r15",
"rip",
"rflags",
"cs",
"fs",
"gs",
}
def _t1():
print("* Halt/resume/single step")
vm.halt()
assert vm.is_state_halted()
vm.resume()
assert not vm.is_state_halted()
vm.halt()
for _ in range(100):
vm.single_step()
assert vm.is_state_halted()
def _t2():
print("* Read/write registers")
vm.halt()
orig_values = vm.read_registers(regs)
new_values = {reg: 0x1337 for reg in regs}
for reg in regs:
vm.write_register(reg, new_values[reg])
# modifications to RFLAGS should be disabled
assert vm.read_register("rflags") == orig_values["rflags"]
del new_values["rflags"]
assert vm.read_registers(regs - {"rflags"}) == new_values
vm.write_registers(orig_values)
for reg in regs:
assert vm.read_register(reg) == orig_values[reg]
def _t3():
print("* Read/write virtual memory")
vm.halt()
data = vm.read_virtual_memory(vm.read_register("rsp"), 0x8)
new_data = b"ABCDEFGH"
vm.write_virtual_memory(vm.read_register("rsp"), new_data)
assert vm.read_virtual_memory(vm.read_register("rsp"), 0x8) == new_data
vm.write_virtual_memory(vm.read_register("rsp"), data)
assert vm.read_virtual_memory(vm.read_register("rsp"), 0x8) == data
def _t4():
print("* Save/restore")
vm.halt()
orig_values = vm.read_registers(regs)
orig_data = vm.read_virtual_memory(vm.read_register("rsp"), 0x100)
vm.interrupt_and_take_snapshot()
assert vm.is_state_halted()
vm.write_virtual_memory(vm.read_register("rsp"), b"A" * 0x100)
vm.single_step()
vm.resume()
time.sleep(0.100)
vm.interrupt_and_restore_last_snapshot()
assert vm.is_state_halted()
assert not vm.is_breakpoint_hit()
assert vm.read_registers(regs) == orig_values
assert vm.read_virtual_memory(vm.read_register("rsp"), 0x100) == orig_data
def _t5():
print("* Debug registers")
vm.halt()
vm.write_register("dr7", 0x0)
vm.set_hard_breakpoint("rw", 0x0, 0x1234)
assert vm.read_register("dr0") == 0x1234
assert vm.read_register("dr7") == 0b00000000000000110000000000000010
vm.set_hard_breakpoint("e", 0x0, 0x1234)
assert vm.read_register("dr7") == 0b00000000000000000000000000000010
vm.set_hard_breakpoint("w", 0x0, 0x1234)
assert vm.read_register("dr7") == 0b00000000000000010000000000000010
vm.set_hard_breakpoint("rw", 0x1, 0x1234)
assert vm.read_register("dr1") == 0x1234
assert vm.read_register("dr7") == 0b00000000001100010000000000001010
vm.set_hard_breakpoint("rw", 0x2, 0x1234)
assert vm.read_register("dr2") == 0x1234
assert vm.read_register("dr7") == 0b00000011001100010000000000101010
vm.set_hard_breakpoint("rw", 0x3, 0x1234)
assert vm.read_register("dr3") == 0x1234
assert vm.read_register("dr7") == 0b00110011001100010000000010101010
vm.unset_hard_breakpoint(0x0)
assert vm.read_register("dr7") == 0b00110011001100010000000010101000
vm.unset_hard_breakpoint(0x1)
assert vm.read_register("dr7") == 0b00110011001100010000000010100000
vm.unset_hard_breakpoint(0x2)
assert vm.read_register("dr7") == 0b00110011001100010000000010000000
vm.unset_hard_breakpoint(0x3)
assert vm.read_register("dr7") == 0b00110011001100010000000000000000
def _t6():
print("* Soft/hard exec breakpoint")
vm.halt()
# keep in mind that FDP soft and page breakpoints do not work just after a restore
# (see VMR3AddSoftBreakpoint())
vm.unset_all_breakpoints()
vm.single_step()
assert not vm.is_breakpoint_hit()
vm.interrupt_and_take_snapshot()
vm.single_step()
vm.single_step()
rip = vm.read_register("rip")
vm.interrupt_and_restore_last_snapshot()
vm.single_step()
bpid = vm.set_soft_exec_breakpoint(rip)
assert 0 <= bpid <= 254
assert not vm.is_breakpoint_hit()
vm.resume()
time.sleep(0.100)
vm.halt()
assert vm.is_breakpoint_hit()
vm.interrupt_and_restore_last_snapshot()
vm.single_step()
vm.set_hard_breakpoint("e", 0x0, rip)
assert not vm.is_breakpoint_hit()
vm.resume()
time.sleep(0.100)
vm.halt()
assert vm.is_breakpoint_hit()
if exe_ctx.process.is_running:
vm.interrupt()
vm.unset_all_breakpoints()
for _t in (_t1, _t2, _t3, _t4, _t5, _t6):
_t()
print("* All tests passed!")
def __lldb_init_module(debugger, internal_dict):
# FDP
debugger.HandleCommand("command script add -f lldbagility.fdp_attach fdp-attach")
debugger.HandleCommand("command script add -f lldbagility.fdp_save fdp-save")
debugger.HandleCommand("command script add -f lldbagility.fdp_restore fdp-restore")
debugger.HandleCommand(
"command script add -f lldbagility.fdp_interrupt fdp-interrupt"
)
debugger.HandleCommand(
"command script add -f lldbagility.fdp_hbreakpoint fdp-hbreakpoint"
)
debugger.HandleCommand("command script add -f lldbagility.fdp_test fdp-test")
debugger.HandleCommand("command alias fa fdp-attach")
debugger.HandleCommand("command alias fs fdp-save")
debugger.HandleCommand("command alias fr fdp-restore")
debugger.HandleCommand("command alias fi fdp-interrupt")
debugger.HandleCommand("command alias fh fdp-hbreakpoint")
# VMSN
debugger.HandleCommand("command script add -f lldbagility.vmsn_attach vmsn-attach")
debugger.HandleCommand("command alias va vmsn-attach")
| [
"threading.Thread",
"kdpserver.KDPServer",
"argparse.ArgumentParser",
"shlex.split",
"time.sleep",
"stubvm.STUBVM",
"functools.wraps",
"lldb.SBCommandReturnObject"
] | [((1092, 1134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""fdp-attach"""'}), "(prog='fdp-attach')\n", (1115, 1134), False, 'import argparse\n'), ((1495, 1538), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""vmsn-attach"""'}), "(prog='vmsn-attach')\n", (1518, 1538), False, 'import argparse\n'), ((2624, 2645), 'kdpserver.KDPServer', 'kdpserver.KDPServer', ([], {}), '()\n', (2643, 2645), False, 'import kdpserver\n'), ((2655, 2703), 'threading.Thread', 'threading.Thread', ([], {'target': 'kdpsv.debug', 'args': '(vm,)'}), '(target=kdpsv.debug, args=(vm,))\n', (2671, 2703), False, 'import threading\n'), ((3293, 3311), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (3308, 3311), False, 'import functools\n'), ((5775, 5822), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""fdp-hbreakpoint"""'}), "(prog='fdp-hbreakpoint')\n", (5798, 5822), False, 'import argparse\n'), ((368, 396), 'lldb.SBCommandReturnObject', 'lldb.SBCommandReturnObject', ([], {}), '()\n', (394, 396), False, 'import lldb\n'), ((1199, 1219), 'shlex.split', 'shlex.split', (['command'], {}), '(command)\n', (1210, 1219), False, 'import shlex\n'), ((1603, 1623), 'shlex.split', 'shlex.split', (['command'], {}), '(command)\n', (1614, 1623), False, 'import shlex\n'), ((1852, 1883), 'stubvm.STUBVM', 'stubvm.STUBVM', (['vm_stub', 'vm_name'], {}), '(vm_stub, vm_name)\n', (1865, 1883), False, 'import stubvm\n'), ((6695, 6715), 'shlex.split', 'shlex.split', (['command'], {}), '(command)\n', (6706, 6715), False, 'import shlex\n'), ((9467, 9482), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (9477, 9482), False, 'import time\n'), ((11945, 11960), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (11955, 11960), False, 'import time\n'), ((12210, 12225), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (12220, 12225), False, 'import time\n')] |
# post_save is the signal fired after an object is saved, in this case a Post
from django.db.models.signals import post_save
# Post is the model that sends the signal
from .models import Post
#reciever of the signal
from django.dispatch import receiver
from .models import Review
@receiver(post_save,sender=Post)
def create_review(sender,instance,created,**kwargs):
'''
    post_save: the signal that is fired after an object is saved
    Post: the model that is the sender of the signal
    receiver: the create_review function that catches the signal and performs some task
    instance: the instance of the Post class that was saved
    created: True if the post was just created
'''
if created:
Review.objects.create(post=instance)
| [
"django.dispatch.receiver"
] | [((262, 294), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Post'}), '(post_save, sender=Post)\n', (270, 294), False, 'from django.dispatch import receiver\n')] |
from setuptools import setup, find_packages
setup(
name = 'multiresunet',
version = '0.1',
    description = 'MultiResUNet implementation in PyTorch; MultiResUNet: Rethinking the U-Net Architecture for Multimodal Biomedical Image Segmentation',
author = '<NAME>',
author_email = '<EMAIL>',
install_requires= [],
packages = find_packages(),
python_requires = '>=3.6'
)
| [
"setuptools.find_packages"
] | [((360, 375), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (373, 375), False, 'from setuptools import setup, find_packages\n')] |
"""
====================================
Data set of Markov transition fields
====================================
A Markov transition field is an image obtained from a time series, representing
a field of transition probabilities for a discretized time series.
Different strategies can be used to bin time series.
It is implemented as :class:`pyts.image.MarkovTransitionField`.
In this example, we consider the training samples of the
`GunPoint dataset <http://timeseriesclassification.com/description.php?Dataset=GunPoint>`_,
consisting of 50 univariate time series of length 150.
The Markov transition field of each time series is independently computed and
the 50 Markov transition fields are plotted.
""" # noqa:E501
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import MarkovTransitionField
from pyts.datasets import load_gunpoint
# Load the GunPoint dataset
X, _, _, _ = load_gunpoint(return_X_y=True)
# Compute the Markov transition field for each time series
mtf = MarkovTransitionField(n_bins=8)
X_mtf = mtf.fit_transform(X)
# Plot the 50 Markov transition fields
fig = plt.figure(figsize=(10, 5))
grid = ImageGrid(fig, 111, nrows_ncols=(5, 10), axes_pad=0.1, share_all=True,
cbar_mode='single')
for i, ax in enumerate(grid):
im = ax.imshow(X_mtf[i], cmap='rainbow', origin='lower', vmin=0., vmax=1.)
grid[0].get_yaxis().set_ticks([])
grid[0].get_xaxis().set_ticks([])
plt.colorbar(im, cax=grid.cbar_axes[0])
ax.cax.toggle_label(True)
fig.suptitle("Markov transition fields for the 50 time series in the "
"'GunPoint' dataset", y=0.92)
plt.show()
| [
"mpl_toolkits.axes_grid1.ImageGrid",
"matplotlib.pyplot.show",
"pyts.datasets.load_gunpoint",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"pyts.image.MarkovTransitionField"
] | [((983, 1013), 'pyts.datasets.load_gunpoint', 'load_gunpoint', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (996, 1013), False, 'from pyts.datasets import load_gunpoint\n'), ((1072, 1103), 'pyts.image.MarkovTransitionField', 'MarkovTransitionField', ([], {'n_bins': '(8)'}), '(n_bins=8)\n', (1093, 1103), False, 'from pyts.image import MarkovTransitionField\n'), ((1177, 1204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1187, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1307), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig', '(111)'], {'nrows_ncols': '(5, 10)', 'axes_pad': '(0.1)', 'share_all': '(True)', 'cbar_mode': '"""single"""'}), "(fig, 111, nrows_ncols=(5, 10), axes_pad=0.1, share_all=True,\n cbar_mode='single')\n", (1222, 1307), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((1498, 1537), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'grid.cbar_axes[0]'}), '(im, cax=grid.cbar_axes[0])\n', (1510, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1688, 1690), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3.5
import os
import dlib
import numpy as np
import cv2
import time
import darknet
from ctypes import *
import math
import random
class YOLO_NN:
def __init__(self, yoloDataFolder):
self.configPath = yoloDataFolder + "/cfg/yolov3-tiny.cfg"
self.weightPath = yoloDataFolder + "/yolov3-tiny.weights"
self.metaPath = yoloDataFolder + "/cfg/coco.data"
print("self.configPath: " + self.configPath)
print("self.weightPath: " + self.weightPath)
print("self.metaPath: " + self.metaPath)
self.netMain = None
self.metaMain = None
self.altNames = None
if not os.path.exists(self.configPath):
raise ValueError("Invalid config path `" +
os.path.abspath(self.configPath)+"`")
if not os.path.exists(self.weightPath):
raise ValueError("Invalid weight path `" +
os.path.abspath(self.weightPath)+"`")
if not os.path.exists(self.metaPath):
raise ValueError("Invalid data file path `" +
os.path.abspath(self.metaPath)+"`")
if self.netMain is None:
self.netMain = darknet.load_net_custom(self.configPath.encode(
"ascii"), self.weightPath.encode("ascii"), 0, 1) # batch size = 1
if self.metaMain is None:
self.metaMain = darknet.load_meta(self.metaPath.encode("ascii"))
if self.altNames is None:
try:
with open(self.metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
self.altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
# Create an image we reuse for each detect
self.darknet_image = darknet.make_image(darknet.network_width(self.netMain),
darknet.network_height(self.netMain),3)
self.data_dir = os.path.expanduser(yoloDataFolder+'/face_data')
self.faces_folder_path = self.data_dir + '/users/'
self.face_detector = dlib.get_frontal_face_detector()
self.shape_predictor = dlib.shape_predictor(self.data_dir + '/dlib/shape_predictor_68_face_landmarks.dat')
self.face_recognition_model = dlib.face_recognition_model_v1(self.data_dir + '/dlib/dlib_face_recognition_resnet_model_v1.dat')
def convertBack(self, x, y, w, h):
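        # Convert a detection given as (center x, center y, width, height)
        # into corner coordinates (xmin, ymin, xmax, ymax).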
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
def cvDrawBoxes(self, detections, img):
for detection in detections:
x, y, w, h = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = self.convertBack(
float(x), float(y), float(w), float(h))
pt1 = (xmin, ymin)
pt2 = (xmax, ymax)
cv2.rectangle(img, pt1, pt2, (0, 255, 0), 1)
cv2.putText(img,
detection[0].decode() +
" [" + str(round(detection[1] * 100, 2)) + "]",
(pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[0, 255, 0], 2)
return img
def get_face_encodings(self, face):
bounds = self.face_detector(face, 1)
faces_landmarks = [self.shape_predictor(face, face_bounds) for face_bounds in bounds]
try:
h = [np.array(self.face_recognition_model.compute_face_descriptor(face, face_pose, 1)) for face_pose in faces_landmarks]
except:
return []
return h
def get_face_matches(self, known_faces, face):
return np.linalg.norm(known_faces - face, axis=1)
def find_match(self, known_faces, person_names, face):
        matches = self.get_face_matches(known_faces, face)  # Euclidean distances to each known face
min_index = matches.argmin()
min_value = matches[min_index]
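        # Smaller distance means a closer match; the cut-offs below (0.55/0.58/0.65)
        # grade the confidence of the match, and anything above 0.65 is rejected.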
if min_value < 0.55:
return person_names[min_index]+"! ({0:.2f})".format(min_value)
if min_value < 0.58:
return person_names[min_index]+" ({0:.2f})".format(min_value)
if min_value < 0.65:
return person_names[min_index]+"?"+" ({0:.2f})".format(min_value)
return 'Not Found'
def load_face_encodings(self):
image_filenames = filter(lambda x: x.endswith('.jpg'), os.listdir(self.faces_folder_path))
image_filenames = sorted(image_filenames)
person_names = [x[:-4] for x in image_filenames]
full_paths_to_images = [self.faces_folder_path + x for x in image_filenames]
face_encodings = []
win = dlib.image_window()
for path_to_image in full_paths_to_images:
print("Loading user: " + path_to_image)
#face = io.imread(path_to_image)
face = cv2.imread(path_to_image)
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
faces_bounds = self.face_detector(face, 1)
if len(faces_bounds) != 1:
print("Expected one and only one face per image: " + path_to_image + " - it has " + str(len(faces_bounds)))
exit()
face_bounds = faces_bounds[0]
face_landmarks = self.shape_predictor(face, face_bounds)
face_encoding = np.array(self.face_recognition_model.compute_face_descriptor(face, face_landmarks, 1))
win.clear_overlay()
win.set_image(face)
win.add_overlay(face_bounds)
win.add_overlay(face_landmarks)
face_encodings.append(face_encoding)
#print(face_encoding)
#dlib.hit_enter_to_continue()
return face_encodings, person_names
def detect(self, frame_read):
prev_time = time.time()
        frame_resized = cv2.resize(frame_read,
                                   (darknet.network_width(self.netMain),
                                    darknet.network_height(self.netMain)),
                                   interpolation=cv2.INTER_LINEAR)
frame_rgb = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
darknet.copy_image_from_bytes(self.darknet_image, frame_rgb.tobytes())
detections = darknet.detect_image(self.netMain, self.metaMain, self.darknet_image, thresh=0.25)
#print(1/(time.time()-prev_time))
return detections
# function to get the output layer names
# in the architecture
def get_output_layers(self,net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
# function to draw bounding box on the detected object with class name
def draw_bounding_box(self,img, class_id, confidence, x, y, x_plus_w, y_plus_h):
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), (0, 0, 255), 2)
#cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
if __name__ == "__main__":
# Start Yolo Setup
rn = YOLO_NN('.')
# initialize video input
cap = cv2.VideoCapture(1)
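    # Camera index 1 is assumed to be an external/USB camera here; index 0 is
    # usually the built-in one. Adjust if no frames are returned.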
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
face_encodings, person_names = rn.load_face_encodings()
faceClassifier = cv2.CascadeClassifier(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml')
#rn.recognize_faces_in_video(face_encodings, person_names)
while True:
ret, frame_read = cap.read()
draw_frame = frame_read.copy()
gray = cv2.cvtColor(frame_read, cv2.COLOR_BGR2GRAY)
overlay = frame_read.copy()
cv2.rectangle(overlay, (0, 0), (640, 35), (0, 0, 0), -1)
alpha = 0.8
draw_frame = cv2.addWeighted(overlay, alpha, draw_frame, 1 - alpha, 0)
# Yolo Detection
detections = rn.detect(frame_read.copy())
filter_detections = []
n_users = 0
n_persons = 0
for detection in detections:
if detection[0] == b'person': # It is a person
filter_detections.append(detection)
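        # Two paths: if YOLO found no person at all, fall back to running the Haar
        # face detector on the whole frame; otherwise look for faces inside each
        # YOLO person box and try to recognise them against the known encodings.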
        if len(filter_detections) == 0: # YOLO did not detect any person, fall back to face detection on the whole frame
            face_rects = faceClassifier.detectMultiScale( # Detect faces with the OpenCV Haar cascade
gray,
scaleFactor = 1.1,
minNeighbors = 5,
minSize = (50, 50),
flags = cv2.CASCADE_SCALE_IMAGE)
n_persons = len(face_rects)
                if len(face_rects) > 0: # At least one face was found
for (x, y, w, h) in face_rects:
face = draw_frame[y:y + h, x:x + w]
face_encodings_in_image = rn.get_face_encodings(face)
if (face_encodings_in_image):
match = rn.find_match(face_encodings, person_names, face_encodings_in_image[0])
if match == "Not Found":
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.putText(draw_frame, match, (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
n_users += 1
else:
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
for detection in filter_detections:
x1, y1, w1, h1 = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = rn.convertBack(
float(x1), float(y1), float(w1), float(h1))
sx = 640.0/416.0
sy = 360.0/416.0
xmin = int(xmin*sx)
ymin = int(ymin*sy)
xmax = int(xmax*sx)
ymax = int(ymax*sy)
pt1 = (xmin, ymin)
pt2 = (xmax, ymax)
cropped = gray[ymin:ymax, xmin:xmax]
            face_rects = faceClassifier.detectMultiScale( # Detect faces with the OpenCV Haar cascade inside the cropped person box
                cropped,
                scaleFactor = 1.1,
                minNeighbors = 5,
                minSize = (50, 50),
                flags = cv2.CASCADE_SCALE_IMAGE)
            n_persons += 1
            if len(face_rects) > 0:
                for (x, y, w, h) in face_rects:
                    face = cropped[y:y + h, x:x + w]
                    face_encodings_in_image = rn.get_face_encodings(face)
                    x += xmin  # translate the face box back to full-frame coordinates for drawing
                    y += ymin
if (face_encodings_in_image):
match = rn.find_match(face_encodings, person_names, face_encodings_in_image[0])
if match == "Not Found":
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.putText(draw_frame, match, (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
n_users += 1
else:
cv2.putText(draw_frame, "Unknow", (x+5, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.rectangle(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
else:
cv2.rectangle(draw_frame, pt1, pt2, (0, 0, 255), 2)
cv2.putText(draw_frame, "Unknow", (pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(draw_frame, "InteliCam Users: " + str(n_users) + " | "+ \
"Persons: " + str(n_persons),
(5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[255, 255, 255], 1)
cv2.imshow("Frame", draw_frame)
key = cv2.waitKey(3) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
cv2.destroyAllWindows() | [
"numpy.linalg.norm",
"cv2.rectangle",
"darknet.network_height",
"dlib.shape_predictor",
"cv2.imshow",
"os.path.abspath",
"cv2.cvtColor",
"os.path.exists",
"cv2.destroyAllWindows",
"re.search",
"cv2.waitKey",
"cv2.addWeighted",
"dlib.face_recognition_model_v1",
"dlib.get_frontal_face_detector",
"darknet.detect_image",
"os.listdir",
"dlib.image_window",
"cv2.putText",
"time.time",
"cv2.VideoCapture",
"cv2.imread",
"cv2.CascadeClassifier",
"os.path.expanduser",
"darknet.network_width"
] | [((7846, 7865), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (7862, 7865), False, 'import cv2\n'), ((8035, 8120), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml')"], {}), "(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml'\n )\n", (8056, 8120), False, 'import cv2\n'), ((13538, 13561), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (13559, 13561), False, 'import cv2\n'), ((2570, 2619), 'os.path.expanduser', 'os.path.expanduser', (["(yoloDataFolder + '/face_data')"], {}), "(yoloDataFolder + '/face_data')\n", (2588, 2619), False, 'import os\n'), ((2707, 2739), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (2737, 2739), False, 'import dlib\n'), ((2771, 2858), 'dlib.shape_predictor', 'dlib.shape_predictor', (["(self.data_dir + '/dlib/shape_predictor_68_face_landmarks.dat')"], {}), "(self.data_dir +\n '/dlib/shape_predictor_68_face_landmarks.dat')\n", (2791, 2858), False, 'import dlib\n'), ((2893, 2994), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (["(self.data_dir + '/dlib/dlib_face_recognition_resnet_model_v1.dat')"], {}), "(self.data_dir +\n '/dlib/dlib_face_recognition_resnet_model_v1.dat')\n", (2923, 2994), False, 'import dlib\n'), ((4428, 4470), 'numpy.linalg.norm', 'np.linalg.norm', (['(known_faces - face)'], {'axis': '(1)'}), '(known_faces - face, axis=1)\n', (4442, 4470), True, 'import numpy as np\n'), ((5407, 5426), 'dlib.image_window', 'dlib.image_window', ([], {}), '()\n', (5424, 5426), False, 'import dlib\n'), ((6524, 6535), 'time.time', 'time.time', ([], {}), '()\n', (6533, 6535), False, 'import time\n'), ((6814, 6860), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_resized', 'cv2.COLOR_BGR2RGB'], {}), '(frame_resized, cv2.COLOR_BGR2RGB)\n', (6826, 6860), False, 'import cv2\n'), ((6962, 7048), 'darknet.detect_image', 'darknet.detect_image', (['self.netMain', 'self.metaMain', 'self.darknet_image'], {'thresh': '(0.25)'}), '(self.netMain, self.metaMain, self.darknet_image,\n thresh=0.25)\n', (6982, 7048), False, 'import darknet\n'), ((7580, 7644), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x_plus_w, y_plus_h)', '(0, 0, 255)', '(2)'], {}), '(img, (x, y), (x_plus_w, y_plus_h), (0, 0, 255), 2)\n', (7593, 7644), False, 'import cv2\n'), ((8296, 8340), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_read', 'cv2.COLOR_BGR2GRAY'], {}), '(frame_read, cv2.COLOR_BGR2GRAY)\n', (8308, 8340), False, 'import cv2\n'), ((8385, 8441), 'cv2.rectangle', 'cv2.rectangle', (['overlay', '(0, 0)', '(640, 35)', '(0, 0, 0)', '(-1)'], {}), '(overlay, (0, 0), (640, 35), (0, 0, 0), -1)\n', (8398, 8441), False, 'import cv2\n'), ((8483, 8540), 'cv2.addWeighted', 'cv2.addWeighted', (['overlay', 'alpha', 'draw_frame', '(1 - alpha)', '(0)'], {}), '(overlay, alpha, draw_frame, 1 - alpha, 0)\n', (8498, 8540), False, 'import cv2\n'), ((13346, 13377), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'draw_frame'], {}), "('Frame', draw_frame)\n", (13356, 13377), False, 'import cv2\n'), ((659, 690), 'os.path.exists', 'os.path.exists', (['self.configPath'], {}), '(self.configPath)\n', (673, 690), False, 'import os\n'), ((829, 860), 'os.path.exists', 'os.path.exists', (['self.weightPath'], {}), '(self.weightPath)\n', (843, 860), False, 'import os\n'), ((999, 1028), 'os.path.exists', 'os.path.exists', (['self.metaPath'], {}), '(self.metaPath)\n', (1013, 1028), False, 'import os\n'), ((2428, 2463), 'darknet.network_width', 'darknet.network_width', 
(['self.netMain'], {}), '(self.netMain)\n', (2449, 2463), False, 'import darknet\n'), ((2505, 2541), 'darknet.network_height', 'darknet.network_height', (['self.netMain'], {}), '(self.netMain)\n', (2527, 2541), False, 'import darknet\n'), ((3636, 3680), 'cv2.rectangle', 'cv2.rectangle', (['img', 'pt1', 'pt2', '(0, 255, 0)', '(1)'], {}), '(img, pt1, pt2, (0, 255, 0), 1)\n', (3649, 3680), False, 'import cv2\n'), ((5135, 5169), 'os.listdir', 'os.listdir', (['self.faces_folder_path'], {}), '(self.faces_folder_path)\n', (5145, 5169), False, 'import os\n'), ((5595, 5620), 'cv2.imread', 'cv2.imread', (['path_to_image'], {}), '(path_to_image)\n', (5605, 5620), False, 'import cv2\n'), ((5640, 5677), 'cv2.cvtColor', 'cv2.cvtColor', (['face', 'cv2.COLOR_BGR2RGB'], {}), '(face, cv2.COLOR_BGR2RGB)\n', (5652, 5677), False, 'import cv2\n'), ((13392, 13406), 'cv2.waitKey', 'cv2.waitKey', (['(3)'], {}), '(3)\n', (13403, 13406), False, 'import cv2\n'), ((6619, 6652), 'darknet.network_width', 'darknet.network_width', (['rn.netMain'], {}), '(rn.netMain)\n', (6640, 6652), False, 'import darknet\n'), ((6690, 6724), 'darknet.network_height', 'darknet.network_height', (['rn.netMain'], {}), '(rn.netMain)\n', (6712, 6724), False, 'import darknet\n'), ((1665, 1737), 're.search', 're.search', (['"""names *= *(.*)$"""', 'metaContents', '(re.IGNORECASE | re.MULTILINE)'], {}), "('names *= *(.*)$', metaContents, re.IGNORECASE | re.MULTILINE)\n", (1674, 1737), False, 'import re\n'), ((12851, 12902), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', 'pt1', 'pt2', '(0, 0, 255)', '(2)'], {}), '(draw_frame, pt1, pt2, (0, 0, 255), 2)\n', (12864, 12902), False, 'import cv2\n'), ((12923, 13030), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(pt1[0], pt1[1] - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (pt1[0], pt1[1] - 5), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n", (12934, 13030), False, 'import cv2\n'), ((776, 808), 'os.path.abspath', 'os.path.abspath', (['self.configPath'], {}), '(self.configPath)\n', (791, 808), False, 'import os\n'), ((946, 978), 'os.path.abspath', 'os.path.abspath', (['self.weightPath'], {}), '(self.weightPath)\n', (961, 978), False, 'import os\n'), ((1117, 1147), 'os.path.abspath', 'os.path.abspath', (['self.metaPath'], {}), '(self.metaPath)\n', (1132, 1147), False, 'import os\n'), ((1970, 1992), 'os.path.exists', 'os.path.exists', (['result'], {}), '(result)\n', (1984, 1992), False, 'import os\n'), ((10306, 10407), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (10317, 10407), False, 'import cv2\n'), ((10424, 10489), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (10437, 10489), False, 'import cv2\n'), ((9784, 9885), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (9795, 9885), False, 'import cv2\n'), ((9906, 9971), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (9919, 9971), False, 'import cv2\n'), ((10030, 10129), 
'cv2.putText', 'cv2.putText', (['draw_frame', 'match', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, match, (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 255, 0), 2)\n', (10041, 10129), False, 'import cv2\n'), ((10149, 10214), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (10162, 10214), False, 'import cv2\n'), ((12621, 12722), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (12632, 12722), False, 'import cv2\n'), ((12743, 12808), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (12756, 12808), False, 'import cv2\n'), ((12071, 12172), 'cv2.putText', 'cv2.putText', (['draw_frame', '"""Unknow"""', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(draw_frame, 'Unknow', (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n", (12082, 12172), False, 'import cv2\n'), ((12197, 12262), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (12210, 12262), False, 'import cv2\n'), ((12329, 12428), 'cv2.putText', 'cv2.putText', (['draw_frame', 'match', '(x + 5, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, match, (x + 5, y - 15), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 255, 0), 2)\n', (12340, 12428), False, 'import cv2\n'), ((12452, 12517), 'cv2.rectangle', 'cv2.rectangle', (['draw_frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(draw_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (12465, 12517), False, 'import cv2\n')] |
# import all the required python libaries: graphics and random
from graphics import *
import random
# create the graphics window and set background colour
win = GraphWin("Colour Guessing Game", 1000, 500)
win.setBackground('#232323')
# create a title for your game
titleBg = Rectangle(Point(0, 0), Point(1000, 135))
titleBg.setOutline('steelblue')
titleBg.setFill('steelblue')
titleBg.draw(win)
title = Text(Point(500, 67.5),"RGB Colour Guessing Game")
title.setTextColor('white')
title.setSize(48)
title.setFace('times roman')
title.draw(win)
colors = []
correctChoice = int
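# Placeholder value; randomise_answers() below overwrites it with the index (0-3)
# of the square whose RGB value is shown to the player.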
# generate random colors and questions
def randomise_answers():
global colors
global correctChoice
colors = []
for i in range(4):
rand_r = random.randint(0, 255)
rand_g = random.randint(0, 255)
rand_b = random.randint(0, 255)
colors.append([rand_r, rand_g, rand_b])
correctChoice = random.randint(0, 3)
randomise_answers()
squares = []
# create 4 squares of random colour evenly spaced across the page
def create_squares(x, y, sideLen, spacing):
global squares
squares = []
for i in range(4):
# create a square (Rectangle) that is positioned based on the current 'i' value
square = Rectangle(Point(x+i*sideLen+i*spacing, y), Point(x+(i+1)*sideLen+i*spacing, y+sideLen))
# set the fill of the square to the random values of r,g and b
square.setFill(color_rgb(colors[i][0], colors[i][1], colors[i][2]))
# draw the square in the window
square.draw(win)
squares.append(square)
create_squares(225, 325, 100, 50)
def wait_for_click():
while True:
# get the click position of the mouse
mousePos = win.getMouse()
mouseX = mousePos.getX()
mouseY = mousePos.getY()
        # work out which square (if any) was clicked and return its index;
        # the caller decides whether that index matches the correct answer
for i in range(4):
currentSquare = squares[i]
currentX1 = currentSquare.getP1().getX()
currentY1 = currentSquare.getP1().getY()
currentX2 = currentSquare.getP2().getX()
currentY2 = currentSquare.getP2().getY()
if mouseX > currentX1 and mouseX < currentX2 and mouseY > currentY1 and mouseY < currentY2:
return i
''' main game '''
gameover = False
# create a rectangle that fills the whole screen
bgRect = Rectangle(Point(0, 0), Point(1000, 500))
# create a Text box that will display the results of the guess (correct/incorrect)
resultText = Text(Point(500, 125),"")
resultText.setSize(128)
resultText.setFill('white')
# create a Text box that will display the rgb of the correct choice
questionText = Text(Point(500, 225), f"rgb({colors[correctChoice][0]}, {colors[correctChoice][1]}, {colors[correctChoice][2]})")
questionText.setFill('white')
questionText.setSize(25)
questionText.setStyle('bold')
questionText.draw(win)
# create a Text box that will display the score of the player
score = 0
scoreText = Text(Point(500, 155), f"SCORE: {score}")
scoreText.setFill('white')
scoreText.setSize(12)
scoreText.draw(win)
while gameover == False:
square_clicked = wait_for_click()
if square_clicked == correctChoice:
score += 1
scoreText.setText(f"SCORE: {score}")
randomise_answers()
create_squares(225, 325, 100, 50)
questionText.setText(f"rgb({colors[correctChoice][0]}, {colors[correctChoice][1]}, {colors[correctChoice][2]})")
else:
bgRect.setFill(color_rgb(colors[square_clicked][0], colors[square_clicked][1], colors[square_clicked][2]))
bgRect.draw(win)
resultText.setText("TOO BAD")
resultText.draw(win)
scoreText.setSize(24)
scoreText.anchor = Point(500, 350)
scoreText.undraw()
scoreText.draw(win)
gameover = True
# wait for click to close window
win.getMouse()
| [
"random.randint"
] | [((914, 934), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (928, 934), False, 'import random\n'), ((743, 765), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (757, 765), False, 'import random\n'), ((783, 805), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (797, 805), False, 'import random\n'), ((823, 845), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (837, 845), False, 'import random\n')] |
#!/usr/bin/python3
import nvidia_smi
import json
mydict = nvidia_smi.JsonDeviceQuery()
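# JsonDeviceQuery() is expected to return a plain dict with the full GPU/driver
# query (roughly what `nvidia-smi -q` reports), which is then pretty-printed as JSON.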
# Example print JSON
print(json.dumps(mydict, indent=2))
| [
"nvidia_smi.JsonDeviceQuery",
"json.dumps"
] | [((59, 87), 'nvidia_smi.JsonDeviceQuery', 'nvidia_smi.JsonDeviceQuery', ([], {}), '()\n', (85, 87), False, 'import nvidia_smi\n'), ((116, 144), 'json.dumps', 'json.dumps', (['mydict'], {'indent': '(2)'}), '(mydict, indent=2)\n', (126, 144), False, 'import json\n')] |
import datetime
import json
import redis
redis_device_key = 'redis_device_key'
device_expire_second = 60
class RedisProxy(object):
def __init__(self, host='127.0.0.1', port=6379):
self.redis_pool = redis.ConnectionPool(host=host, port=port, db=0)
def connect(self):
return redis.Redis(connection_pool=self.redis_pool)
def get_device_datas(self):
device_datas = []
r = self.connect()
result = r.hgetall(redis_device_key)
remove_device_list = []
for user_pair in result.items():
values = user_pair[1].split('@')
'''
device_id = user_pair[0]
update_time = datetime.datetime.strptime(values[1], "%Y-%m-%d %H:%M:%S.%f")
now_time = datetime.datetime.today()
expire_time_delta = datetime.timedelta(seconds=device_expire_second)
if now_time > update_time + expire_time_delta:
device_ids.append(device_id)
else:
remove_device_list.append(device_id)
'''
device_datas.append(json.loads(values[0]))
self.remove_devices(remove_device_list)
return device_datas
def remove_devices(self, device_list):
r = self.connect()
p = r.pipeline()
for device_id in device_list:
p.hdel(redis_device_key, device_id)
p.execute()
def update_device(self, device_id, websocket_send_data):
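        # The payload is stored as a single hash field with the value
        # "<json>@<timestamp>", matching the "@"-split format that
        # get_device_datas() parses above.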
r = self.connect()
insert_value = "%s@%s" % (json.dumps(websocket_send_data), datetime.datetime.now())
return r.hset(redis_device_key, device_id, insert_value)
| [
"redis.Redis",
"json.loads",
"json.dumps",
"redis.ConnectionPool",
"datetime.datetime.now"
] | [((213, 261), 'redis.ConnectionPool', 'redis.ConnectionPool', ([], {'host': 'host', 'port': 'port', 'db': '(0)'}), '(host=host, port=port, db=0)\n', (233, 261), False, 'import redis\n'), ((301, 345), 'redis.Redis', 'redis.Redis', ([], {'connection_pool': 'self.redis_pool'}), '(connection_pool=self.redis_pool)\n', (312, 345), False, 'import redis\n'), ((1094, 1115), 'json.loads', 'json.loads', (['values[0]'], {}), '(values[0])\n', (1104, 1115), False, 'import json\n'), ((1522, 1553), 'json.dumps', 'json.dumps', (['websocket_send_data'], {}), '(websocket_send_data)\n', (1532, 1553), False, 'import json\n'), ((1555, 1578), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1576, 1578), False, 'import datetime\n')] |
from flask_cors import CORS
cors = CORS(resources={r"/maskmap/*": {"origins": "*"}})
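# Restrict CORS handling to routes under /maskmap/ while allowing any origin;
# the extension is bound to the Flask app later via init_app().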
def init_app(app):
cors.init_app(app)
| [
"flask_cors.CORS"
] | [((37, 85), 'flask_cors.CORS', 'CORS', ([], {'resources': "{'/maskmap/*': {'origins': '*'}}"}), "(resources={'/maskmap/*': {'origins': '*'}})\n", (41, 85), False, 'from flask_cors import CORS\n')] |
import numpy as np
image_dimensions = (25, 6)
def load(image_dims, path: str = "input/08.txt"):
with open(path) as file:
        return np.array([c for c in file.read().strip()]).reshape((-1, image_dims[0] * image_dims[1]))  # strip the trailing newline so reshape works
def number_of_values_in_layer(layer, value):
return np.count_nonzero(layer == value)
def stack_layers(image_layers):
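    # Pixel value "2" is transparent: for each position keep the first
    # non-transparent pixel ("0" or "1") found when walking down the layers.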
final_layer = list()
for i in range(len(image_layers[0])):
for j in range(len(image_layers)):
if image_layers[j][i] != "2":
final_layer.append(image_layers[j][i])
break
return np.array(final_layer)
# Prep
layers = load(image_dimensions)
# First
wanted_layer = None
minimum = None
for l in layers:
n = number_of_values_in_layer(l, "0")
if minimum is None or wanted_layer is None or n < minimum:
minimum = n
wanted_layer = l
wanted_1 = number_of_values_in_layer(wanted_layer, "1") * number_of_values_in_layer(wanted_layer, "2")
print(f"[1]\t{wanted_1}")
# Second
stacked_layer = stack_layers(layers).reshape(image_dimensions[::-1])
final_image = list()
for row in stacked_layer:
r = ""
for element in row:
r += "##" if element == "1" else " " if element == "0" else " "
final_image.append(r)
print(f"[2]")
for r in final_image:
print(r)
| [
"numpy.array",
"numpy.count_nonzero"
] | [((281, 313), 'numpy.count_nonzero', 'np.count_nonzero', (['(layer == value)'], {}), '(layer == value)\n', (297, 313), True, 'import numpy as np\n'), ((590, 611), 'numpy.array', 'np.array', (['final_layer'], {}), '(final_layer)\n', (598, 611), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Jan-02-21 20:43
# @Author : <NAME> (<EMAIL>)
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.platform import test
from tensorflow.keras.utils import plot_model
from senet.keras_fn.se_resnet import SE_ResNet_18, SE_ResNet_50, SE_ResNet_101, SE_ResNet_152
class TestModelArchitectures(keras_parameterized.TestCase):
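    # Each test builds the architecture with untrained (weights=None) parameters
    # and writes an architecture diagram to "<model_type>.png" via plot_model.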
def test_se_resnet_18(self):
model_type = "SE_ResNet_18"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_18(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
def test_se_resnet_50(self):
model_type = "SE_ResNet_50"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_50(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
def test_se_resnet_101(self):
model_type = "SE_ResNet_101"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_101(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
def test_se_resnet_152(self):
model_type = "SE_ResNet_152"
input_shape = (224, 224, 3)
num_classes = 2
model = SE_ResNet_152(
include_top=True,
weights=None,
input_shape=input_shape,
classes=num_classes
)
plot_model(model, to_file=model_type + ".png", show_shapes=True)
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.platform.test.main",
"senet.keras_fn.se_resnet.SE_ResNet_50",
"senet.keras_fn.se_resnet.SE_ResNet_18",
"tensorflow.keras.utils.plot_model",
"senet.keras_fn.se_resnet.SE_ResNet_101",
"senet.keras_fn.se_resnet.SE_ResNet_152"
] | [((1953, 1964), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (1962, 1964), False, 'from tensorflow.python.platform import test\n'), ((581, 675), 'senet.keras_fn.se_resnet.SE_ResNet_18', 'SE_ResNet_18', ([], {'include_top': '(True)', 'weights': 'None', 'input_shape': 'input_shape', 'classes': 'num_classes'}), '(include_top=True, weights=None, input_shape=input_shape,\n classes=num_classes)\n', (593, 675), False, 'from senet.keras_fn.se_resnet import SE_ResNet_18, SE_ResNet_50, SE_ResNet_101, SE_ResNet_152\n'), ((739, 803), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': "(model_type + '.png')", 'show_shapes': '(True)'}), "(model, to_file=model_type + '.png', show_shapes=True)\n", (749, 803), False, 'from tensorflow.keras.utils import plot_model\n'), ((951, 1045), 'senet.keras_fn.se_resnet.SE_ResNet_50', 'SE_ResNet_50', ([], {'include_top': '(True)', 'weights': 'None', 'input_shape': 'input_shape', 'classes': 'num_classes'}), '(include_top=True, weights=None, input_shape=input_shape,\n classes=num_classes)\n', (963, 1045), False, 'from senet.keras_fn.se_resnet import SE_ResNet_18, SE_ResNet_50, SE_ResNet_101, SE_ResNet_152\n'), ((1109, 1173), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': "(model_type + '.png')", 'show_shapes': '(True)'}), "(model, to_file=model_type + '.png', show_shapes=True)\n", (1119, 1173), False, 'from tensorflow.keras.utils import plot_model\n'), ((1323, 1418), 'senet.keras_fn.se_resnet.SE_ResNet_101', 'SE_ResNet_101', ([], {'include_top': '(True)', 'weights': 'None', 'input_shape': 'input_shape', 'classes': 'num_classes'}), '(include_top=True, weights=None, input_shape=input_shape,\n classes=num_classes)\n', (1336, 1418), False, 'from senet.keras_fn.se_resnet import SE_ResNet_18, SE_ResNet_50, SE_ResNet_101, SE_ResNet_152\n'), ((1482, 1546), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': "(model_type + '.png')", 'show_shapes': '(True)'}), "(model, to_file=model_type + '.png', show_shapes=True)\n", (1492, 1546), False, 'from tensorflow.keras.utils import plot_model\n'), ((1696, 1791), 'senet.keras_fn.se_resnet.SE_ResNet_152', 'SE_ResNet_152', ([], {'include_top': '(True)', 'weights': 'None', 'input_shape': 'input_shape', 'classes': 'num_classes'}), '(include_top=True, weights=None, input_shape=input_shape,\n classes=num_classes)\n', (1709, 1791), False, 'from senet.keras_fn.se_resnet import SE_ResNet_18, SE_ResNet_50, SE_ResNet_101, SE_ResNet_152\n'), ((1855, 1919), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': "(model_type + '.png')", 'show_shapes': '(True)'}), "(model, to_file=model_type + '.png', show_shapes=True)\n", (1865, 1919), False, 'from tensorflow.keras.utils import plot_model\n')] |
"""
Usage:
- From Spark 3.1.1 base container with Python bindings:
docker run --rm -it --name test_pyspark spark-ingest:latest /bin/bash
./bin/spark-submit spark-ingest/main.py --filepath ./examples/src/main/python/pi.py
- From binaries:
./pyspark --packages io.delta:delta-core_2.12:1.0.0 \
--conf "spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension" \
--conf "spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog"
./spark-sql --packages io.delta:delta-core_2.12:1.0.0 \
--conf "spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension" \
--conf "spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog"
"""
from datetime import datetime, date, timedelta
import os
import shutil
import boto3
import click
from pyspark.sql import SparkSession
from spark_etl import logger, SPARK_LOG_LEVEL
from spark_etl.etl import (
create_vitals_delta, cache_mpmi, save_mpmi,
load_vitals, upsert_vitals, time_travel
)
from spark_etl.secret import get_secret
"""
To configure AWS bucket-specific authorization, use the
`fs.s3a.bucket.[bucket name].access.key` configuration setting.
As specified here:
- https://hadoop.apache.org/docs/current2/hadoop-aws/tools/hadoop-aws/index.html#Configuring_different_S3_buckets
TODO: Consider optimizing the S3A for I/O.
- https://spark.apache.org/docs/3.1.1/cloud-integration.html#recommended-settings-for-writing-to-object-stores
"""
spark_session = (
SparkSession
.builder
.appName("stage_data")
# AWS general authorization
# .config("spark.hadoop.fs.s3a.access.key", os.environ['P3_AWS_ACCESS_KEY'])
# .config("spark.hadoop.fs.s3a.secret.key", os.environ['P3_AWS_SECRET_KEY'])
# AWS bucket-specific authorization
# .config(f"fs.s3a.bucket.{os.environ['P3_BUCKET']}.access.key", os.environ['P3_AWS_ACCESS_KEY'])
# .config(f"fs.s3a.bucket.{os.environ['P3_BUCKET']}.secret.key", os.environ['P3_AWS_SECRET_KEY'])
# .config(f"fs.s3a.bucket.{os.environ['P3_BUCKET']}.session.token", os.environ['P3_AWS_SESSION_TOKEN'])
# Or
.config(f"spark.hadoop.fs.s3a.bucket.{os.environ['P3_BUCKET']}.access.key", os.environ['P3_AWS_ACCESS_KEY'])
.config(f"spark.hadoop.fs.s3a.bucket.{os.environ['P3_BUCKET']}.secret.key", os.environ['P3_AWS_SECRET_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.bangkok.access.key", os.environ['BK_AWS_ACCESS_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.bangkok.secret.key", os.environ['BK_AWS_SECRET_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.condesa.access.key", os.environ['CO_AWS_ACCESS_KEY'])
# .config("spark.hadoop.fs.s3a.bucket.condesa.secret.key", os.environ['CO_AWS_SECRET_KEY'])
# TODO: S3A Optimizations
.config("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version", "2")
.config("spark.hadoop.mapreduce.fileoutputcommitter.cleanup-failures.ignored", "true")
# TODO: S3A Optimizations: PathOutputCommitProtocol cannot be resolved
# .config("spark.hadoop.fs.s3a.committer.name", "directory")
# .config("spark.sql.sources.commitProtocolClass",
# "org.apache.spark.internal.io.cloud.PathOutputCommitProtocol")
# .config("spark.sql.parquet.output.committer.class",
# "org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter")
# TODO: Parquet Optimizations
.config("spark.hadoop.parquet.enable.summary-metadata", "false")
.config("spark.sql.parquet.mergeSchema", "false")
.config("spark.sql.parquet.filterPushdown", "true")
.config("spark.sql.hive.metastorePartitionPruning", "true")
# Specify different location for Hive metastore
# .config("spark.sql.warehouse.dir", "/opt/spark/hive_warehouse")
# .config("spark.sql.catalogImplementation", "hive")
# Delta lake integration with Spark DataSourceV2 and Catalog
# .config("spark.jars.packages", "io.delta:delta-core_2.12:1.0.0")
# .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension")
# .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog")
.getOrCreate()
)
spark_session.sparkContext.setLogLevel(SPARK_LOG_LEVEL)
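# NOTE: the Delta Lake package/extension settings above are left commented out;
# the Delta-backed helpers imported from spark_etl.etl presumably require them
# (and the delta-core package) to be enabled.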
@click.group()
def cli():
pass
@cli.command()
def smoke_test():
pass
@cli.command()
@click.option('--filepath', required=False, help='The input file path')
@click.option('--filepath2', required=False, help='The input file path')
@click.option(
'--output-path', required=False, help='The output file path')
@click.option(
'--delta-truncate/--no-delta-truncate', default=True, help='Clear previous delta runs')
def acquire_vitals(
filepath: str,
filepath2: str,
output_path: str,
delta_truncate: bool) -> None:
"""
"""
# TODO: Import spark_etl to Jupyter container
# TODO: Build Spark 3.2 container with Python bindings
# TODO: RE: patient matches, load demographics as a Delta and keep sync'd
# TODO: Partition demographics Delta by prac
# TODO: Implement "Current" tables as delta lake tables (merge/upsert)
# TODO: How to write parent/child tables to db at scale?
# See here: https://www.youtube.com/watch?v=aF2hRH5WZAU
# monotonically_increasing_id() can also be used.
start = datetime.now()
delta_path = "{root}/public/vitals/delta".format(root=output_path)
if delta_truncate:
logger.info(f"Clearing vitals delta: {delta_path}")
shutil.rmtree(delta_path, ignore_errors=True)
# logger.info(f"Creating vitals delta: {output_path}")
# delta_path = create_vitals_delta(spark_session, output_path)
# logger.info(f"Create finished in {datetime.now() - start}")
logger.info(f"Caching mpmi")
mpmi = cache_mpmi(spark_session)
logger.info(f"Cache finished in {datetime.now() - start}")
# logger.info(f"Persisting mpmi")
# mpmi_path = save_mpmi(spark_session, output_path)
# logger.info(f"Save finished in {datetime.now() - start}")
logger.info(f"Processing vitals: {filepath}")
load_vitals(spark_session, mpmi, filepath, output_path)
logger.info(f"Load process finished in {datetime.now() - start}")
logger.info(f"Processing vitals: {filepath2}")
upsert_vitals(spark_session, mpmi, filepath2, output_path)
logger.info(f"Upsert process finished in {datetime.now() - start}")
logger.info(f"Time-travel vitals: {delta_path}")
time_travel(
spark_session,
delta_path
)
logger.info(f"Time-travel finished in {datetime.now() - start}")
input("Press enter to exit...") # keep alive for Spark UI
@cli.command()
@click.option('--source-path', required=False, help='The Delta path')
@click.option('--output-path', required=False, help='The output file path')
def stream_vitals(source_path: str, output_path: str) -> None:
"""
JDBC streaming is not supported so I'm not sure how to use this.
It may be that Kafka is necessary for true streaming.
"""
logger.info(f"Stream (append mode) to delta on: {source_path}")
(
spark_session
.readStream
.format("delta")
# .option("ignoreDeletes", "true")
# .option("ignoreChanges", "true")
.load(source_path)
.writeStream
# .format("console") # debug
.format("delta")
.outputMode("append")
.option("checkpointLocation", f"{output_path}/_checkpoints/stream-from-delta")
.queryName('vitals_stream')
.start(output_path)
.awaitTermination(timeout=60*5) # 5 min
)
if __name__ == "__main__":
cli()
| [
"spark_etl.etl.load_vitals",
"shutil.rmtree",
"spark_etl.logger.info",
"click.option",
"spark_etl.etl.cache_mpmi",
"pyspark.sql.SparkSession.builder.appName",
"spark_etl.etl.time_travel",
"spark_etl.etl.upsert_vitals",
"click.group",
"datetime.datetime.now"
] | [((4160, 4173), 'click.group', 'click.group', ([], {}), '()\n', (4171, 4173), False, 'import click\n'), ((4256, 4326), 'click.option', 'click.option', (['"""--filepath"""'], {'required': '(False)', 'help': '"""The input file path"""'}), "('--filepath', required=False, help='The input file path')\n", (4268, 4326), False, 'import click\n'), ((4328, 4399), 'click.option', 'click.option', (['"""--filepath2"""'], {'required': '(False)', 'help': '"""The input file path"""'}), "('--filepath2', required=False, help='The input file path')\n", (4340, 4399), False, 'import click\n'), ((4401, 4475), 'click.option', 'click.option', (['"""--output-path"""'], {'required': '(False)', 'help': '"""The output file path"""'}), "('--output-path', required=False, help='The output file path')\n", (4413, 4475), False, 'import click\n'), ((4482, 4587), 'click.option', 'click.option', (['"""--delta-truncate/--no-delta-truncate"""'], {'default': '(True)', 'help': '"""Clear previous delta runs"""'}), "('--delta-truncate/--no-delta-truncate', default=True, help=\n 'Clear previous delta runs')\n", (4494, 4587), False, 'import click\n'), ((6582, 6650), 'click.option', 'click.option', (['"""--source-path"""'], {'required': '(False)', 'help': '"""The Delta path"""'}), "('--source-path', required=False, help='The Delta path')\n", (6594, 6650), False, 'import click\n'), ((6652, 6726), 'click.option', 'click.option', (['"""--output-path"""'], {'required': '(False)', 'help': '"""The output file path"""'}), "('--output-path', required=False, help='The output file path')\n", (6664, 6726), False, 'import click\n'), ((5234, 5248), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5246, 5248), False, 'from datetime import datetime, date, timedelta\n'), ((5656, 5684), 'spark_etl.logger.info', 'logger.info', (['f"""Caching mpmi"""'], {}), "(f'Caching mpmi')\n", (5667, 5684), False, 'from spark_etl import logger, SPARK_LOG_LEVEL\n'), ((5696, 5721), 'spark_etl.etl.cache_mpmi', 'cache_mpmi', (['spark_session'], {}), '(spark_session)\n', (5706, 5721), False, 'from spark_etl.etl import create_vitals_delta, cache_mpmi, save_mpmi, load_vitals, upsert_vitals, time_travel\n'), ((5949, 5994), 'spark_etl.logger.info', 'logger.info', (['f"""Processing vitals: {filepath}"""'], {}), "(f'Processing vitals: {filepath}')\n", (5960, 5994), False, 'from spark_etl import logger, SPARK_LOG_LEVEL\n'), ((5999, 6054), 'spark_etl.etl.load_vitals', 'load_vitals', (['spark_session', 'mpmi', 'filepath', 'output_path'], {}), '(spark_session, mpmi, filepath, output_path)\n', (6010, 6054), False, 'from spark_etl.etl import create_vitals_delta, cache_mpmi, save_mpmi, load_vitals, upsert_vitals, time_travel\n'), ((6130, 6176), 'spark_etl.logger.info', 'logger.info', (['f"""Processing vitals: {filepath2}"""'], {}), "(f'Processing vitals: {filepath2}')\n", (6141, 6176), False, 'from spark_etl import logger, SPARK_LOG_LEVEL\n'), ((6181, 6239), 'spark_etl.etl.upsert_vitals', 'upsert_vitals', (['spark_session', 'mpmi', 'filepath2', 'output_path'], {}), '(spark_session, mpmi, filepath2, output_path)\n', (6194, 6239), False, 'from spark_etl.etl import create_vitals_delta, cache_mpmi, save_mpmi, load_vitals, upsert_vitals, time_travel\n'), ((6317, 6365), 'spark_etl.logger.info', 'logger.info', (['f"""Time-travel vitals: {delta_path}"""'], {}), "(f'Time-travel vitals: {delta_path}')\n", (6328, 6365), False, 'from spark_etl import logger, SPARK_LOG_LEVEL\n'), ((6370, 6408), 'spark_etl.etl.time_travel', 'time_travel', (['spark_session', 'delta_path'], {}), 
'(spark_session, delta_path)\n', (6381, 6408), False, 'from spark_etl.etl import create_vitals_delta, cache_mpmi, save_mpmi, load_vitals, upsert_vitals, time_travel\n'), ((6937, 7000), 'spark_etl.logger.info', 'logger.info', (['f"""Stream (append mode) to delta on: {source_path}"""'], {}), "(f'Stream (append mode) to delta on: {source_path}')\n", (6948, 7000), False, 'from spark_etl import logger, SPARK_LOG_LEVEL\n'), ((5352, 5403), 'spark_etl.logger.info', 'logger.info', (['f"""Clearing vitals delta: {delta_path}"""'], {}), "(f'Clearing vitals delta: {delta_path}')\n", (5363, 5403), False, 'from spark_etl import logger, SPARK_LOG_LEVEL\n'), ((5412, 5457), 'shutil.rmtree', 'shutil.rmtree', (['delta_path'], {'ignore_errors': '(True)'}), '(delta_path, ignore_errors=True)\n', (5425, 5457), False, 'import shutil\n'), ((5759, 5773), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5771, 5773), False, 'from datetime import datetime, date, timedelta\n'), ((6099, 6113), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6111, 6113), False, 'from datetime import datetime, date, timedelta\n'), ((6286, 6300), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6298, 6300), False, 'from datetime import datetime, date, timedelta\n'), ((6474, 6488), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6486, 6488), False, 'from datetime import datetime, date, timedelta\n'), ((1475, 1517), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""stage_data"""'], {}), "('stage_data')\n", (1503, 1517), False, 'from pyspark.sql import SparkSession\n')] |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from runner.tasks import start_flow_task
class RunnerStartFlow(APIView):
def post(self, request):
flow_uuid = request.POST.get('flow_uuid', None)
flow_repo_url = request.POST.get('flow_repo_url', None)
if not flow_uuid or not flow_repo_url:
return Response('Missing parameters', status=status.HTTP_400_BAD_REQUEST)
start_flow_task.delay(flow_uuid, flow_repo_url)
return Response('Received', status=status.HTTP_202_ACCEPTED)
| [
"rest_framework.response.Response",
"runner.tasks.start_flow_task.delay"
] | [((487, 534), 'runner.tasks.start_flow_task.delay', 'start_flow_task.delay', (['flow_uuid', 'flow_repo_url'], {}), '(flow_uuid, flow_repo_url)\n', (508, 534), False, 'from runner.tasks import start_flow_task\n'), ((551, 604), 'rest_framework.response.Response', 'Response', (['"""Received"""'], {'status': 'status.HTTP_202_ACCEPTED'}), "('Received', status=status.HTTP_202_ACCEPTED)\n", (559, 604), False, 'from rest_framework.response import Response\n'), ((411, 477), 'rest_framework.response.Response', 'Response', (['"""Missing parameters"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing parameters', status=status.HTTP_400_BAD_REQUEST)\n", (419, 477), False, 'from rest_framework.response import Response\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetDefaultServiceAccountResult:
"""
A collection of values returned by getDefaultServiceAccount.
"""
def __init__(__self__, display_name=None, email=None, name=None, project=None, unique_id=None, id=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
__self__.display_name = display_name
"""
The display name for the service account.
"""
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
__self__.email = email
"""
Email address of the default service account used by VMs running in this project
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The fully-qualified name of the service account.
"""
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
__self__.project = project
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
__self__.unique_id = unique_id
"""
The unique id of the service account.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetDefaultServiceAccountResult(GetDefaultServiceAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDefaultServiceAccountResult(
display_name=self.display_name,
email=self.email,
name=self.name,
project=self.project,
unique_id=self.unique_id,
id=self.id)
def get_default_service_account(project=None,opts=None):
"""
Use this data source to retrieve default service account for this project
:param str project: The project ID. If it is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/d/compute_default_service_account.html.markdown.
"""
__args__ = dict()
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount', __args__, opts=opts).value
return AwaitableGetDefaultServiceAccountResult(
display_name=__ret__.get('displayName'),
email=__ret__.get('email'),
name=__ret__.get('name'),
project=__ret__.get('project'),
unique_id=__ret__.get('uniqueId'),
id=__ret__.get('id'))
| [
"pulumi.runtime.invoke",
"pulumi.InvokeOptions"
] | [((2860, 2882), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (2880, 2882), False, 'import pulumi\n'), ((2973, 3089), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount"""', '__args__'], {'opts': 'opts'}), "(\n 'gcp:compute/getDefaultServiceAccount:getDefaultServiceAccount',\n __args__, opts=opts)\n", (2994, 3089), False, 'import pulumi\n')] |
import math
def calculate_power_luminance(ambient_area):
#area in m^2
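    # Sizing rule implemented below: 100 VA for the first 6 m2 of floor area,
    # plus 60 VA for each additional complete 4 m2 (hence the integer division).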
potency = 0
if ambient_area <= 6:
print('Lighting Potency: '+ str(100) +' (VA)')
potency = 100
else:
print('extra potency: ' + str((ambient_area - 6)))
potency = 100 + 60 * int((ambient_area - 6)/4)
print('Lighting Potency: '+ str(potency) +' (VA)')
print('')
return potency
"""#Dimensionamento de TUGs"""
def calculate_number_and_power_of_tugs(ambient_name, perimeter = 0):
#area in m^2
#perimeter in m
class1 = ['banheiro']
class2 = ['cozinha', 'copa','copa-cozinha', 'area de servico', 'lavanderia']
class3 = ['varanda']
class4 = ['sala', 'quarto', 'dormitorio', 'escritorio']
number_tugs = 0
power_tugs = 0
if ambient_name in class1:
number_tugs = 1
power_tugs = number_tugs * 600
elif ambient_name in class2:
number_tugs = math.ceil(perimeter/3.5)
if number_tugs <= 3:
power_tugs = number_tugs * 600
else:
power_tugs = 3 * 600 + 100 * (number_tugs - 3)
elif ambient_name in class3:
number_tugs = 1
power_tugs = number_tugs * 100
elif ambient_name in class4:
number_tugs = math.ceil(perimeter/5)
power_tugs = number_tugs * 100
else:
print('No matches found')
        print('warning: this ambient is sized by area, see standard 54.10\nEnter the area: ')
area = float(input())
        if area <= 2.55:  # small ambient: a single 100 VA outlet; fall through to the common print/return below
            number_tugs = 1
            power_tugs = number_tugs * 100
print('Numbers TUG: ' + str(number_tugs) + '\nTUG Potency:' + str(power_tugs) +'(VA)')
print('')
return number_tugs, power_tugs
| [
"math.ceil"
] | [((866, 892), 'math.ceil', 'math.ceil', (['(perimeter / 3.5)'], {}), '(perimeter / 3.5)\n', (875, 892), False, 'import math\n'), ((1151, 1175), 'math.ceil', 'math.ceil', (['(perimeter / 5)'], {}), '(perimeter / 5)\n', (1160, 1175), False, 'import math\n')] |
import pandas as pd
SUPPORT = 0.005
CONF = 0.5
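# Minimum support and confidence thresholds used by the Apriori-style search below.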
def csv2list():
df = pd.read_csv("./实验三/数据/Groceries.csv")
itemsets = []
for itemset_str in df["items"]:
itemsets.append(set(itemset_str[1:-1].split(",")))
return itemsets
itemsets = csv2list()
itemsets_len = len(itemsets)
def build1deg(itemsets):
SAVE_PATH = "./one_deg_support.txt"
one_deg = {}
for itemset in itemsets:
for item in itemset:
one_deg[item] = one_deg.get(item, 0) + 1
one_deg_count = 0
items = list(one_deg.keys())
with open(SAVE_PATH, "w") as fw:
for item in items:
support = one_deg[item] / itemsets_len
if support > SUPPORT:
one_deg[item] = support
fw.write(f"{item}: {support}\n")
one_deg_count += 1
else:
del one_deg[item]
print(f"频繁一项集数量: {one_deg_count}", )
print(f"频繁一项集保存在`{SAVE_PATH}`")
return one_deg
one_deg = build1deg(itemsets)
def build2deg(one_deg, itemsets):
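    # Candidate 2-itemsets are all pairs of frequent single items; support is
    # counted with one pass over the transactions per pair (simple but quadratic
    # in the number of frequent items).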
SAVE_PATH = "./two_deg_support.txt"
items = list(one_deg.keys())
two_deg = {}
for i in range(0, len(items)):
for j in range(i+1, len(items)):
key = (items[i], items[j])
for itemset in itemsets:
if key[0] in itemset and key[1] in itemset:
two_deg[key] = two_deg.get(key, 0) + 1
pairs = list(two_deg.keys())
two_deg_count = 0
with open(SAVE_PATH, "w") as fw:
for pair in pairs:
support = two_deg[pair] / itemsets_len
if support > SUPPORT:
two_deg[pair] = support
fw.write(f"{pair}: {support}\n")
two_deg_count += 1
else:
del two_deg[pair]
print(f"频繁二项集数量: {two_deg_count}", )
print(f"频繁二项集保存在`{SAVE_PATH}`")
return two_deg
two_deg = build2deg(one_deg, itemsets)
def gen2deg_rules(one_deg, two_deg):
SAVE_PATH = "./two_deg_rules.txt"
pairs = list(two_deg.keys())
rules = {}
for pair in pairs:
rule = (pair[0], pair[1])
conf = two_deg[pair] / one_deg[rule[0]]
if conf > CONF:
rules[rule] = conf
rule = (pair[1], pair[0])
conf = two_deg[pair] / one_deg[rule[0]]
if conf > CONF:
rules[rule] = conf
with open(SAVE_PATH, "w") as fw:
for k, v in rules.items():
fw.write(f"{k[0]}->{k[1]}: {v}\n")
print(f"频繁二项集规则数量: {len(rules.keys())}", )
print(f"频繁二项集规则保存在`{SAVE_PATH}`")
gen2deg_rules(one_deg, two_deg)
def build3deg(two_deg, itemsets):
SAVE_PATH = "./three_deg_support.txt"
pairs = list(two_deg.keys())
itemset_3 = set()
for pair in pairs:
itemset_3.add(pair[0])
itemset_3.add(pair[1])
itemset_3 = list(itemset_3)
itemset_3.sort()
three_deg = {}
for i in range(0, len(itemset_3)):
for j in range(i+1, len(itemset_3)):
for k in range(j+1, len(itemset_3)):
item_i = itemset_3[i]
item_j = itemset_3[j]
item_k = itemset_3[k]
for itemset in itemsets:
if item_i in itemset and item_j in itemset and item_k in itemset:
tup = (item_i, item_j, item_k)
three_deg[tup] = three_deg.get(tup, 0)+1
three_deg_count = 0
tups = list(three_deg.keys())
with open(SAVE_PATH, "w") as fw:
for tup in tups:
support = three_deg[tup] / itemsets_len
if support > SUPPORT:
three_deg[tup] = support
fw.write(f"{tup}: {support}\n")
three_deg_count += 1
else:
del three_deg[tup]
print(f"频繁三项集数量: {three_deg_count}", )
print(f"频繁三项集保存在`{SAVE_PATH}`")
return three_deg
three_deg = build3deg(two_deg, itemsets)
def gen3deg_rules(one_deg, two_deg, three_deg):
SAVE_PATH = "./three_deg_rules.txt"
tups = list(three_deg.keys())
rules = {}
def enumTup(tup):
return [
(tup, tup[0], (tup[1], tup[2])),
(tup, tup[1], (tup[0], tup[2])),
(tup, tup[2], (tup[0], tup[1])),
(tup, (tup[1], tup[2]), tup[0]),
(tup, (tup[0], tup[2]), tup[1]),
(tup, (tup[0], tup[1]), tup[2]),
]
three_deg_rule_num = 0
with open(SAVE_PATH, "w") as fw:
for tup in tups:
rules = enumTup(tup)
for three, one, two in rules[:3]:
conf = three_deg[three] / one_deg[one]
if conf > CONF:
fw.write(f"{one}->{two}: {conf}\n")
three_deg_rule_num += 1
for three, two, one in rules[3:]:
try:
conf = three_deg[three] / two_deg[two]
except:
try:
conf = three_deg[three] / two_deg[(two[1], two[0])]
except:
print(two, "not found")
if conf > CONF:
fw.write(f"{two}->{one}: {conf}\n")
three_deg_rule_num += 1
print(f"频繁三项集规则数量: {three_deg_rule_num}", )
print(f"频繁三项集规则保存在`{SAVE_PATH}`")
gen3deg_rules(one_deg, two_deg, three_deg)
| [
"pandas.read_csv"
] | [((75, 112), 'pandas.read_csv', 'pd.read_csv', (['"""./实验三/数据/Groceries.csv"""'], {}), "('./实验三/数据/Groceries.csv')\n", (86, 112), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/12/10 15:45
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : <EMAIL>
-------------------------------------------------
Description :
-------------------------------------------------
"""
import datetime
__author__ = 'Max_Pengjb'
from app.models import db
from app.models.User import User
class Address(db.Document):
user_id = db.ReferenceField(User, required=True, verbose_name='用户id')
streetName = db.StringField(max_length=512, required=True, verbose_name='地址')
userName = db.StringField(max_length=128, required=True, verbose_name='收货人姓名')
tel = db.StringField(max_length=64, required=True, verbose_name='收货人手机号')
isDefault = db.BooleanField(default=False, required=True, verbose_name='是否默认地址')
create_time = db.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')
def __unicode__(self):
return str(self.streetName) + str(self.userName)
| [
"app.models.db.StringField",
"app.models.db.ReferenceField",
"app.models.db.BooleanField",
"app.models.db.DateTimeField"
] | [((564, 623), 'app.models.db.ReferenceField', 'db.ReferenceField', (['User'], {'required': '(True)', 'verbose_name': '"""用户id"""'}), "(User, required=True, verbose_name='用户id')\n", (581, 623), False, 'from app.models import db\n'), ((641, 705), 'app.models.db.StringField', 'db.StringField', ([], {'max_length': '(512)', 'required': '(True)', 'verbose_name': '"""地址"""'}), "(max_length=512, required=True, verbose_name='地址')\n", (655, 705), False, 'from app.models import db\n'), ((721, 788), 'app.models.db.StringField', 'db.StringField', ([], {'max_length': '(128)', 'required': '(True)', 'verbose_name': '"""收货人姓名"""'}), "(max_length=128, required=True, verbose_name='收货人姓名')\n", (735, 788), False, 'from app.models import db\n'), ((799, 866), 'app.models.db.StringField', 'db.StringField', ([], {'max_length': '(64)', 'required': '(True)', 'verbose_name': '"""收货人手机号"""'}), "(max_length=64, required=True, verbose_name='收货人手机号')\n", (813, 866), False, 'from app.models import db\n'), ((883, 951), 'app.models.db.BooleanField', 'db.BooleanField', ([], {'default': '(False)', 'required': '(True)', 'verbose_name': '"""是否默认地址"""'}), "(default=False, required=True, verbose_name='是否默认地址')\n", (898, 951), False, 'from app.models import db\n'), ((971, 1039), 'app.models.db.DateTimeField', 'db.DateTimeField', ([], {'default': 'datetime.datetime.now', 'verbose_name': '"""创建时间"""'}), "(default=datetime.datetime.now, verbose_name='创建时间')\n", (987, 1039), False, 'from app.models import db\n')] |
from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic
from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings
from pyflink.table.descriptors import (
Schema,
Kafka,
Json,
Rowtime,
OldCsv,
FileSystem,
)
from pyflink.table.udf import udf
s_env = StreamExecutionEnvironment.get_execution_environment()
s_env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
s_env.set_parallelism(1)
st_env = StreamTableEnvironment.create(
s_env,
environment_settings = EnvironmentSettings.new_instance()
.in_streaming_mode()
.use_blink_planner()
.build(),
)
X, Y, sess = None, None, None
@udf(result_type = DataTypes.STRING())
def predict(string):
global X, Y, sess
import tensorflow as tf
import json
import numpy as np
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
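    # Lazily load the frozen model once per worker process and cache the session
    # and input/output tensors in the module-level globals declared above.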
if X is None or Y is None or sess is None:
g = load_graph('/notebooks/frozen_model.pb')
X = g.get_tensor_by_name('import/Placeholder:0')
Y = g.get_tensor_by_name('import/logits:0')
sess = tf.Session(graph = g)
label = ['negative', 'positive']
maxlen = 50
UNK = 3
with open('/notebooks/dictionary-test.json', 'r') as fopen:
dic = json.load(fopen)
sentences = [string]
x = np.zeros((len(sentences), maxlen))
for i, sentence in enumerate(sentences):
for no, k in enumerate(sentence.split()[:maxlen][::-1]):
x[i, -1 - no] = dic.get(k, UNK)
indices = np.argmax(sess.run(Y, feed_dict = {X: x}), axis = 1)
return label[indices[0]]
st_env.set_python_requirements('/notebooks/requirements.txt')
st_env.register_function('predict', predict)
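# Source: JSON records ({datetime, text}) read from the Kafka topic "test";
# sink: a CSV file at result_path with the predicted sentiment label appended.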
st_env.connect(
Kafka()
.version('universal')
.topic('test')
.start_from_earliest()
.property('zookeeper.connect', 'zookeeper:2181')
.property('bootstrap.servers', 'kafka:9092')
).with_format(
Json()
.fail_on_missing_field(True)
.schema(
DataTypes.ROW(
[
DataTypes.FIELD('datetime', DataTypes.STRING()),
DataTypes.FIELD('text', DataTypes.STRING()),
]
)
)
).with_schema(
Schema()
.field('datetime', DataTypes.STRING())
.field('text', DataTypes.STRING())
).in_append_mode().register_table_source(
'source'
)
result_path = '/notebooks/output-tensorflow.csv'
st_env.connect(FileSystem().path(result_path)).with_format(
OldCsv()
.field_delimiter(',')
.field('datetime', DataTypes.STRING())
.field('sentence', DataTypes.STRING())
.field('label', DataTypes.STRING())
).with_schema(
Schema()
.field('datetime', DataTypes.STRING())
.field('sentence', DataTypes.STRING())
.field('label', DataTypes.STRING())
).in_append_mode().register_table_sink(
'sink'
)
st_env.from_path('source').select(
    'datetime, text, predict(text)'
).insert_into('sink')
st_env.execute('predict')
| [
"json.load",
"pyflink.table.EnvironmentSettings.new_instance",
"tensorflow.Session",
"pyflink.table.descriptors.OldCsv",
"pyflink.table.DataTypes.STRING",
"pyflink.table.descriptors.Kafka",
"tensorflow.gfile.GFile",
"pyflink.table.descriptors.Schema",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"pyflink.datastream.StreamExecutionEnvironment.get_execution_environment",
"pyflink.table.descriptors.FileSystem",
"pyflink.table.descriptors.Json"
] | [((318, 372), 'pyflink.datastream.StreamExecutionEnvironment.get_execution_environment', 'StreamExecutionEnvironment.get_execution_environment', ([], {}), '()\n', (370, 372), False, 'from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic\n'), ((1358, 1377), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (1368, 1377), True, 'import tensorflow as tf\n'), ((1525, 1541), 'json.load', 'json.load', (['fopen'], {}), '(fopen)\n', (1534, 1541), False, 'import json\n'), ((697, 715), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (713, 715), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((886, 929), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['frozen_graph_filename', '"""rb"""'], {}), "(frozen_graph_filename, 'rb')\n", (900, 929), True, 'import tensorflow as tf\n'), ((960, 973), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (971, 973), True, 'import tensorflow as tf\n'), ((1081, 1111), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {}), '(graph_def)\n', (1100, 1111), True, 'import tensorflow as tf\n'), ((1035, 1045), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1043, 1045), True, 'import tensorflow as tf\n'), ((2531, 2549), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2547, 2549), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((3018, 3036), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (3034, 3036), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((544, 578), 'pyflink.table.EnvironmentSettings.new_instance', 'EnvironmentSettings.new_instance', ([], {}), '()\n', (576, 578), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2492, 2510), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2508, 2510), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2864, 2882), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2880, 2882), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2978, 2996), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2994, 2996), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2460, 2468), 'pyflink.table.descriptors.Schema', 'Schema', ([], {}), '()\n', (2466, 2468), False, 'from pyflink.table.descriptors import Schema, Kafka, Json, Rowtime, OldCsv, FileSystem\n'), ((2824, 2842), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2840, 2842), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2935, 2953), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2951, 2953), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2195, 2201), 'pyflink.table.descriptors.Json', 'Json', ([], {}), '()\n', (2199, 2201), False, 'from pyflink.table.descriptors import Schema, Kafka, Json, Rowtime, OldCsv, FileSystem\n'), ((2329, 2347), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2345, 2347), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2390, 2408), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2406, 2408), 
False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2674, 2686), 'pyflink.table.descriptors.FileSystem', 'FileSystem', ([], {}), '()\n', (2684, 2686), False, 'from pyflink.table.descriptors import Schema, Kafka, Json, Rowtime, OldCsv, FileSystem\n'), ((2903, 2911), 'pyflink.table.descriptors.Schema', 'Schema', ([], {}), '()\n', (2909, 2911), False, 'from pyflink.table.descriptors import Schema, Kafka, Json, Rowtime, OldCsv, FileSystem\n'), ((2781, 2799), 'pyflink.table.DataTypes.STRING', 'DataTypes.STRING', ([], {}), '()\n', (2797, 2799), False, 'from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings\n'), ((2723, 2731), 'pyflink.table.descriptors.OldCsv', 'OldCsv', ([], {}), '()\n', (2729, 2731), False, 'from pyflink.table.descriptors import Schema, Kafka, Json, Rowtime, OldCsv, FileSystem\n'), ((1994, 2001), 'pyflink.table.descriptors.Kafka', 'Kafka', ([], {}), '()\n', (1999, 2001), False, 'from pyflink.table.descriptors import Schema, Kafka, Json, Rowtime, OldCsv, FileSystem\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-24 12:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('servers', '0003_auto_20170523_1409'),
]
operations = [
migrations.AlterField(
model_name='server',
name='operating_system',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='servers', to='servers.OperatingSystem'),
),
]
| [
"django.db.models.ForeignKey"
] | [((442, 563), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""servers"""', 'to': '"""servers.OperatingSystem"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='servers', to='servers.OperatingSystem')\n", (459, 563), False, 'from django.db import migrations, models\n')] |
# Solution of;
# Project Euler Problem 558: Irrational base
# https://projecteuler.net/problem=558
#
# Let r be the real root of the equation x^3 = x^2 + 1. Every positive integer
# can be written as the sum of distinct increasing powers of r. If we require
# the number of terms to be finite and the difference between any two
# exponents to be three or more, then the representation is unique. For
# example, 3 = r^-10 + r^-5 + r^-1 + r^2 and 10 = r^-10 + r^-7 + r^6.
# Interestingly, the relation holds for the complex roots of the equation. Let
# w(n) be the number of terms in this unique representation of n. Thus
# w(3) = 4 and w(10) = 3. More formally, for all positive integers n, we have:
#   n = \sum_{k=-\infty}^{\infty} b_k r^k
# under the conditions that: b_k is 0 or 1 for all k; b_k + b_{k+1} + b_{k+2} <= 1
# for all k; and w(n) = \sum_{k=-\infty}^{\infty} b_k is finite. Let
# S(m) = \sum_{j=1}^{m} w(j^2). You are given S(10) = 61 and S(1000) = 19403.
# Find S(5 000 000).
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
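# Illustrative sanity check (added sketch, not part of the original stub): numerically
# verifies the two example representations quoted above, 3 = r^-10 + r^-5 + r^-1 + r^2
# and 10 = r^-10 + r^-7 + r^6, where r ~ 1.4655712 is the real root of x^3 = x^2 + 1.
def _verify_examples():
    lo, hi = 1.0, 2.0
    for _ in range(100):  # bisection on f(x) = x^3 - x^2 - 1, whose real root lies in (1, 2)
        mid = (lo + hi) / 2
        lo, hi = (mid, hi) if mid ** 3 - mid ** 2 - 1 < 0 else (lo, mid)
    r = (lo + hi) / 2
    assert abs(sum(r ** k for k in (-10, -5, -1, 2)) - 3) < 1e-9
    assert abs(sum(r ** k for k in (-10, -7, 6)) - 10) < 1e-9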
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 558
timed.caller(dummy, n, i, prob_id)
| [
"timed.caller"
] | [((1197, 1231), 'timed.caller', 'timed.caller', (['dummy', 'n', 'i', 'prob_id'], {}), '(dummy, n, i, prob_id)\n', (1209, 1231), False, 'import timed\n')] |
import argparse
import numpy as np
from benchmark_statistics import Statistics
from benchmark_containers import BenchmarkResultsContainer
##############################################################################
def createBenchmarkResults(benchmark_samples, operation):
benchmark_results = BenchmarkResultsContainer()
benchmark_results.operation = operation
# Filter outliers
lower_fence, upper_fence = Statistics.getTukeyFences(benchmark_samples)
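    # For reference, a minimal sketch of the usual Tukey rule (assumption: the custom
    # Statistics.getTukeyFences helper implements the standard k = 1.5 variant):
    #   q1, q3 = np.percentile(benchmark_samples, [25, 75])
    #   iqr = q3 - q1
    #   lower_fence, upper_fence = q1 - 1.5 * iqr, q3 + 1.5 * iqr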
lower_outliers_samples = benchmark_samples[benchmark_samples < lower_fence]
benchmark_no_outliers_samples = benchmark_samples[(benchmark_samples >= lower_fence) & (benchmark_samples <= upper_fence)]
upper_outliers_samples = benchmark_samples[benchmark_samples > upper_fence]
benchmark_results.sorted_lower_outliers_samples = np.sort(lower_outliers_samples).tolist()
benchmark_results.sorted_no_outliers_samples = np.sort(benchmark_no_outliers_samples).tolist()
benchmark_results.sorted_upper_outliers_samples = np.sort(upper_outliers_samples).tolist()
# Create statistics info from benchmark samples
for key in benchmark_results.statistics:
without_outliers = key == "Without outliers"
benchmark_samples_to_process = benchmark_no_outliers_samples if without_outliers else benchmark_samples
benchmark_stats = benchmark_results.statistics[key]
benchmark_stats.num_analyzed_samples = Statistics.getNumAnalyzedSamples(benchmark_samples_to_process)
benchmark_stats.minimum = Statistics.getMin(benchmark_samples_to_process)
benchmark_stats.lower_fence = benchmark_results.sorted_no_outliers_samples[0] # Plotly uses first non outlier point, for exact lower_fence set to: lower_fence
benchmark_stats.q1 = Statistics.getPercentile(benchmark_samples_to_process, 25)
benchmark_stats.mean = Statistics.getMean(benchmark_samples_to_process)
benchmark_stats.median = Statistics.getPercentile(benchmark_samples_to_process, 50)
benchmark_stats.q3 = Statistics.getPercentile(benchmark_samples_to_process, 75)
benchmark_stats.upper_fence = benchmark_results.sorted_no_outliers_samples[-1] # Plotly uses last non outlier point, for exact upper_fence set to: upper_fence
benchmark_stats.maximum = Statistics.getMax(benchmark_samples_to_process)
benchmark_stats.iqr = Statistics.getIQR(benchmark_samples_to_process)
benchmark_stats.std_dev = Statistics.getStdDev(benchmark_samples_to_process)
benchmark_stats.std_err = Statistics.getStdErr(benchmark_samples_to_process)
benchmark_stats.std_err_percentage = benchmark_stats.std_err / benchmark_stats.mean * 100.0 if benchmark_stats.std_err > 0.0 else 0.0
benchmark_stats.margin = Statistics.getMargin(benchmark_samples_to_process)
benchmark_stats.margin_percentage = benchmark_stats.margin / benchmark_stats.mean * 100.0 if benchmark_stats.margin > 0.0 else 0.0
benchmark_stats.confidence_interval = Statistics.getConfidenceInterval(benchmark_samples_to_process)
benchmark_stats.skewness = Statistics.getSkewness(benchmark_samples_to_process)
benchmark_stats.kurtosis = Statistics.getKurtosis(benchmark_samples_to_process)
return benchmark_results
##############################################################################
def printBenchmarkResults(benchmark_samples, benchmark_results):
print("Samples:")
print(benchmark_samples, "\n")
print("Sorted Samples:")
print(benchmark_results.sorted_lower_outliers_samples, benchmark_results.sorted_no_outliers_samples, benchmark_results.sorted_upper_outliers_samples, "\n")
for key in benchmark_results.statistics:
without_outliers = key == "Without outliers"
statistics_results = benchmark_results.getFormatedStatisticsResultsWithoutOutliers() if without_outliers else benchmark_results.getFormatedStatisticsResultsWithOutliers()
text_alignment_offset = len(max(statistics_results, key=len)) + 3
print(key + ":")
for stat_key in statistics_results:
print(stat_key + "= ".rjust(text_alignment_offset - len(stat_key)) + statistics_results[stat_key])
print("\n")
##############################################################################
def runAnalyzer(kwargs=None):
# Parse args
parser = argparse.ArgumentParser(description="Benchmark Analyzer")
parser.add_argument("-in",
"--benchmark_samples_file",
type=str,
required=True,
help="File path containing the benchmark observations as comma separated numbers.")
parser.add_argument("-out",
"--json_output_path",
type=str,
required=True,
help="JSON output path for file containing the statistical information of the analyzed benchmark.")
parser.add_argument("-op",
"--operation_name",
type=str,
required=True,
help="Name of the operation related to the benchmark observations.")
parser.add_argument("-out_name",
"--output_file_name",
type=str,
required=False,
help="(Optional) The name of the output file, if this option is not used the file will be called Benchmark_Results_<MONTH>-<DAY>-<YEAR>_<HOUR>h<MINUTE>m<SECOND>s.")
args = parser.parse_args()
# Input Params
benchmark_samples_file = args.benchmark_samples_file
json_output_path = args.json_output_path
operation_name = args.operation_name
output_file_name = args.output_file_name
# Create an array from benchmark samples in file
with open(benchmark_samples_file) as file:
benchmark_samples = np.fromfile(file, dtype=float, sep=",")
# Create benchmark results
benchmark_results = createBenchmarkResults(benchmark_samples, operation_name)
# Print benchmark results
printBenchmarkResults(benchmark_samples, benchmark_results)
# Export benchmark results to a JSON file
benchmark_results.toJSONFile(json_output_path, operation_name, output_file_name)
##############################################################################
#-----------------------------------------------------------------------------
# Main
#-----------------------------------------------------------------------------
if __name__ == '__main__':
runAnalyzer()
| [
"benchmark_containers.BenchmarkResultsContainer",
"benchmark_statistics.Statistics.getTukeyFences",
"benchmark_statistics.Statistics.getStdErr",
"argparse.ArgumentParser",
"benchmark_statistics.Statistics.getKurtosis",
"benchmark_statistics.Statistics.getIQR",
"numpy.fromfile",
"benchmark_statistics.Statistics.getConfidenceInterval",
"benchmark_statistics.Statistics.getMean",
"benchmark_statistics.Statistics.getMax",
"numpy.sort",
"benchmark_statistics.Statistics.getStdDev",
"benchmark_statistics.Statistics.getPercentile",
"benchmark_statistics.Statistics.getMin",
"benchmark_statistics.Statistics.getSkewness",
"benchmark_statistics.Statistics.getMargin",
"benchmark_statistics.Statistics.getNumAnalyzedSamples"
] | [((304, 331), 'benchmark_containers.BenchmarkResultsContainer', 'BenchmarkResultsContainer', ([], {}), '()\n', (329, 331), False, 'from benchmark_containers import BenchmarkResultsContainer\n'), ((439, 483), 'benchmark_statistics.Statistics.getTukeyFences', 'Statistics.getTukeyFences', (['benchmark_samples'], {}), '(benchmark_samples)\n', (464, 483), False, 'from benchmark_statistics import Statistics\n'), ((4643, 4700), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Benchmark Analyzer"""'}), "(description='Benchmark Analyzer')\n", (4666, 4700), False, 'import argparse\n'), ((1476, 1538), 'benchmark_statistics.Statistics.getNumAnalyzedSamples', 'Statistics.getNumAnalyzedSamples', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (1508, 1538), False, 'from benchmark_statistics import Statistics\n'), ((1586, 1633), 'benchmark_statistics.Statistics.getMin', 'Statistics.getMin', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (1603, 1633), False, 'from benchmark_statistics import Statistics\n'), ((1858, 1916), 'benchmark_statistics.Statistics.getPercentile', 'Statistics.getPercentile', (['benchmark_samples_to_process', '(25)'], {}), '(benchmark_samples_to_process, 25)\n', (1882, 1916), False, 'from benchmark_statistics import Statistics\n'), ((1964, 2012), 'benchmark_statistics.Statistics.getMean', 'Statistics.getMean', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (1982, 2012), False, 'from benchmark_statistics import Statistics\n'), ((2060, 2118), 'benchmark_statistics.Statistics.getPercentile', 'Statistics.getPercentile', (['benchmark_samples_to_process', '(50)'], {}), '(benchmark_samples_to_process, 50)\n', (2084, 2118), False, 'from benchmark_statistics import Statistics\n'), ((2166, 2224), 'benchmark_statistics.Statistics.getPercentile', 'Statistics.getPercentile', (['benchmark_samples_to_process', '(75)'], {}), '(benchmark_samples_to_process, 75)\n', (2190, 2224), False, 'from benchmark_statistics import Statistics\n'), ((2449, 2496), 'benchmark_statistics.Statistics.getMax', 'Statistics.getMax', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2466, 2496), False, 'from benchmark_statistics import Statistics\n'), ((2544, 2591), 'benchmark_statistics.Statistics.getIQR', 'Statistics.getIQR', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2561, 2591), False, 'from benchmark_statistics import Statistics\n'), ((2639, 2689), 'benchmark_statistics.Statistics.getStdDev', 'Statistics.getStdDev', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2659, 2689), False, 'from benchmark_statistics import Statistics\n'), ((2737, 2787), 'benchmark_statistics.Statistics.getStdErr', 'Statistics.getStdErr', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2757, 2787), False, 'from benchmark_statistics import Statistics\n'), ((2979, 3029), 'benchmark_statistics.Statistics.getMargin', 'Statistics.getMargin', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (2999, 3029), False, 'from benchmark_statistics import Statistics\n'), ((3219, 3281), 'benchmark_statistics.Statistics.getConfidenceInterval', 'Statistics.getConfidenceInterval', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (3251, 3281), False, 'from benchmark_statistics import Statistics\n'), ((3329, 3381), 'benchmark_statistics.Statistics.getSkewness', 
'Statistics.getSkewness', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (3351, 3381), False, 'from benchmark_statistics import Statistics\n'), ((3429, 3481), 'benchmark_statistics.Statistics.getKurtosis', 'Statistics.getKurtosis', (['benchmark_samples_to_process'], {}), '(benchmark_samples_to_process)\n', (3451, 3481), False, 'from benchmark_statistics import Statistics\n'), ((6246, 6285), 'numpy.fromfile', 'np.fromfile', (['file'], {'dtype': 'float', 'sep': '""","""'}), "(file, dtype=float, sep=',')\n", (6257, 6285), True, 'import numpy as np\n'), ((849, 880), 'numpy.sort', 'np.sort', (['lower_outliers_samples'], {}), '(lower_outliers_samples)\n', (856, 880), True, 'import numpy as np\n'), ((944, 982), 'numpy.sort', 'np.sort', (['benchmark_no_outliers_samples'], {}), '(benchmark_no_outliers_samples)\n', (951, 982), True, 'import numpy as np\n'), ((1046, 1077), 'numpy.sort', 'np.sort', (['upper_outliers_samples'], {}), '(upper_outliers_samples)\n', (1053, 1077), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# hardware.py - OVFHardware class
#
# June 2016, <NAME>
# Copyright (c) 2013-2016, 2019 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Representation of OVF hardware definitions.
**Classes and Exceptions**
.. autosummary::
:nosignatures:
OVFHardware
OVFHardwareDataError
"""
import copy
import logging
from COT.data_validation import natural_sort
from COT.xml_file import XML
from .item import OVFItem, OVFItemDataError
logger = logging.getLogger(__name__)
class OVFHardwareDataError(Exception):
"""The input data used to construct an :class:`OVFHardware` is not sane."""
class OVFHardware(object):
"""Helper class for :class:`~COT.vm_description.ovf.ovf.OVF`.
Represents all hardware items defined by this OVF;
i.e., the contents of all Items in the VirtualHardwareSection.
Fundamentally it's just a dict of
:class:`~COT.vm_description.ovf.item.OVFItem` objects
with a bunch of helper methods.
"""
def __init__(self, ovf):
"""Construct an OVFHardware object describing all Items in the OVF.
Args:
ovf (OVF): OVF instance to extract hardware information from.
Raises:
OVFHardwareDataError: if any data errors are seen
"""
self.ovf = ovf
self.item_dict = {}
valid_profiles = set(ovf.config_profiles)
item_count = 0
for item in ovf.virtual_hw_section:
namespace = ovf.namespace_for_item_tag(item.tag)
if not namespace:
continue
item_count += 1
# We index the dict by InstanceID as it's the one property of
# an Item that uniquely identifies this set of hardware items.
instance = item.find(namespace + self.ovf.INSTANCE_ID).text
# Pre-sanity check - are all of the profiles associated with this
# item properly defined in the OVF DeploymentOptionSection?
item_profiles = set(item.get(self.ovf.ITEM_CONFIG, "").split())
unknown_profiles = item_profiles - valid_profiles
if unknown_profiles:
raise OVFHardwareDataError("Unknown profile(s) {0} for "
"Item instance {1}"
.format(unknown_profiles, instance))
if instance not in self.item_dict:
self.item_dict[instance] = OVFItem(self.ovf, item)
else:
try:
self.item_dict[instance].add_item(item)
except OVFItemDataError as exc:
logger.debug(exc)
# Mask away the nitty-gritty details from our caller
raise OVFHardwareDataError("Data conflict for instance {0}"
.format(instance))
logger.debug(
"OVF contains %s hardware Item elements describing %s "
"unique devices", item_count, len(self.item_dict))
# Treat the current state as golden:
for ovfitem in self.item_dict.values():
ovfitem.modified = False
def update_xml(self):
"""Regenerate all Items under the VirtualHardwareSection, if needed.
Will do nothing if no Items have been changed.
"""
modified = False
if len(self.item_dict) != len(XML.find_all_children(
self.ovf.virtual_hw_section,
set([self.ovf.ITEM, self.ovf.STORAGE_ITEM,
self.ovf.ETHERNET_PORT_ITEM]))):
modified = True
else:
for ovfitem in self.item_dict.values():
if ovfitem.modified:
modified = True
break
if not modified:
logger.verbose("No changes to hardware definition, "
"so no XML update is required")
return
# Delete the existing Items:
delete_count = 0
for item in list(self.ovf.virtual_hw_section):
if (item.tag == self.ovf.ITEM or
item.tag == self.ovf.STORAGE_ITEM or
item.tag == self.ovf.ETHERNET_PORT_ITEM):
self.ovf.virtual_hw_section.remove(item)
delete_count += 1
logger.debug("Cleared %d existing items from VirtualHWSection",
delete_count)
# Generate the new XML Items, in appropriately sorted order by Instance
ordering = [self.ovf.INFO, self.ovf.SYSTEM, self.ovf.ITEM]
for instance in natural_sort(self.item_dict):
logger.debug("Writing Item(s) with InstanceID %s", instance)
ovfitem = self.item_dict[instance]
new_items = ovfitem.generate_items()
logger.spam("Generated %d items", len(new_items))
for item in new_items:
XML.add_child(self.ovf.virtual_hw_section, item, ordering)
logger.verbose("Updated XML VirtualHardwareSection, now contains %d "
"Items representing %d devices",
len(self.ovf.virtual_hw_section.findall(self.ovf.ITEM)),
len(self.item_dict))
def find_unused_instance_id(self, start=1):
"""Find the first available ``InstanceID`` number.
Args:
start (int): First InstanceID value to consider (disregarding all
lower InstanceIDs, even if available).
Returns:
str: An instance ID that is not yet in use.
"""
instance = int(start)
while str(instance) in self.item_dict.keys():
instance += 1
logger.debug("Found unused InstanceID %d", instance)
return str(instance)
def new_item(self, resource_type, profile_list=None):
"""Create a new OVFItem of the given type.
Args:
resource_type (str): String such as 'cpu' or 'harddisk' - used as
a key to
:data:`~COT.vm_description.ovf.name_helper.OVFNameHelper1.RES_MAP`
profile_list (list): Profiles the new item should belong to
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id()
ovfitem = OVFItem(self.ovf)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.set_property(self.ovf.RESOURCE_TYPE,
self.ovf.RES_MAP[resource_type],
profile_list)
# ovftool freaks out if we leave out the ElementName on an Item,
# so provide a simple default value.
ovfitem.set_property(self.ovf.ELEMENT_NAME, resource_type,
profile_list)
self.item_dict[instance] = ovfitem
ovfitem.modified = True
logger.info("Created new %s under profile(s) %s, InstanceID is %s",
resource_type, profile_list, instance)
return (instance, ovfitem)
def delete_item(self, item):
"""Delete the given Item from the hardware.
Args:
item (OVFItem): Item to delete
"""
instance = item.get_value(self.ovf.INSTANCE_ID)
if self.item_dict[instance] == item:
del self.item_dict[instance]
# TODO: error handling - currently a no-op if item not in item_dict
def clone_item(self, parent_item, profile_list):
"""Clone an OVFItem to create a new instance.
Args:
parent_item (OVFItem): Instance to clone from
profile_list (list): List of profiles to clone into
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id(start=parent_item.instance_id)
logger.spam("Cloning existing Item %s with new instance ID %s",
parent_item, instance)
ovfitem = copy.deepcopy(parent_item)
# Delete any profiles from the parent that we don't need now,
# otherwise we'll get an error when trying to set the instance ID
# on our clone due to self-inconsistency (#64).
for profile in self.ovf.config_profiles:
if ovfitem.has_profile(profile) and profile not in profile_list:
ovfitem.remove_profile(profile)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.modified = True
self.item_dict[instance] = ovfitem
logger.spam("Added clone of %s under %s, instance is %s",
parent_item, profile_list, instance)
return (instance, ovfitem)
def item_match(self, item, resource_type, properties, profile_list):
"""Check whether the given item matches the given filters.
Args:
item (OVFItem): Item to validate
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
bool: True if the item matches all filters, False if not.
"""
if resource_type and (self.ovf.RES_MAP[resource_type] !=
item.get_value(self.ovf.RESOURCE_TYPE)):
return False
if profile_list:
for profile in profile_list:
if not item.has_profile(profile):
return False
for (prop, value) in properties.items():
if item.get_value(prop) != value:
return False
return True
def find_all_items(self, resource_type=None, properties=None,
profile_list=None):
"""Find all items matching the given type, properties, and profiles.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
list: Matching OVFItem instances
"""
items = [self.item_dict[instance] for instance in
natural_sort(self.item_dict)]
filtered_items = []
if properties is None:
properties = {}
for item in items:
if self.item_match(item, resource_type, properties, profile_list):
filtered_items.append(item)
logger.spam("Found %s Items of type %s with properties %s and"
" profiles %s", len(filtered_items), resource_type,
properties, profile_list)
return filtered_items
def find_item(self, resource_type=None, properties=None, profile=None):
"""Find the only OVFItem of the given :attr:`resource_type`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile (str): Single profile ID to search within
Returns:
OVFItem: Matching instance, or None
Raises:
LookupError: if more than one such Item exists.
"""
matches = self.find_all_items(resource_type, properties, [profile])
if len(matches) > 1:
raise LookupError(
"Found multiple matching '{0}' Items (instances {1})"
.format(resource_type, [m.instance_id for m in matches]))
elif len(matches) == 0:
return None
else:
return matches[0]
def get_item_count(self, resource_type, profile):
"""Get the number of Items of the given type for the given profile.
Wrapper for :meth:`get_item_count_per_profile`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile (str): Single profile identifier string to look up.
Returns:
int: Number of items of this type in this profile.
"""
return (self.get_item_count_per_profile(resource_type, [profile])
[profile])
def get_item_count_per_profile(self, resource_type, profile_list):
"""Get the number of Items of the given type per profile.
Items present under "no profile" will be counted against
the total for each profile.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
dict: mapping profile strings to the number of items under each
profile.
"""
count_dict = {}
if not profile_list:
# Get the count under all profiles
profile_list = self.ovf.config_profiles + [None]
for profile in profile_list:
count_dict[profile] = 0
for ovfitem in self.find_all_items(resource_type):
for profile in profile_list:
if ovfitem.has_profile(profile):
count_dict[profile] += 1
for (profile, count) in count_dict.items():
logger.spam("Profile '%s' has %s %s Item(s)",
profile, count, resource_type)
return count_dict
def _update_existing_item_profiles(self, resource_type,
count, profile_list):
"""Change profile membership of existing items as needed.
Helper method for :meth:`set_item_count_per_profile`.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
tuple: (count_dict, items_to_add, last_item)
"""
count_dict = self.get_item_count_per_profile(resource_type,
profile_list)
items_seen = dict.fromkeys(profile_list, 0)
last_item = None
# First, iterate over existing Items.
# Once we've seen "count" items under a profile, remove all subsequent
# items from this profile.
# If we don't have enough items under a profile, add any items found
# under other profiles to this profile as well.
for ovfitem in self.find_all_items(resource_type):
last_item = ovfitem
for profile in profile_list:
if ovfitem.has_profile(profile):
if items_seen[profile] >= count:
# Too many items - remove this one!
ovfitem.remove_profile(profile)
else:
items_seen[profile] += 1
else:
if count_dict[profile] < count:
# Add this profile to this Item
ovfitem.add_profile(profile)
count_dict[profile] += 1
items_seen[profile] += 1
# How many new Items do we need to create in total?
items_to_add = 0
for profile in profile_list:
delta = count - items_seen[profile]
if delta > items_to_add:
items_to_add = delta
return count_dict, items_to_add, last_item
def _update_cloned_item(self, new_item, new_item_profiles, item_count):
"""Update a cloned item to make it distinct from its parent.
Helper method for :meth:`set_item_count_per_profile`.
Args:
new_item (OVFItem): Newly cloned Item
new_item_profiles (list): Profiles new_item should belong to
item_count (int): How many Items of this type (including this
item) now exist. Used with
:meth:`COT.platform.Platform.guess_nic_name`
Returns:
OVFItem: Updated :param:`new_item`
Raises:
NotImplementedError: No support yet for updating ``Address``
NotImplementedError: If updating ``AddressOnParent`` but the
prior value varies across config profiles.
NotImplementedError: if ``AddressOnParent`` is not an integer.
"""
resource_type = new_item.hardware_type
address = new_item.get(self.ovf.ADDRESS)
if address:
raise NotImplementedError("Don't know how to ensure a unique "
"Address value when cloning an Item "
"of type {0}".format(resource_type))
address_on_parent = new_item.get(self.ovf.ADDRESS_ON_PARENT)
if address_on_parent:
address_list = new_item.get_all_values(self.ovf.ADDRESS_ON_PARENT)
if len(address_list) > 1:
raise NotImplementedError("AddressOnParent is not common "
"across all profiles but has "
"multiple values {0}. COT can't "
"handle this yet."
.format(address_list))
address_on_parent = address_list[0]
# Currently we only handle integer addresses
try:
address_on_parent = int(address_on_parent)
address_on_parent += 1
new_item.set_property(self.ovf.ADDRESS_ON_PARENT,
str(address_on_parent),
new_item_profiles)
except ValueError:
raise NotImplementedError("Don't know how to ensure a "
"unique AddressOnParent value "
"given base value '{0}'"
.format(address_on_parent))
if resource_type == 'ethernet':
# Update ElementName to reflect the NIC number
element_name = self.ovf.platform.guess_nic_name(item_count)
new_item.set_property(self.ovf.ELEMENT_NAME, element_name,
new_item_profiles)
return new_item
def set_item_count_per_profile(self, resource_type, count, profile_list):
"""Set the number of items of a given type under the given profile(s).
If the new count is greater than the current count under this
profile, then additional instances that already exist under
another profile will be added to this profile, starting with
the lowest-sequence instance not already present, and only as
a last resort will new instances be created.
If the new count is less than the current count under this profile,
then the highest-numbered instances will be removed preferentially.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
"""
if not profile_list:
# Set the profile list for all profiles, including the default
profile_list = self.ovf.config_profiles + [None]
count_dict, items_to_add, last_item = \
self._update_existing_item_profiles(
resource_type, count, profile_list)
logger.debug("Creating %d new items", items_to_add)
while items_to_add > 0:
# Which profiles does this Item need to belong to?
new_item_profiles = []
for profile in profile_list:
if count_dict[profile] < count:
new_item_profiles.append(profile)
count_dict[profile] += 1
if last_item is None:
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, new_item) = self.new_item(resource_type, new_item_profiles)
else:
(_, new_item) = self.clone_item(last_item, new_item_profiles)
# Check/update other properties of the clone that should be unique:
# TODO - we assume that the count is the same across profiles
new_item = self._update_cloned_item(
new_item, new_item_profiles, count_dict[new_item_profiles[0]])
last_item = new_item
items_to_add -= 1
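    # Minimal usage sketch (illustrative, not taken from the COT sources; assumes `ovf` is
    # an already-parsed OVF object and the profile names are hypothetical):
    #   hw = OVFHardware(ovf)
    #   hw.set_item_count_per_profile('ethernet', 4, ['1CPU-1GB', '2CPU-2GB'])
    #   hw.update_xml()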
def set_value_for_all_items(self, resource_type, prop_name, new_value,
profile_list, create_new=False):
"""Set a property to the given value for all items of the given type.
If no items of the given type exist, will create a new ``Item`` if
:attr:`create_new` is set to ``True``; otherwise will log a warning
and do nothing.
Args:
resource_type (str): Resource type such as 'cpu' or 'harddisk'
prop_name (str): Property name to update
new_value (str): New value to set the property to
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
create_new (bool): Whether to create a new entry if no items
of this :attr:`resource_type` presently exist.
"""
ovfitem_list = self.find_all_items(resource_type)
if not ovfitem_list:
if not create_new:
logger.warning("No items of type %s found. Nothing to do.",
resource_type)
return
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, ovfitem) = self.new_item(resource_type, profile_list)
ovfitem_list = [ovfitem]
for ovfitem in ovfitem_list:
ovfitem.set_property(prop_name, new_value, profile_list)
logger.debug("Updated %s %s to %s under profiles %s",
resource_type, prop_name, new_value, profile_list)
def set_item_values_per_profile(self, resource_type, prop_name, value_list,
profile_list, default=None):
"""Set value(s) for a property of multiple items of a type.
Args:
resource_type (str): Device type such as 'harddisk' or 'cpu'
prop_name (str): Property name to update
value_list (list): List of values to set (one value per item of the
given :attr:`resource_type`)
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
default (str): If there are more matching items than entries in
:attr:`value_list`, set extra items to this value
"""
if profile_list is None:
profile_list = self.ovf.config_profiles + [None]
for ovfitem in self.find_all_items(resource_type):
if len(value_list):
new_value = value_list.pop(0)
else:
new_value = default
for profile in profile_list:
if ovfitem.has_profile(profile):
ovfitem.set_property(prop_name, new_value, [profile])
logger.info("Updated %s property %s to %s under %s",
resource_type, prop_name, new_value, profile_list)
if len(value_list):
logger.warning("After scanning all known %s Items, not all "
"%s values were used - leftover %s",
resource_type, prop_name, value_list)
| [
"copy.deepcopy",
"COT.xml_file.XML.add_child",
"COT.data_validation.natural_sort",
"logging.getLogger"
] | [((1007, 1034), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1024, 1034), False, 'import logging\n'), ((5097, 5125), 'COT.data_validation.natural_sort', 'natural_sort', (['self.item_dict'], {}), '(self.item_dict)\n', (5109, 5125), False, 'from COT.data_validation import natural_sort\n'), ((8395, 8421), 'copy.deepcopy', 'copy.deepcopy', (['parent_item'], {}), '(parent_item)\n', (8408, 8421), False, 'import copy\n'), ((5409, 5467), 'COT.xml_file.XML.add_child', 'XML.add_child', (['self.ovf.virtual_hw_section', 'item', 'ordering'], {}), '(self.ovf.virtual_hw_section, item, ordering)\n', (5422, 5467), False, 'from COT.xml_file import XML\n'), ((10613, 10641), 'COT.data_validation.natural_sort', 'natural_sort', (['self.item_dict'], {}), '(self.item_dict)\n', (10625, 10641), False, 'from COT.data_validation import natural_sort\n')] |
import json
#Convert from JSON to Python
# some JSON:
x = '{ "name":"John", "age":30, "city":"New York"}'
# parse x:
y = json.loads(x)
# the result is a Python dictionary:
print(y["name"])
#Convert from Python to JSON
# a Python object (dict):
x = {
"name": "John",
"age": 30,
"city": "New York"
}
# convert into JSON:
y = json.dumps(x)
# the result is a JSON string:
print(y)
#Convert a Python object containing all the legal data types:
x = {
"name": "John",
"age": 30,
"married": True,
"divorced": False,
"children": ("Ann","Billy"),
"pets": None,
"cars": [
{"model": "BMW 230", "mpg": 27.5},
{"model": "Ford Edge", "mpg": 24.1}
]
}
print(json.dumps(x))
#Use the indent parameter to define the numbers of indents:
#Use the separators parameter change the default separator:
y = json.dumps(x, indent=4, separators=(". ", " = "))
#Use the sort_keys parameter to specify if the result should be sorted or not:
z = json.dumps(x, indent=4, sort_keys=True)
print(y)
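# A small follow-up sketch (assumption: "data.json" is an arbitrary local file name):
# json.dump and json.load perform the same conversions as dumps/loads, but on file objects.
with open("data.json", "w") as f:
    json.dump(x, f, indent=4)
with open("data.json") as f:
    x_again = json.load(f)
print(x_again["name"])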
print(z) | [
"json.loads",
"json.dumps"
] | [((123, 136), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (133, 136), False, 'import json\n'), ((333, 346), 'json.dumps', 'json.dumps', (['x'], {}), '(x)\n', (343, 346), False, 'import json\n'), ((817, 866), 'json.dumps', 'json.dumps', (['x'], {'indent': '(4)', 'separators': "('. ', ' = ')"}), "(x, indent=4, separators=('. ', ' = '))\n", (827, 866), False, 'import json\n'), ((950, 989), 'json.dumps', 'json.dumps', (['x'], {'indent': '(4)', 'sort_keys': '(True)'}), '(x, indent=4, sort_keys=True)\n', (960, 989), False, 'import json\n'), ((678, 691), 'json.dumps', 'json.dumps', (['x'], {}), '(x)\n', (688, 691), False, 'import json\n')] |
"""Calendar_widget.py"""
import re
import curses
import random
import calendar
import itertools
import source.config as config
from collections import namedtuple
date = namedtuple("Date", "Year Month Day")
def iter_months_years(startDate: object, endDate: object) -> tuple:
"""Returns years and months based on given start and end dates. Expected
date format is YYYY-MM-DD. Ex. 2012-07-15
"""
# TODO: Make the function an iterable
months = []
# begin with all years between start and end date
years = [year for year in range(startDate.Year, endDate.Year + 1)]
if len(years) > 1:
# covering more than a single year, find the months being used
for year in range(len(years)):
monthsRange = (1, 13) # normal year covers between months 1-12
if year == 0:
monthsRange = (startDate.Month, 13) # first year in list
elif year == len(years) - 1:
monthsRange = (1, endDate.Month + 1) # last year in list
months.append([month for month in range(*monthsRange)])
else:
# dates are in the same year. grab the months between the dates
months.append([i for i in range(startDate.Month, endDate.Month + 1)])
# return [(year, m) for year, month in zip(years, months) for m in month]
for year, month in zip(years, months):
for m in month:
yield (year, m)
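# For example (illustrative values): iterating from 2017-12-01 to 2018-02-01 yields the
# month pairs spanning both years:
#   list(iter_months_years(date(2017, 12, 1), date(2018, 2, 1)))
#   -> [(2017, 12), (2018, 1), (2018, 2)]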
def days_in_month_year(startDate, endDate):
"""Returns the day/date tuple combination for each month/year input passed
into the calendar.TextCalendar class method months2calendar(year, month).
Differences in TextCalendar methods (W => number of weeks in the month):
monthdatescalendar -> returns Wx7 matrix of datetime objects
monthdays2calendar -> returns Wx7 matrix of tuple objects (date, day)
monthdayscalendar -> returns Wx7 matrix of ints representing the date
"""
# setup calendar settings to retrieve dates based on year/month pairs
tc = calendar.TextCalendar()
tc.setfirstweekday(6) # set to sunday as first day
days_per_monthyear = dict()
for year, month in iter_months_years(startDate, endDate):
days_per_monthyear[(year, month)] = tc.monthdays2calendar(year, month)
return days_per_monthyear
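# For example, with firstweekday=6 (Sunday first) July 2012 starts on a Sunday, so the
# first week row of monthdays2calendar(2012, 7) is:
#   [(1, 6), (2, 0), (3, 1), (4, 2), (5, 3), (6, 4), (7, 5)]
# i.e. (day-of-month, weekday) tuples with Monday == 0 ... Sunday == 6, and 0 marking
# padding days that fall outside the month.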
def parse_date(datestring: str) -> object:
"""Takes in a string object representing a formatted date. If not
formatted correctly, will raise an error giving description of the correct
format. Returns a date object with year, month, date properties
"""
if not re.match(config.DATE_FORMAT_REGEX, datestring):
error = f"{config.DATE_FORMAT_INVALID} {config.DATE_FORMAT_EXPECTED}"
raise ValueError(error)
return date(*[int(i) for i in datestring.split('-')])
def initialize_curses_settings():
"""Curses settings that need to be called before the rest of program"""
curses.curs_set(0)
def main(window):
"""Creates a navigatable calendar widget for the dates passed in. Later on
should use min/max dates from the database holding the date infos.
"""
initialize_curses_settings()
loc = 0
# dateParser(db.getMinDate, db.getMaxDate)
start = parse_date("2017-12-1")
end = parse_date("2018-2-1")
# we should now have a list of lists matrix holding weeks per month/year
monthtable = days_in_month_year(start, end)
window.border()
y, x = window.getmaxyx()
window.vline(1, 8, curses.ACS_VLINE, y - 2)
verticaloffset = 2
horizontaloffset = 1
window.addstr(1, 1, "SMTWTFS")
for month in monthtable.values():
for week in month:
window.addstr(verticaloffset, horizontaloffset + 9, str(week))
weekdayindex = 0
for date, dayofweek in week:
if (date) != 0:
window.addstr(verticaloffset,
horizontaloffset + weekdayindex,
'o')
weekdayindex += 1
verticaloffset += 1
ch = window.getch()
print(ch, curses.KEY_PPAGE == ch) #ppage:339, #npage:338
# TODO: implement program loop involving vertical/horiontal scrolling
if __name__ == "__main__":
curses.wrapper(main)
| [
"curses.wrapper",
"re.match",
"calendar.TextCalendar",
"collections.namedtuple",
"curses.curs_set"
] | [((170, 206), 'collections.namedtuple', 'namedtuple', (['"""Date"""', '"""Year Month Day"""'], {}), "('Date', 'Year Month Day')\n", (180, 206), False, 'from collections import namedtuple\n'), ((2020, 2043), 'calendar.TextCalendar', 'calendar.TextCalendar', ([], {}), '()\n', (2041, 2043), False, 'import calendar\n'), ((2918, 2936), 'curses.curs_set', 'curses.curs_set', (['(0)'], {}), '(0)\n', (2933, 2936), False, 'import curses\n'), ((4267, 4287), 'curses.wrapper', 'curses.wrapper', (['main'], {}), '(main)\n', (4281, 4287), False, 'import curses\n'), ((2585, 2631), 're.match', 're.match', (['config.DATE_FORMAT_REGEX', 'datestring'], {}), '(config.DATE_FORMAT_REGEX, datestring)\n', (2593, 2631), False, 'import re\n')] |
from flask import Flask, Request, Response, request
import json
app = Flask(__name__)
@app.route("/devices", methods=["POST"])
def devices():
    dict_device = request.get_data(as_text=True)
    dados_device = json.loads(dict_device)
    return Response(json.dumps(dados_device), mimetype="application/json")
| [
"flask.request.get_data",
"json.loads"
] | [((102, 132), 'flask.request.get_data', 'request.get_data', ([], {'as_text': '(True)'}), '(as_text=True)\n', (118, 132), False, 'from flask import Flask, Request, Response, request\n'), ((153, 176), 'json.loads', 'json.loads', (['dict_device'], {}), '(dict_device)\n', (163, 176), False, 'import json\n')] |
import sys
from urllib import request, parse, error
from multiprocessing import Process
urls = [
'https://github.com/',
'https://twitter.com/',
'https://hub.docker.com/v2/users/'
]
def inspect_status_code(url):
try:
response = request.urlopen(url)
return response.code
except error.HTTPError as e:
return e.code
def inspect(url, user_id):
code = inspect_status_code(url+user_id)
title = parse.urlparse(url).netloc
prefix = '\033[32m' if code == 404 else '\033[31m'
suffix = '\033[0m'
result = '{}{}{}'.format(prefix, code, suffix)
print(title.ljust(16), result)
def main():
if len(sys.argv) < 2:
print('usage: python3 main.py ${USER_ID}')
exit(1)
user_id = sys.argv[1]
    processes = [Process(target=inspect, args=(url, user_id)) for url in urls]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
if __name__ == '__main__':
main()
| [
"multiprocessing.Process",
"urllib.request.urlopen",
"urllib.parse.urlparse"
] | [((254, 274), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (269, 274), False, 'from urllib import request, parse, error\n'), ((443, 462), 'urllib.parse.urlparse', 'parse.urlparse', (['url'], {}), '(url)\n', (457, 462), False, 'from urllib import request, parse, error\n'), ((777, 821), 'multiprocessing.Process', 'Process', ([], {'target': 'inspect', 'args': '(url, user_id)'}), '(target=inspect, args=(url, user_id))\n', (784, 821), False, 'from multiprocessing import Process\n')] |
import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset
from base import BaseDataLoader
def text_image_collate_fn(data):
collate_data = {}
# Sort a data list by right caption length (descending order).
data.sort(key=lambda x: x['right_caption'].size(0), reverse=True)
collate_data['right_img_id'] = []
collate_data['class_id'] = []
collate_data['right_txt'] = []
class_ids = []
right_captions = []
right_embeds = []
right_images_32 = []
right_images_64 = []
right_images_128 = []
right_images_256 = []
collate_data['wrong_img_id'] = []
collate_data['wrong_txt'] = []
wrong_captions = []
wrong_embeds = []
wrong_images_32 = []
wrong_images_64 = []
wrong_images_128 = []
wrong_images_256 = []
for i in range(len(data)):
        # keep image ids as a plain list; class ids are stacked into an array further below
        collate_data['right_img_id'].append(data[i]['right_img_id'])
        class_ids.append(data[i]['right_class_id'])
collate_data['right_txt'].append(data[i]['right_txt'])
right_captions.append(data[i]['right_caption'])
right_embeds.append(data[i]['right_embed'])
right_images_32.append(data[i]['right_image_32'])
right_images_64.append(data[i]['right_image_64'])
right_images_128.append(data[i]['right_image_128'])
right_images_256.append(data[i]['right_image_256'])
collate_data['wrong_txt'].append(data[i]['wrong_txt'])
wrong_captions.append(data[i]['wrong_caption'])
wrong_embeds.append(data[i]['wrong_embed'])
wrong_images_32.append(data[i]['wrong_image_32'])
wrong_images_64.append(data[i]['wrong_image_64'])
wrong_images_128.append(data[i]['wrong_image_128'])
wrong_images_256.append(data[i]['wrong_image_256'])
# sort and get captions, lengths, images, embeds, etc.
right_caption_lengths = [len(cap) for cap in right_captions]
collate_data['right_caption_lengths'] = torch.LongTensor(right_caption_lengths)
collate_data['right_captions'] = torch.zeros(len(right_caption_lengths), max(right_caption_lengths)).long()
for i, cap in enumerate(right_captions):
end = right_caption_lengths[i]
collate_data['right_captions'][i, :end] = cap[:end]
# sort and get captions, lengths, images, embeds, etc.
wrong_captions.sort(key=lambda x: len(x), reverse=True)
wrong_caption_lengths = [len(cap) for cap in wrong_captions]
collate_data['wrong_caption_lengths'] = torch.LongTensor(wrong_caption_lengths)
collate_data['wrong_captions'] = torch.zeros(len(wrong_caption_lengths), max(wrong_caption_lengths)).long()
for i, cap in enumerate(wrong_captions):
end = wrong_caption_lengths[i]
collate_data['wrong_captions'][i, :end] = cap[:end]
collate_data['class_id'] = np.stack(class_ids)
collate_data['right_embeds'] = torch.stack(right_embeds, 0)
collate_data['right_images_32'] = torch.stack(right_images_32, 0)
collate_data['right_images_64'] = torch.stack(right_images_64, 0)
collate_data['right_images_128'] = torch.stack(right_images_128, 0)
collate_data['right_images_256'] = torch.stack(right_images_256, 0)
collate_data['wrong_embeds'] = torch.stack(wrong_embeds, 0)
collate_data['wrong_images_32'] = torch.stack(wrong_images_32, 0)
collate_data['wrong_images_64'] = torch.stack(wrong_images_64, 0)
collate_data['wrong_images_128'] = torch.stack(wrong_images_128, 0)
collate_data['wrong_images_256'] = torch.stack(wrong_images_256, 0)
return collate_data
class TextImageDataLoader(DataLoader):
def __init__(self, data_dir, dataset_name, which_set, image_size, batch_size, num_workers):
self.data_dir = data_dir
self.which_set = which_set
self.dataset_name = dataset_name
assert self.which_set in {'train', 'valid', 'test'}
self.image_size = (image_size, image_size)
self.batch_size = batch_size
self.num_workers = num_workers
        # transforms.ToTensor converts PIL images in range [0, 255] to float tensors in
        # [0.0, 1.0]; Normalize(mean=0.5, std=0.5) then maps them to the range [-1.0, 1.0]
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
self.dataset = TextImageDataset(self.data_dir, self.dataset_name, self.which_set, self.transform, vocab_from_file=False)
self.n_samples = len(self.dataset)
if self.which_set == 'train' or self.which_set == 'valid':
super(TextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn
)
else:
super(TextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=0,
collate_fn=text_image_collate_fn)
class COCOTextImageDataLoader(BaseDataLoader):
"""
COCO Image Caption Model Data Loader
"""
def __init__(self, data_dir, which_set, image_size, batch_size, validation_split, num_workers):
self.data_dir = data_dir
self.which_set = which_set
self.validation_split = validation_split
assert self.which_set in {'train', 'val', 'test'}
self.image_size = (image_size, image_size)
self.batch_size = batch_size
self.num_workers = num_workers
        # transforms.ToTensor converts PIL images in range [0, 255] to float tensors in
        # [0.0, 1.0]; Normalize(mean=0.5, std=0.5) then maps them to the range [-1.0, 1.0]
mean = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)
std = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)
if which_set == 'val' or which_set == 'test':
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
else:
self.transform = transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
self.dataset = COCOTextImageDataset(self.data_dir, self.which_set, self.transform, vocab_from_file=True)
# self.n_samples = len(self.dataset)
if self.which_set == 'train':
super(COCOTextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=True,
validation_split=validation_split,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn
)
else:
super(COCOTextImageDataLoader, self).__init__(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=False,
validation_split=0,
num_workers=self.num_workers,
collate_fn=text_image_collate_fn)
if __name__ == '__main__':
data_loader = COCOTextImageDataLoader(
data_dir='/Users/leon/Projects/I2T2I/data/coco/',
# dataset_name="birds",
which_set='val',
image_size=256,
batch_size=16,
validation_split=0.05,
num_workers=0)
print(len(data_loader.dataset.vocab))
print(len(data_loader.dataset.vocab.word2idx))
for i, data in enumerate(data_loader):
print(i)
print("right_img_id:", data['right_img_id'])
# print("class_ids:", data["class_id"])
print('right images 32 shape:', data['right_images_32'].shape)
print('right images 64 shape:', data['right_images_64'].shape)
print('right images 128 shape:', data['right_images_128'].shape)
print('right images 256 shape:', data['right_images_256'].shape)
print("right embed shape:", data['right_embeds'].shape)
print("right caption shape:", data['right_captions'].shape)
print("right caption lengths:", data['right_caption_lengths'])
print("right txt:", data["right_txt"])
print("wrong_img_id:", data['wrong_img_id'])
print('wrong images 32 shape:', data['wrong_images_32'].shape)
print('wrong images 64 shape:', data['wrong_images_64'].shape)
print('wrong images 128 shape:', data['wrong_images_128'].shape)
print('wrong images 256 shape:', data['wrong_images_256'].shape)
print("wrong embed shape:", data['wrong_embeds'].shape)
print("wrong caption shape:", data['wrong_captions'].shape)
print("wrong caption lengths:", data['wrong_caption_lengths'])
print("wrong txt:", data["wrong_txt"])
if i == 10:
print("done")
break | [
"numpy.stack",
"data_loader.datasets_custom.COCOTextImageDataset",
"torch.stack",
"torchvision.transforms.RandomHorizontalFlip",
"torch.LongTensor",
"data_loader.datasets_custom.TextImageDataset",
"torchvision.transforms.Normalize",
"torch.tensor",
"torchvision.transforms.ToTensor"
] | [((2042, 2081), 'torch.LongTensor', 'torch.LongTensor', (['right_caption_lengths'], {}), '(right_caption_lengths)\n', (2058, 2081), False, 'import torch\n'), ((2567, 2606), 'torch.LongTensor', 'torch.LongTensor', (['wrong_caption_lengths'], {}), '(wrong_caption_lengths)\n', (2583, 2606), False, 'import torch\n'), ((2895, 2914), 'numpy.stack', 'np.stack', (['class_ids'], {}), '(class_ids)\n', (2903, 2914), True, 'import numpy as np\n'), ((2950, 2978), 'torch.stack', 'torch.stack', (['right_embeds', '(0)'], {}), '(right_embeds, 0)\n', (2961, 2978), False, 'import torch\n'), ((3017, 3048), 'torch.stack', 'torch.stack', (['right_images_32', '(0)'], {}), '(right_images_32, 0)\n', (3028, 3048), False, 'import torch\n'), ((3087, 3118), 'torch.stack', 'torch.stack', (['right_images_64', '(0)'], {}), '(right_images_64, 0)\n', (3098, 3118), False, 'import torch\n'), ((3158, 3190), 'torch.stack', 'torch.stack', (['right_images_128', '(0)'], {}), '(right_images_128, 0)\n', (3169, 3190), False, 'import torch\n'), ((3230, 3262), 'torch.stack', 'torch.stack', (['right_images_256', '(0)'], {}), '(right_images_256, 0)\n', (3241, 3262), False, 'import torch\n'), ((3299, 3327), 'torch.stack', 'torch.stack', (['wrong_embeds', '(0)'], {}), '(wrong_embeds, 0)\n', (3310, 3327), False, 'import torch\n'), ((3366, 3397), 'torch.stack', 'torch.stack', (['wrong_images_32', '(0)'], {}), '(wrong_images_32, 0)\n', (3377, 3397), False, 'import torch\n'), ((3436, 3467), 'torch.stack', 'torch.stack', (['wrong_images_64', '(0)'], {}), '(wrong_images_64, 0)\n', (3447, 3467), False, 'import torch\n'), ((3507, 3539), 'torch.stack', 'torch.stack', (['wrong_images_128', '(0)'], {}), '(wrong_images_128, 0)\n', (3518, 3539), False, 'import torch\n'), ((3579, 3611), 'torch.stack', 'torch.stack', (['wrong_images_256', '(0)'], {}), '(wrong_images_256, 0)\n', (3590, 3611), False, 'import torch\n'), ((4410, 4520), 'data_loader.datasets_custom.TextImageDataset', 'TextImageDataset', (['self.data_dir', 'self.dataset_name', 'self.which_set', 'self.transform'], {'vocab_from_file': '(False)'}), '(self.data_dir, self.dataset_name, self.which_set, self.\n transform, vocab_from_file=False)\n', (4426, 4520), False, 'from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset\n'), ((5791, 5841), 'torch.tensor', 'torch.tensor', (['[0.5, 0.5, 0.5]'], {'dtype': 'torch.float32'}), '([0.5, 0.5, 0.5], dtype=torch.float32)\n', (5803, 5841), False, 'import torch\n'), ((5856, 5906), 'torch.tensor', 'torch.tensor', (['[0.5, 0.5, 0.5]'], {'dtype': 'torch.float32'}), '([0.5, 0.5, 0.5], dtype=torch.float32)\n', (5868, 5906), False, 'import torch\n'), ((6426, 6519), 'data_loader.datasets_custom.COCOTextImageDataset', 'COCOTextImageDataset', (['self.data_dir', 'self.which_set', 'self.transform'], {'vocab_from_file': '(True)'}), '(self.data_dir, self.which_set, self.transform,\n vocab_from_file=True)\n', (6446, 6519), False, 'from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset\n'), ((4229, 4262), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4260, 4262), False, 'from torchvision import transforms\n'), ((4276, 4297), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4295, 4297), False, 'from torchvision import transforms\n'), ((4311, 4374), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (4331, 4374), False, 'from 
torchvision import transforms\n'), ((6028, 6061), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6059, 6061), False, 'from torchvision import transforms\n'), ((6079, 6100), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6098, 6100), False, 'from torchvision import transforms\n'), ((6118, 6158), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (6138, 6158), False, 'from torchvision import transforms\n'), ((6307, 6328), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6326, 6328), False, 'from torchvision import transforms\n'), ((6346, 6386), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (6366, 6386), False, 'from torchvision import transforms\n')] |
import pytest
from sqlalchemy.exc import ProgrammingError
from sqlalchemy_continuum.utils import count_versions
from kokon.orm import Guest
from kokon.utils.db import DB
from tests.helpers import admin_session
def test_app_user():
with admin_session() as session:
session.execute("TRUNCATE guests_version RESTART IDENTITY;")
session.execute("TRUNCATE guests RESTART IDENTITY;")
session.execute("TRUNCATE transaction RESTART IDENTITY;")
with DB().acquire() as session:
# creates a guest without error and version as well
guid = "74b86069-c837-4431-a7ee-3a4aedda978b"
guest = Guest(
guid=guid,
full_name="<NAME>",
email="<EMAIL>",
phone_number="100-330-497",
people_in_group=4,
adult_male_count=0,
adult_female_count=2,
children_ages=[1, 10],
have_pets=False,
how_long_to_stay="1w",
updated_by_id="782962fc-dc11-4a33-8f08-b7da532dd40d",
)
session.add(guest)
session.commit()
session.refresh(guest)
assert guest.claimed_by_id is None
# trigger works
claimed_at = guest.claimed_at
assert claimed_at is not None
guest.adult_male_count = 1
session.commit()
with pytest.raises(ProgrammingError):
_ = guest.versions[0]
with admin_session() as session:
guest = session.query(Guest).where(Guest.guid == guid).one()
assert count_versions(guest) == 2
assert str(guest.versions[0].guid) == guid
| [
"tests.helpers.admin_session",
"sqlalchemy_continuum.utils.count_versions",
"kokon.orm.Guest",
"kokon.utils.db.DB",
"pytest.raises"
] | [((244, 259), 'tests.helpers.admin_session', 'admin_session', ([], {}), '()\n', (257, 259), False, 'from tests.helpers import admin_session\n'), ((636, 913), 'kokon.orm.Guest', 'Guest', ([], {'guid': 'guid', 'full_name': '"""<NAME>"""', 'email': '"""<EMAIL>"""', 'phone_number': '"""100-330-497"""', 'people_in_group': '(4)', 'adult_male_count': '(0)', 'adult_female_count': '(2)', 'children_ages': '[1, 10]', 'have_pets': '(False)', 'how_long_to_stay': '"""1w"""', 'updated_by_id': '"""782962fc-dc11-4a33-8f08-b7da532dd40d"""'}), "(guid=guid, full_name='<NAME>', email='<EMAIL>', phone_number=\n '100-330-497', people_in_group=4, adult_male_count=0,\n adult_female_count=2, children_ages=[1, 10], have_pets=False,\n how_long_to_stay='1w', updated_by_id='782962fc-dc11-4a33-8f08-b7da532dd40d'\n )\n", (641, 913), False, 'from kokon.orm import Guest\n'), ((1417, 1432), 'tests.helpers.admin_session', 'admin_session', ([], {}), '()\n', (1430, 1432), False, 'from tests.helpers import admin_session\n'), ((1340, 1371), 'pytest.raises', 'pytest.raises', (['ProgrammingError'], {}), '(ProgrammingError)\n', (1353, 1371), False, 'import pytest\n'), ((1529, 1550), 'sqlalchemy_continuum.utils.count_versions', 'count_versions', (['guest'], {}), '(guest)\n', (1543, 1550), False, 'from sqlalchemy_continuum.utils import count_versions\n'), ((478, 482), 'kokon.utils.db.DB', 'DB', ([], {}), '()\n', (480, 482), False, 'from kokon.utils.db import DB\n')] |
import torch
import torch.nn as nn
from torchvision import transforms as ttf
class RandAugment(nn.Module):
def __init__(self, N, M):
super().__init__()
"""
rotate
shear x
shear y
translate y
translate x
autoContrast
sharpness
identity
contrast
color
brightness
        equalize
solarize
posterize
"""
self.N = N
self.M = M
self.aug_list = [Rotate, ShearX, ShearY, TranslateX, TranslateY, AutoContrast,
Sharpness, Identity, Contrast, Color, Brightness, Equalize,
Solarize, Posterize]
def forward(self, img):
self.aug_index = torch.randperm(len(self.aug_list))[:self.N]
self.augmentations = nn.ModuleList([])
for aug_id in self.aug_index:
self.augmentations.append(self.aug_list[aug_id](self.M))
self.augmentations = nn.Sequential(*self.augmentations)
return self.augmentations(img)
class Rotate(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M
def forward(self, img):
return ttf.functional.rotate(img, self.angle)
class ShearX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [self.angle, 0])
class ShearY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [0, self.angle])
class TranslateX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[0]  # tensor input: size() is a method
        except TypeError:
            max_size = img.size[0]  # PIL.Image input: size is a (width, height) tuple
return ttf.functional.affine(img, 0, [(max_size - 1) / 10 * self.M, 0], 1, [0, 0])
class TranslateY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[1]  # tensor input: size() is a method
        except TypeError:
            max_size = img.size[1]  # PIL.Image input: size is a (width, height) tuple
return ttf.functional.affine(img, 0, [0, (max_size - 1) / 10 * self.M], 1, [0, 0])
class AutoContrast(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.autocontrast(img)
class Sharpness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_sharpness(img, self.M / 5.)
class Identity(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return img
class Contrast(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_contrast(img, self.M / 5.)
class Color(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_saturation(img, self.M / 5.)
class Brightness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_brightness(img, self.M / 5.)
class Equalize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.equalize(img)
class Solarize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.solarize(img, (10 - self.M) * 25.5)
class Posterize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.posterize(img, round((10 - self.M) / 10 * 8)) | [
"torchvision.transforms.functional.solarize",
"torchvision.transforms.functional.adjust_contrast",
"torchvision.transforms.functional.rotate",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torchvision.transforms.functional.autocontrast",
"torchvision.transforms.functional.equalize",
"torchvision.transforms.functional.adjust_saturation",
"torchvision.transforms.functional.affine",
"torchvision.transforms.functional.adjust_brightness",
"torchvision.transforms.functional.adjust_sharpness"
] | [((818, 835), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (831, 835), True, 'import torch.nn as nn\n'), ((972, 1006), 'torch.nn.Sequential', 'nn.Sequential', (['*self.augmentations'], {}), '(*self.augmentations)\n', (985, 1006), True, 'import torch.nn as nn\n'), ((1233, 1271), 'torchvision.transforms.functional.rotate', 'ttf.functional.rotate', (['img', 'self.angle'], {}), '(img, self.angle)\n', (1254, 1271), True, 'from torchvision import transforms as ttf\n'), ((1465, 1522), 'torchvision.transforms.functional.affine', 'ttf.functional.affine', (['img', '(0)', '[0, 0]', '(1)', '[self.angle, 0]'], {}), '(img, 0, [0, 0], 1, [self.angle, 0])\n', (1486, 1522), True, 'from torchvision import transforms as ttf\n'), ((1716, 1773), 'torchvision.transforms.functional.affine', 'ttf.functional.affine', (['img', '(0)', '[0, 0]', '(1)', '[0, self.angle]'], {}), '(img, 0, [0, 0], 1, [0, self.angle])\n', (1737, 1773), True, 'from torchvision import transforms as ttf\n'), ((2039, 2114), 'torchvision.transforms.functional.affine', 'ttf.functional.affine', (['img', '(0)', '[(max_size - 1) / 10 * self.M, 0]', '(1)', '[0, 0]'], {}), '(img, 0, [(max_size - 1) / 10 * self.M, 0], 1, [0, 0])\n', (2060, 2114), True, 'from torchvision import transforms as ttf\n'), ((2380, 2455), 'torchvision.transforms.functional.affine', 'ttf.functional.affine', (['img', '(0)', '[0, (max_size - 1) / 10 * self.M]', '(1)', '[0, 0]'], {}), '(img, 0, [0, (max_size - 1) / 10 * self.M], 1, [0, 0])\n', (2401, 2455), True, 'from torchvision import transforms as ttf\n'), ((2610, 2642), 'torchvision.transforms.functional.autocontrast', 'ttf.functional.autocontrast', (['img'], {}), '(img)\n', (2637, 2642), True, 'from torchvision import transforms as ttf\n'), ((2794, 2844), 'torchvision.transforms.functional.adjust_sharpness', 'ttf.functional.adjust_sharpness', (['img', '(self.M / 5.0)'], {}), '(img, self.M / 5.0)\n', (2825, 2844), True, 'from torchvision import transforms as ttf\n'), ((3148, 3197), 'torchvision.transforms.functional.adjust_contrast', 'ttf.functional.adjust_contrast', (['img', '(self.M / 5.0)'], {}), '(img, self.M / 5.0)\n', (3178, 3197), True, 'from torchvision import transforms as ttf\n'), ((3344, 3395), 'torchvision.transforms.functional.adjust_saturation', 'ttf.functional.adjust_saturation', (['img', '(self.M / 5.0)'], {}), '(img, self.M / 5.0)\n', (3376, 3395), True, 'from torchvision import transforms as ttf\n'), ((3547, 3598), 'torchvision.transforms.functional.adjust_brightness', 'ttf.functional.adjust_brightness', (['img', '(self.M / 5.0)'], {}), '(img, self.M / 5.0)\n', (3579, 3598), True, 'from torchvision import transforms as ttf\n'), ((3748, 3776), 'torchvision.transforms.functional.equalize', 'ttf.functional.equalize', (['img'], {}), '(img)\n', (3771, 3776), True, 'from torchvision import transforms as ttf\n'), ((3927, 3977), 'torchvision.transforms.functional.solarize', 'ttf.functional.solarize', (['img', '((10 - self.M) * 25.5)'], {}), '(img, (10 - self.M) * 25.5)\n', (3950, 3977), True, 'from torchvision import transforms as ttf\n')] |
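A minimal usage sketch of the RandAugment module defined above; the image path and the N/M values are placeholder assumptions.

from PIL import Image

# Hypothetical input -- any RGB PIL image (or uint8 image tensor) accepted by
# torchvision's functional transforms would do.
img = Image.open("example.jpg")        # placeholder path (assumption)
augmenter = RandAugment(N=2, M=9)      # apply 2 randomly chosen ops at magnitude 9
augmented = augmenter(img)            # returns the transformed image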
# -*- coding: utf-8 -*-
"""
Copyright 2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from ikats.client.datamodel_client import DatamodelClient
from ikats.client.datamodel_stub import DatamodelStub
from ikats.client.opentsdb_client import OpenTSDBClient
from ikats.client.opentsdb_stub import OpenTSDBStub
from ikats.exceptions import (IkatsConflictError, IkatsException,
IkatsNotFoundError)
from ikats.lib import (MDType, check_is_fid_valid, check_is_valid_epoch,
check_type)
from ikats.manager.generic_mgr_ import IkatsGenericApiEndPoint
from ikats.objects import Timeseries
NON_INHERITABLE_PATTERN = re.compile("^qual(.)*|ikats(.)*|funcId")
class IkatsTimeseriesMgr(IkatsGenericApiEndPoint):
"""
Ikats EndPoint specific to Timeseries management
"""
def __init__(self, *args, **kwargs):
super(IkatsTimeseriesMgr, self).__init__(*args, **kwargs)
if self.api.emulate:
self.tsdb_client = OpenTSDBStub(session=self.api.session)
self.dm_client = DatamodelStub(session=self.api.session)
else:
self.tsdb_client = OpenTSDBClient(session=self.api.session)
self.dm_client = DatamodelClient(session=self.api.session)
def new(self, fid=None, data=None):
"""
Create an empty local Timeseries (if fid not provided)
        If fid is set, the identifier will be created in the database
:param fid: Identifier to create (if provided)
:param data: List of data points as numpy array or python 2D-list
:type fid: str
:type data: list or np.array
:returns: the Timeseries object
:rtype: Timeseries
:raises IkatsConflictError: if *fid* already present in database (use `get` instead of `new`)
"""
if fid is None:
ts = Timeseries(api=self.api)
else:
ts = self._create_ref(fid=fid)
ts.data = data
return ts
def get(self, fid=None, tsuid=None):
"""
Returns an existing Timeseries object by providing either its FID or TSUID (only one shall be provided)
:param fid: FID of the Timeseries
:param tsuid: TSUID of the Timeseries
:type fid: str
:type tsuid: str
:returns: The Timeseries object
:rtype: Timeseries
:raises ValueError: if both *fid* and *tsuid* are set (or none of them)
:raises IkatsNotFoundError: if the identifier was not found in database
"""
if bool(fid) == bool(tsuid):
raise ValueError("fid and tsuid are mutually exclusive")
if fid is not None:
tsuid = self.fid2tsuid(fid=fid, raise_exception=True)
return Timeseries(api=self.api, tsuid=tsuid, fid=fid)
def save(self, ts, parent=None, generate_metadata=True, raise_exception=True):
"""
Import timeseries data points to database or update an existing timeseries with new points
if *generate_metadata* is set or if no TSUID is present in *ts* object,
the *ikats_start_date*, *ikats_end_date* and *qual_nb_points* will be
overwritten by the first point date, last point date and number of points in *ts.data*
*parent* is the original timeseries where metadata shall be taken from
(except intrinsic ones, eg. *qual_nb_points*)
If the timeseries is a new one (object has no tsuid defined), the computation of the metadata is forced
Returns a boolean status of the action (True means "OK", False means "errors occurred")
:param ts: Timeseries object containing information about what to create
:param parent: (optional) Timeseries object of inheritance parent
:param generate_metadata: Generate metadata (set to False when doing partial import) (Default: True)
:param raise_exception: Indicates if exceptions shall be raised (True, default) or not (False)
:type ts: Timeseries
:type parent: Timeseries
:type generate_metadata: bool
:type raise_exception: bool
:returns: the status of the action
:rtype: bool
:raises TypeError: if *ts* is not a valid Timeseries object
"""
# Input checks
check_type(ts, Timeseries, "ts", raise_exception=True)
check_type(parent, [Timeseries, None], "parent", raise_exception=True)
check_type(generate_metadata, bool, "generate_metadata", raise_exception=True)
check_is_fid_valid(ts.fid, raise_exception=True)
try:
# First, we shall create the TSUID reference (if not provided)
if ts.tsuid is None:
ts.tsuid = self._create_ref(ts.fid).tsuid
# If the TS is fresh, we force the creation of the metadata
generate_metadata = True
# Add points to this TSUID
start_date, end_date, nb_points = self.tsdb_client.add_points(tsuid=ts.tsuid, data=ts.data)
if generate_metadata:
# ikats_start_date
self.dm_client.metadata_update(tsuid=ts.tsuid, name='ikats_start_date', value=start_date,
data_type=MDType.DATE, force_create=True)
ts.metadata.set(name='ikats_start_date', value=start_date, dtype=MDType.DATE)
# ikats_end_date
self.dm_client.metadata_update(tsuid=ts.tsuid, name='ikats_end_date', value=end_date,
data_type=MDType.DATE, force_create=True)
ts.metadata.set(name='ikats_end_date', value=end_date, dtype=MDType.DATE)
# qual_nb_points
self.dm_client.metadata_update(tsuid=ts.tsuid, name='qual_nb_points', value=nb_points,
data_type=MDType.NUMBER, force_create=True)
ts.metadata.set(name='qual_nb_points', value=nb_points, dtype=MDType.NUMBER)
# Inherit from parent when it is defined
if parent is not None:
self.inherit(ts=ts, parent=parent)
except IkatsException:
if raise_exception:
raise
return False
return True
def delete(self, ts, raise_exception=True):
"""
Delete the data corresponding to a *ts* object and all associated metadata
Note that if timeseries belongs to a dataset it will not be removed
Returns a boolean status of the action (True means "OK", False means "errors occurred")
:param ts: tsuid of the timeseries or Timeseries Object to remove
:param raise_exception: (optional) Indicates if IKATS exceptions shall be raised (True, default) or not (False)
:type ts: str or Timeseries
:type raise_exception: bool
:returns: the status of the action
:rtype: bool
:raises TypeError: if *ts* is not a str nor a Timeseries
:raises IkatsNotFoundError: if timeseries is not found on server
:raises IkatsConflictError: if timeseries belongs to -at least- one dataset
"""
check_type(value=ts, allowed_types=[str, Timeseries], var_name="ts", raise_exception=True)
tsuid = ts
if isinstance(ts, Timeseries):
if ts.tsuid is not None:
tsuid = ts.tsuid
elif ts.fid is not None:
try:
tsuid = self.dm_client.get_tsuid_from_fid(fid=ts.fid)
except IkatsException:
if raise_exception:
raise
return False
else:
raise ValueError("Timeseries object shall have set at least tsuid or fid")
return self.dm_client.ts_delete(tsuid=tsuid, raise_exception=raise_exception)
def list(self):
"""
Get the list of all Timeseries from database
.. note::
This action may take a while
:returns: the list of Timeseries object
:rtype: list
"""
return [Timeseries(tsuid=x["tsuid"], fid=x["funcId"], api=self.api) for x in
self.dm_client.get_ts_list()]
def fetch(self, ts, sd=None, ed=None):
"""
Retrieve the data corresponding to a Timeseries object as a numpy array
.. note::
if omitted, *sd* (start date) and *ed* (end date) will be retrieved from metadata
if you want a fixed windowed range, set *sd* and *ed* manually (but be aware that the TS may be
not completely gathered)
:param ts: Timeseries object
:param sd: (optional) starting date (timestamp in ms from epoch)
:param ed: (optional) ending date (timestamp in ms from epoch)
:type ts: Timeseries
:type sd: int or None
:type ed: int or None
:returns: The data points
:rtype: np.array
:raises TypeError: if *ts* is not a Timeseries object
:raises TypeError: if *sd* is not an int
:raises TypeError: if *ed* is not an int
:raises IkatsNotFoundError: if TS data points couldn't be retrieved properly
"""
check_type(value=ts, allowed_types=Timeseries, var_name="ts", raise_exception=True)
check_type(value=sd, allowed_types=[int, None], var_name="sd", raise_exception=True)
check_type(value=ed, allowed_types=[int, None], var_name="ed", raise_exception=True)
if sd is None:
sd = ts.metadata.get(name="ikats_start_date")
check_is_valid_epoch(value=sd, raise_exception=True)
if ed is None:
ed = ts.metadata.get(name="ikats_end_date")
check_is_valid_epoch(value=ed, raise_exception=True)
try:
data_points = self.tsdb_client.get_ts_by_tsuid(tsuid=ts.tsuid, sd=sd, ed=ed)
# Return the points
return data_points
except ValueError:
raise IkatsNotFoundError("TS data points couldn't be retrieved properly")
def inherit(self, ts, parent):
"""
Make a timeseries inherit of parent's metadata according to a pattern (not all metadata inherited)
:param ts: TS object in IKATS (which will inherit)
:param parent: TS object in IKATS of inheritance parent
:type ts: Timeseries
:param parent: Timeseries
"""
try:
result = self.dm_client.metadata_get_typed([parent.tsuid])[parent.tsuid]
for meta_name in result:
# Flag metadata as "not deleted"
result[meta_name]["deleted"] = False
if not NON_INHERITABLE_PATTERN.match(meta_name):
self.dm_client.metadata_create(tsuid=ts.tsuid, name=meta_name, value=result[meta_name]["value"],
data_type=MDType(result[meta_name]["dtype"]),
force_update=True)
except(ValueError, TypeError, SystemError) as exception:
self.api.session.log.warning(
"Can't get metadata of parent TS (%s), nothing will be inherited; \nreason: %s", parent, exception)
def find_from_meta(self, constraint=None):
"""
From a metadata constraint provided in parameter, the method get a TS list matching these constraints
Example of constraint:
| {
| frequency: [1, 2],
| flight_phase: 8
| }
will find the TS having the following metadata:
| (frequency == 1 OR frequency == 2)
| AND
| flight_phase == 8
:param constraint: constraint definition
:type constraint: dict
:returns: list of TSUID matching the constraints
:rtype: dict
:raises TypeError: if *constraint* is not a dict
"""
return self.dm_client.get_ts_from_metadata(constraint=constraint)
def tsuid2fid(self, tsuid, raise_exception=True):
"""
Retrieve the functional ID associated to the tsuid param.
:param tsuid: one tsuid value
        :param raise_exception: Specifies whether an exception shall be raised when nothing is found
:type tsuid: str
:type raise_exception: bool
:returns: retrieved functional identifier value
:rtype: str
:raises TypeError: if tsuid is not a defined str
:raises ValueError: no functional ID matching the tsuid
:raises ServerError: http answer with status : 500 <= status < 600
"""
try:
return self.dm_client.get_func_id_from_tsuid(tsuid=tsuid)
except IkatsException:
if raise_exception:
raise
return None
def fid2tsuid(self, fid, raise_exception=True):
"""
Retrieve the TSUID associated to the functional ID param.
:param fid: the functional Identifier
        :param raise_exception: Specifies whether an exception shall be raised when nothing is found
:type fid: str
:type raise_exception: bool
:returns: retrieved TSUID value or None if not found
:rtype: str
:raises TypeError: if fid is not str
:raises IkatsNotFoundError: no match
"""
check_is_fid_valid(fid=fid)
# Check if fid already associated to an existing tsuid
try:
return self.dm_client.get_tsuid_from_fid(fid=fid)
except IkatsException:
if raise_exception:
raise
return None
def _create_ref(self, fid):
"""
        Create a timeseries reference in the temporal database and associate it with fid
        for future use.
Shall be used before create method in case of parallel creation of data (import data via spark for example)
:param fid: Functional Identifier of the TS in Ikats
:type fid: str
:returns: A prepared Timeseries object
:rtype: Timeseries
:raises IkatsConflictError: if FID already present in database (use `get` instead of `new`)
"""
check_is_fid_valid(fid, raise_exception=True)
try:
# Check if fid already associated to an existing tsuid
tsuid = self.dm_client.get_tsuid_from_fid(fid=fid)
# if fid already exists in database, raise a conflict exception
raise IkatsConflictError("%s already associated to an existing tsuid: %s" % (fid, tsuid))
except IkatsNotFoundError:
# Creation of a new tsuid
metric, tags = self.tsdb_client.gen_metric_tags()
tsuid = self.tsdb_client.assign_metric(metric=metric, tags=tags)
# finally importing tsuid/fid pair in non temporal database
self.dm_client.import_fid(tsuid=tsuid, fid=fid)
return Timeseries(tsuid=tsuid, fid=fid, api=self.api)
| [
"ikats.exceptions.IkatsConflictError",
"ikats.lib.check_type",
"ikats.lib.MDType",
"ikats.client.opentsdb_stub.OpenTSDBStub",
"ikats.client.opentsdb_client.OpenTSDBClient",
"ikats.client.datamodel_client.DatamodelClient",
"ikats.exceptions.IkatsNotFoundError",
"ikats.lib.check_is_valid_epoch",
"ikats.objects.Timeseries",
"ikats.lib.check_is_fid_valid",
"ikats.client.datamodel_stub.DatamodelStub",
"re.compile"
] | [((1181, 1221), 're.compile', 're.compile', (['"""^qual(.)*|ikats(.)*|funcId"""'], {}), "('^qual(.)*|ikats(.)*|funcId')\n", (1191, 1221), False, 'import re\n'), ((3260, 3306), 'ikats.objects.Timeseries', 'Timeseries', ([], {'api': 'self.api', 'tsuid': 'tsuid', 'fid': 'fid'}), '(api=self.api, tsuid=tsuid, fid=fid)\n', (3270, 3306), False, 'from ikats.objects import Timeseries\n'), ((4784, 4838), 'ikats.lib.check_type', 'check_type', (['ts', 'Timeseries', '"""ts"""'], {'raise_exception': '(True)'}), "(ts, Timeseries, 'ts', raise_exception=True)\n", (4794, 4838), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((4847, 4917), 'ikats.lib.check_type', 'check_type', (['parent', '[Timeseries, None]', '"""parent"""'], {'raise_exception': '(True)'}), "(parent, [Timeseries, None], 'parent', raise_exception=True)\n", (4857, 4917), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((4926, 5004), 'ikats.lib.check_type', 'check_type', (['generate_metadata', 'bool', '"""generate_metadata"""'], {'raise_exception': '(True)'}), "(generate_metadata, bool, 'generate_metadata', raise_exception=True)\n", (4936, 5004), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((5013, 5061), 'ikats.lib.check_is_fid_valid', 'check_is_fid_valid', (['ts.fid'], {'raise_exception': '(True)'}), '(ts.fid, raise_exception=True)\n', (5031, 5061), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((7663, 7757), 'ikats.lib.check_type', 'check_type', ([], {'value': 'ts', 'allowed_types': '[str, Timeseries]', 'var_name': '"""ts"""', 'raise_exception': '(True)'}), "(value=ts, allowed_types=[str, Timeseries], var_name='ts',\n raise_exception=True)\n", (7673, 7757), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((9706, 9793), 'ikats.lib.check_type', 'check_type', ([], {'value': 'ts', 'allowed_types': 'Timeseries', 'var_name': '"""ts"""', 'raise_exception': '(True)'}), "(value=ts, allowed_types=Timeseries, var_name='ts',\n raise_exception=True)\n", (9716, 9793), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((9798, 9886), 'ikats.lib.check_type', 'check_type', ([], {'value': 'sd', 'allowed_types': '[int, None]', 'var_name': '"""sd"""', 'raise_exception': '(True)'}), "(value=sd, allowed_types=[int, None], var_name='sd',\n raise_exception=True)\n", (9808, 9886), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((9891, 9979), 'ikats.lib.check_type', 'check_type', ([], {'value': 'ed', 'allowed_types': '[int, None]', 'var_name': '"""ed"""', 'raise_exception': '(True)'}), "(value=ed, allowed_types=[int, None], var_name='ed',\n raise_exception=True)\n", (9901, 9979), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((10066, 10118), 'ikats.lib.check_is_valid_epoch', 'check_is_valid_epoch', ([], {'value': 'sd', 'raise_exception': '(True)'}), '(value=sd, raise_exception=True)\n', (10086, 10118), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((10207, 10259), 'ikats.lib.check_is_valid_epoch', 'check_is_valid_epoch', ([], {'value': 'ed', 'raise_exception': '(True)'}), '(value=ed, raise_exception=True)\n', (10227, 10259), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((13809, 
13836), 'ikats.lib.check_is_fid_valid', 'check_is_fid_valid', ([], {'fid': 'fid'}), '(fid=fid)\n', (13827, 13836), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((14663, 14708), 'ikats.lib.check_is_fid_valid', 'check_is_fid_valid', (['fid'], {'raise_exception': '(True)'}), '(fid, raise_exception=True)\n', (14681, 14708), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n'), ((1512, 1550), 'ikats.client.opentsdb_stub.OpenTSDBStub', 'OpenTSDBStub', ([], {'session': 'self.api.session'}), '(session=self.api.session)\n', (1524, 1550), False, 'from ikats.client.opentsdb_stub import OpenTSDBStub\n'), ((1580, 1619), 'ikats.client.datamodel_stub.DatamodelStub', 'DatamodelStub', ([], {'session': 'self.api.session'}), '(session=self.api.session)\n', (1593, 1619), False, 'from ikats.client.datamodel_stub import DatamodelStub\n'), ((1665, 1705), 'ikats.client.opentsdb_client.OpenTSDBClient', 'OpenTSDBClient', ([], {'session': 'self.api.session'}), '(session=self.api.session)\n', (1679, 1705), False, 'from ikats.client.opentsdb_client import OpenTSDBClient\n'), ((1735, 1776), 'ikats.client.datamodel_client.DatamodelClient', 'DatamodelClient', ([], {'session': 'self.api.session'}), '(session=self.api.session)\n', (1750, 1776), False, 'from ikats.client.datamodel_client import DatamodelClient\n'), ((2374, 2398), 'ikats.objects.Timeseries', 'Timeseries', ([], {'api': 'self.api'}), '(api=self.api)\n', (2384, 2398), False, 'from ikats.objects import Timeseries\n'), ((8598, 8657), 'ikats.objects.Timeseries', 'Timeseries', ([], {'tsuid': "x['tsuid']", 'fid': "x['funcId']", 'api': 'self.api'}), "(tsuid=x['tsuid'], fid=x['funcId'], api=self.api)\n", (8608, 8657), False, 'from ikats.objects import Timeseries\n'), ((14946, 15033), 'ikats.exceptions.IkatsConflictError', 'IkatsConflictError', (["('%s already associated to an existing tsuid: %s' % (fid, tsuid))"], {}), "('%s already associated to an existing tsuid: %s' % (fid,\n tsuid))\n", (14964, 15033), False, 'from ikats.exceptions import IkatsConflictError, IkatsException, IkatsNotFoundError\n'), ((10472, 10539), 'ikats.exceptions.IkatsNotFoundError', 'IkatsNotFoundError', (['"""TS data points couldn\'t be retrieved properly"""'], {}), '("TS data points couldn\'t be retrieved properly")\n', (10490, 10539), False, 'from ikats.exceptions import IkatsConflictError, IkatsException, IkatsNotFoundError\n'), ((15396, 15442), 'ikats.objects.Timeseries', 'Timeseries', ([], {'tsuid': 'tsuid', 'fid': 'fid', 'api': 'self.api'}), '(tsuid=tsuid, fid=fid, api=self.api)\n', (15406, 15442), False, 'from ikats.objects import Timeseries\n'), ((11377, 11411), 'ikats.lib.MDType', 'MDType', (["result[meta_name]['dtype']"], {}), "(result[meta_name]['dtype'])\n", (11383, 11411), False, 'from ikats.lib import MDType, check_is_fid_valid, check_is_valid_epoch, check_type\n')] |
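A hedged usage sketch of the manager above, assuming `mgr` is an IkatsTimeseriesMgr obtained from an already-configured IKATS api object; the FID and data points are invented values.

# mgr: IkatsTimeseriesMgr instance (assumption -- construction depends on the enclosing api object)
ts = mgr.new(fid="DEMO_TS_1", data=[[1500000000000, 1.0], [1500000001000, 2.0]])  # invented points
mgr.save(ts)             # pushes the points and generates the ikats_* metadata
points = mgr.fetch(ts)   # reads them back using the generated start/end dates
mgr.delete(ts)           # cleanup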
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import tensorflow as tf
from keras.models import model_from_json
import json
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
import pandas as pd
from copy import deepcopy
import itertools
from utils import load_data
# import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
def load_model_helper(path, model_base_name):
# return load_model(path)
with open(os.path.join(path, f'{model_base_name}.architecture.json'), 'r') as json_file:
loaded_model_json = json_file.read()
m = model_from_json(loaded_model_json)
m.load_weights(os.path.join(path, f'{model_base_name}.weights.h5'))
return m
def thres(v, thr: float = 0.5):
v_ = np.array(deepcopy(v))
v_[v_ >= thr] = 1
v_[v_ < thr] = 0
return v_
if __name__ == '__main__':
tf.keras.backend.clear_session()
# path_base = '/Users/dmitryduev/_caltech/python/deep-asteroids/'
path_base = './'
with open(os.path.join(path_base, 'service/code/config.json')) as f:
config = json.load(f)
# models = config['models']
models = config['models_201901']
model_names = list(models.keys())
path_models = os.path.join(path_base, 'service/models')
c_families = {'rb': '5b96af9c0354c9000b0aea36',
'sl': '5b99b2c6aec3c500103a14de',
'kd': '5be0ae7958830a0018821794',
'os': '5c05bbdc826480000a95c0bf'}
# c_families = {'rb': '5b96af9c0354c9000b0aea36',
# 'sl': '5b99b2c6aec3c500103a14de',
# 'kd': '5be0ae7958830a0018821794'}
# c_families = {'rb': '5b96af9c0354c9000b0aea36'}
path_data = './data'
# mpl colors:
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',
# u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']
# line styles:
line_styles = ['-', '--', ':']
# thresholds
score_thresholds = [0.99, 0.9, 0.5, 0.1, 0.01]
# ROC
fig = plt.figure(figsize=(14, 5))
fig.subplots_adjust(bottom=0.09, left=0.05, right=0.70, top=0.98, wspace=0.2, hspace=0.2)
lw = 1.6
# ROCs
ax = fig.add_subplot(1, 2, 1)
# zoomed ROCs
ax2 = fig.add_subplot(1, 2, 2)
ax.plot([0, 1], [0, 1], color='#333333', lw=lw, linestyle='--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate (Contamination)')
ax.set_ylabel('True Positive Rate (Sensitivity)')
# ax.legend(loc="lower right")
# ax.legend(loc="best")
ax.grid(True)
ax2.set_xlim([0.0, .2])
ax2.set_ylim([0.8, 1.0])
ax2.set_xlabel('False Positive Rate (Contamination)')
ax2.set_ylabel('True Positive Rate (Sensitivity)')
# ax.legend(loc="lower right")
# ax2.legend(loc="best")
ax2.grid(True)
# Confusion matrices
fig2 = plt.figure()
fig2.subplots_adjust(bottom=0.06, left=0.01, right=1.0, top=0.93, wspace=0.0, hspace=0.12)
cn = 0
for cfi, c_family in enumerate(c_families):
project_id = c_families[c_family]
print(c_family, project_id)
# load data
x_train, y_train, x_test, y_test, classes = load_data(path=path_data,
project_id=project_id,
binary=True,
grayscale=True,
resize=(144, 144),
test_size=0.1,
verbose=True,
random_state=42)
mn = [m_ for m_ in model_names if c_family in m_]
n_mn = len(mn)
for ii, model_name in enumerate(mn):
print(f'loading model {model_name}: {models[model_name]}')
m = load_model_helper(path_models, models[model_name])
y = m.predict(x_test, batch_size=32, verbose=True)
# for thr in (0.5, 0.9):
for thr in (0.5,):
labels_pred = thres(y, thr=thr)
confusion_matr = confusion_matrix(y_test, labels_pred)
confusion_matr_normalized = confusion_matr.astype('float') / confusion_matr.sum(axis=1)[:, np.newaxis]
print(f'Threshold: {thr}')
print('Confusion matrix:')
print(confusion_matr)
print('Normalized confusion matrix:')
print(confusion_matr_normalized)
fpr, tpr, thresholds = roc_curve(y_test, y)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, line_styles[ii], color=colors[cfi], lw=lw)
ax2.plot(fpr, tpr, line_styles[ii], color=colors[cfi], lw=lw,
label=f'{model_name} curve (area = {roc_auc:.5f})')
# plot thresholds
for it, thr in enumerate(score_thresholds):
x_ = np.interp(thr, thresholds[::-1], fpr)
y_ = np.interp(thr, thresholds[::-1], tpr)
# print(thr, x_, y_)
if cfi == 0 and ii == 0:
ax.plot(x_, y_, '.', markersize=8, color=colors[-(it + 1)], label=f'Threshold: {1-thr:.2f}')
else:
ax.plot(x_, y_, '.', markersize=8, color=colors[-(it + 1)])
ax2.plot(x_, y_, 'o', markersize=8, color=colors[-(it + 1)])
# plot confusion matrices
ax_ = fig2.add_subplot(3, 2 * len(c_families), ii * 8 + cfi * 2 + 1)
ax2_ = fig2.add_subplot(3, 2 * len(c_families), ii * 8 + cfi * 2 + 2)
ax_.imshow(confusion_matr, interpolation='nearest', cmap=plt.cm.Blues)
ax2_.imshow(confusion_matr_normalized, interpolation='nearest', cmap=plt.cm.Blues)
tick_marks = np.arange(2)
# ax_.set_xticks(tick_marks, tick_marks)
# ax_.set_yticks(tick_marks, tick_marks)
# ax2_.set_xticks(tick_marks, tick_marks)
# ax2_.set_yticks(tick_marks, tick_marks)
#
# ax_.xaxis.set_visible(False)
# ax_.yaxis.set_visible(False)
# ax2_.xaxis.set_visible(False)
# ax2_.yaxis.set_visible(False)
ax_.axis('off')
ax2_.axis('off')
thresh = confusion_matr.max() / 2.
thresh_norm = confusion_matr_normalized.max() / 2.
for i, j in itertools.product(range(confusion_matr.shape[0]), range(confusion_matr.shape[1])):
ax_.text(j, i, format(confusion_matr[i, j], 'd'),
horizontalalignment="center",
color="white" if confusion_matr[i, j] > thresh else "black")
ax2_.text(j, i, format(confusion_matr_normalized[i, j], '.2f'),
horizontalalignment="center",
color="white" if confusion_matr_normalized[i, j] > thresh_norm else "black")
# if ii == 0:
# break
ax.legend(loc='lower right')
ax2.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
fig.savefig(f'./roc_rb_sl_kd.png', dpi=300)
fig2.savefig(f'./cm_rb_sl_kd.png', dpi=300)
plt.show()
| [
"copy.deepcopy",
"json.load",
"matplotlib.pyplot.show",
"utils.load_data",
"sklearn.metrics.roc_curve",
"tensorflow.keras.backend.clear_session",
"sklearn.metrics.auc",
"matplotlib.pyplot.figure",
"keras.models.model_from_json",
"numpy.arange",
"numpy.interp",
"sklearn.metrics.confusion_matrix",
"os.path.join"
] | [((669, 703), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (684, 703), False, 'from keras.models import model_from_json\n'), ((948, 980), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (978, 980), True, 'import tensorflow as tf\n'), ((1304, 1345), 'os.path.join', 'os.path.join', (['path_base', '"""service/models"""'], {}), "(path_base, 'service/models')\n", (1316, 1345), False, 'import os\n'), ((2157, 2184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 5)'}), '(figsize=(14, 5))\n', (2167, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3011), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3009, 3011), True, 'import matplotlib.pyplot as plt\n'), ((7445, 7455), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7453, 7455), True, 'import matplotlib.pyplot as plt\n'), ((723, 774), 'os.path.join', 'os.path.join', (['path', 'f"""{model_base_name}.weights.h5"""'], {}), "(path, f'{model_base_name}.weights.h5')\n", (735, 774), False, 'import os\n'), ((842, 853), 'copy.deepcopy', 'deepcopy', (['v'], {}), '(v)\n', (850, 853), False, 'from copy import deepcopy\n'), ((1164, 1176), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1173, 1176), False, 'import json\n'), ((3321, 3468), 'utils.load_data', 'load_data', ([], {'path': 'path_data', 'project_id': 'project_id', 'binary': '(True)', 'grayscale': '(True)', 'resize': '(144, 144)', 'test_size': '(0.1)', 'verbose': '(True)', 'random_state': '(42)'}), '(path=path_data, project_id=project_id, binary=True, grayscale=\n True, resize=(144, 144), test_size=0.1, verbose=True, random_state=42)\n', (3330, 3468), False, 'from utils import load_data\n'), ((537, 595), 'os.path.join', 'os.path.join', (['path', 'f"""{model_base_name}.architecture.json"""'], {}), "(path, f'{model_base_name}.architecture.json')\n", (549, 595), False, 'import os\n'), ((1088, 1139), 'os.path.join', 'os.path.join', (['path_base', '"""service/code/config.json"""'], {}), "(path_base, 'service/code/config.json')\n", (1100, 1139), False, 'import os\n'), ((4801, 4821), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y'], {}), '(y_test, y)\n', (4810, 4821), False, 'from sklearn.metrics import roc_curve, auc, confusion_matrix\n'), ((4844, 4857), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (4847, 4857), False, 'from sklearn.metrics import roc_curve, auc, confusion_matrix\n'), ((6062, 6074), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (6071, 6074), True, 'import numpy as np\n'), ((4379, 4416), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'labels_pred'], {}), '(y_test, labels_pred)\n', (4395, 4416), False, 'from sklearn.metrics import roc_curve, auc, confusion_matrix\n'), ((5187, 5224), 'numpy.interp', 'np.interp', (['thr', 'thresholds[::-1]', 'fpr'], {}), '(thr, thresholds[::-1], fpr)\n', (5196, 5224), True, 'import numpy as np\n'), ((5246, 5283), 'numpy.interp', 'np.interp', (['thr', 'thresholds[::-1]', 'tpr'], {}), '(thr, thresholds[::-1], tpr)\n', (5255, 5283), True, 'import numpy as np\n')] |
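For reference, a tiny illustration of the thres() helper defined in the script above (score values are arbitrary):

scores = np.array([0.05, 0.60, 0.99])
print(thres(scores, thr=0.5))   # -> [0. 1. 1.]
print(thres(scores, thr=0.9))   # -> [0. 0. 1.]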
# coding: utf-8
import pprint
import six
from enum import Enum
class SubscriptionVersion:
swagger_types = {
'activated_on': 'datetime',
'billing_currency': 'str',
'component_configurations': 'list[SubscriptionComponentConfiguration]',
'created_on': 'datetime',
'expected_last_period_end': 'datetime',
'failed_on': 'datetime',
'id': 'int',
'language': 'str',
'linked_space_id': 'int',
'planned_purge_date': 'datetime',
'planned_termination_date': 'datetime',
'product_version': 'SubscriptionProductVersion',
'selected_components': 'list[SubscriptionProductComponent]',
'state': 'SubscriptionVersionState',
'subscription': 'Subscription',
'terminated_on': 'datetime',
'terminating_on': 'datetime',
'termination_issued_on': 'datetime',
'version': 'int',
}
attribute_map = {
'activated_on': 'activatedOn','billing_currency': 'billingCurrency','component_configurations': 'componentConfigurations','created_on': 'createdOn','expected_last_period_end': 'expectedLastPeriodEnd','failed_on': 'failedOn','id': 'id','language': 'language','linked_space_id': 'linkedSpaceId','planned_purge_date': 'plannedPurgeDate','planned_termination_date': 'plannedTerminationDate','product_version': 'productVersion','selected_components': 'selectedComponents','state': 'state','subscription': 'subscription','terminated_on': 'terminatedOn','terminating_on': 'terminatingOn','termination_issued_on': 'terminationIssuedOn','version': 'version',
}
_activated_on = None
_billing_currency = None
_component_configurations = None
_created_on = None
_expected_last_period_end = None
_failed_on = None
_id = None
_language = None
_linked_space_id = None
_planned_purge_date = None
_planned_termination_date = None
_product_version = None
_selected_components = None
_state = None
_subscription = None
_terminated_on = None
_terminating_on = None
_termination_issued_on = None
_version = None
def __init__(self, **kwargs):
self.discriminator = None
self.activated_on = kwargs.get('activated_on', None)
self.billing_currency = kwargs.get('billing_currency', None)
self.component_configurations = kwargs.get('component_configurations', None)
self.created_on = kwargs.get('created_on', None)
self.expected_last_period_end = kwargs.get('expected_last_period_end', None)
self.failed_on = kwargs.get('failed_on', None)
self.id = kwargs.get('id', None)
self.language = kwargs.get('language', None)
self.linked_space_id = kwargs.get('linked_space_id', None)
self.planned_purge_date = kwargs.get('planned_purge_date', None)
self.planned_termination_date = kwargs.get('planned_termination_date', None)
self.product_version = kwargs.get('product_version', None)
self.selected_components = kwargs.get('selected_components', None)
self.state = kwargs.get('state', None)
self.subscription = kwargs.get('subscription', None)
self.terminated_on = kwargs.get('terminated_on', None)
self.terminating_on = kwargs.get('terminating_on', None)
self.termination_issued_on = kwargs.get('termination_issued_on', None)
self.version = kwargs.get('version', None)
@property
def activated_on(self):
"""Gets the activated_on of this SubscriptionVersion.
:return: The activated_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._activated_on
@activated_on.setter
def activated_on(self, activated_on):
"""Sets the activated_on of this SubscriptionVersion.
:param activated_on: The activated_on of this SubscriptionVersion.
:type: datetime
"""
self._activated_on = activated_on
@property
def billing_currency(self):
"""Gets the billing_currency of this SubscriptionVersion.
The subscriber is charged in the billing currency. The billing currency has to be one of the enabled currencies on the subscription product.
:return: The billing_currency of this SubscriptionVersion.
:rtype: str
"""
return self._billing_currency
@billing_currency.setter
def billing_currency(self, billing_currency):
"""Sets the billing_currency of this SubscriptionVersion.
The subscriber is charged in the billing currency. The billing currency has to be one of the enabled currencies on the subscription product.
:param billing_currency: The billing_currency of this SubscriptionVersion.
:type: str
"""
self._billing_currency = billing_currency
@property
def component_configurations(self):
"""Gets the component_configurations of this SubscriptionVersion.
:return: The component_configurations of this SubscriptionVersion.
:rtype: list[SubscriptionComponentConfiguration]
"""
return self._component_configurations
@component_configurations.setter
def component_configurations(self, component_configurations):
"""Sets the component_configurations of this SubscriptionVersion.
:param component_configurations: The component_configurations of this SubscriptionVersion.
:type: list[SubscriptionComponentConfiguration]
"""
self._component_configurations = component_configurations
@property
def created_on(self):
"""Gets the created_on of this SubscriptionVersion.
:return: The created_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._created_on
@created_on.setter
def created_on(self, created_on):
"""Sets the created_on of this SubscriptionVersion.
:param created_on: The created_on of this SubscriptionVersion.
:type: datetime
"""
self._created_on = created_on
@property
def expected_last_period_end(self):
"""Gets the expected_last_period_end of this SubscriptionVersion.
The expected last period end is the date on which the projected end date of the last period is. This is only a projection and as such the actual date may be different.
:return: The expected_last_period_end of this SubscriptionVersion.
:rtype: datetime
"""
return self._expected_last_period_end
@expected_last_period_end.setter
def expected_last_period_end(self, expected_last_period_end):
"""Sets the expected_last_period_end of this SubscriptionVersion.
The expected last period end is the date on which the projected end date of the last period is. This is only a projection and as such the actual date may be different.
:param expected_last_period_end: The expected_last_period_end of this SubscriptionVersion.
:type: datetime
"""
self._expected_last_period_end = expected_last_period_end
@property
def failed_on(self):
"""Gets the failed_on of this SubscriptionVersion.
:return: The failed_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._failed_on
@failed_on.setter
def failed_on(self, failed_on):
"""Sets the failed_on of this SubscriptionVersion.
:param failed_on: The failed_on of this SubscriptionVersion.
:type: datetime
"""
self._failed_on = failed_on
@property
def id(self):
"""Gets the id of this SubscriptionVersion.
The ID is the primary key of the entity. The ID identifies the entity uniquely.
:return: The id of this SubscriptionVersion.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SubscriptionVersion.
The ID is the primary key of the entity. The ID identifies the entity uniquely.
:param id: The id of this SubscriptionVersion.
:type: int
"""
self._id = id
@property
def language(self):
"""Gets the language of this SubscriptionVersion.
:return: The language of this SubscriptionVersion.
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this SubscriptionVersion.
:param language: The language of this SubscriptionVersion.
:type: str
"""
self._language = language
@property
def linked_space_id(self):
"""Gets the linked_space_id of this SubscriptionVersion.
The linked space id holds the ID of the space to which the entity belongs to.
:return: The linked_space_id of this SubscriptionVersion.
:rtype: int
"""
return self._linked_space_id
@linked_space_id.setter
def linked_space_id(self, linked_space_id):
"""Sets the linked_space_id of this SubscriptionVersion.
The linked space id holds the ID of the space to which the entity belongs to.
:param linked_space_id: The linked_space_id of this SubscriptionVersion.
:type: int
"""
self._linked_space_id = linked_space_id
@property
def planned_purge_date(self):
"""Gets the planned_purge_date of this SubscriptionVersion.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
:return: The planned_purge_date of this SubscriptionVersion.
:rtype: datetime
"""
return self._planned_purge_date
@planned_purge_date.setter
def planned_purge_date(self, planned_purge_date):
"""Sets the planned_purge_date of this SubscriptionVersion.
The planned purge date indicates when the entity is permanently removed. When the date is null the entity is not planned to be removed.
:param planned_purge_date: The planned_purge_date of this SubscriptionVersion.
:type: datetime
"""
self._planned_purge_date = planned_purge_date
@property
def planned_termination_date(self):
"""Gets the planned_termination_date of this SubscriptionVersion.
:return: The planned_termination_date of this SubscriptionVersion.
:rtype: datetime
"""
return self._planned_termination_date
@planned_termination_date.setter
def planned_termination_date(self, planned_termination_date):
"""Sets the planned_termination_date of this SubscriptionVersion.
:param planned_termination_date: The planned_termination_date of this SubscriptionVersion.
:type: datetime
"""
self._planned_termination_date = planned_termination_date
@property
def product_version(self):
"""Gets the product_version of this SubscriptionVersion.
:return: The product_version of this SubscriptionVersion.
:rtype: SubscriptionProductVersion
"""
return self._product_version
@product_version.setter
def product_version(self, product_version):
"""Sets the product_version of this SubscriptionVersion.
:param product_version: The product_version of this SubscriptionVersion.
:type: SubscriptionProductVersion
"""
self._product_version = product_version
@property
def selected_components(self):
"""Gets the selected_components of this SubscriptionVersion.
:return: The selected_components of this SubscriptionVersion.
:rtype: list[SubscriptionProductComponent]
"""
return self._selected_components
@selected_components.setter
def selected_components(self, selected_components):
"""Sets the selected_components of this SubscriptionVersion.
:param selected_components: The selected_components of this SubscriptionVersion.
:type: list[SubscriptionProductComponent]
"""
self._selected_components = selected_components
@property
def state(self):
"""Gets the state of this SubscriptionVersion.
:return: The state of this SubscriptionVersion.
:rtype: SubscriptionVersionState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this SubscriptionVersion.
:param state: The state of this SubscriptionVersion.
:type: SubscriptionVersionState
"""
self._state = state
@property
def subscription(self):
"""Gets the subscription of this SubscriptionVersion.
:return: The subscription of this SubscriptionVersion.
:rtype: Subscription
"""
return self._subscription
@subscription.setter
def subscription(self, subscription):
"""Sets the subscription of this SubscriptionVersion.
:param subscription: The subscription of this SubscriptionVersion.
:type: Subscription
"""
self._subscription = subscription
@property
def terminated_on(self):
"""Gets the terminated_on of this SubscriptionVersion.
:return: The terminated_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._terminated_on
@terminated_on.setter
def terminated_on(self, terminated_on):
"""Sets the terminated_on of this SubscriptionVersion.
:param terminated_on: The terminated_on of this SubscriptionVersion.
:type: datetime
"""
self._terminated_on = terminated_on
@property
def terminating_on(self):
"""Gets the terminating_on of this SubscriptionVersion.
:return: The terminating_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._terminating_on
@terminating_on.setter
def terminating_on(self, terminating_on):
"""Sets the terminating_on of this SubscriptionVersion.
:param terminating_on: The terminating_on of this SubscriptionVersion.
:type: datetime
"""
self._terminating_on = terminating_on
@property
def termination_issued_on(self):
"""Gets the termination_issued_on of this SubscriptionVersion.
:return: The termination_issued_on of this SubscriptionVersion.
:rtype: datetime
"""
return self._termination_issued_on
@termination_issued_on.setter
def termination_issued_on(self, termination_issued_on):
"""Sets the termination_issued_on of this SubscriptionVersion.
:param termination_issued_on: The termination_issued_on of this SubscriptionVersion.
:type: datetime
"""
self._termination_issued_on = termination_issued_on
@property
def version(self):
"""Gets the version of this SubscriptionVersion.
The version number indicates the version of the entity. The version is incremented whenever the entity is changed.
:return: The version of this SubscriptionVersion.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this SubscriptionVersion.
The version number indicates the version of the entity. The version is incremented whenever the entity is changed.
:param version: The version of this SubscriptionVersion.
:type: int
"""
self._version = version
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(SubscriptionVersion, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SubscriptionVersion):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [
"six.iteritems"
] | [((16159, 16192), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (16172, 16192), False, 'import six\n')] |
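A small sketch of how the generated model above can be instantiated and serialized; the field values are invented.

v = SubscriptionVersion(id=42, language="en-US", version=7)          # invented values
d = v.to_dict()                                                      # unset attributes serialize as None
print(d["id"], d["language"], d["version"])                          # 42 en-US 7
print(v == SubscriptionVersion(id=42, language="en-US", version=7))  # True (__eq__ compares __dict__)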
"""
Example to demonstrate creating a pivot table from the output of zonal stats CLI
"""
import time
import pandas
# Return a pipe-delimited combination of the values from every column preceding 'zone'
def get_key(row):
key_parts = []
for col in row.keys():
if col == 'zone':
return '|'.join(key_parts)
key_parts.append(str(row[col]))
start = time.time()
infilename = '/tmp/test.csv'
df = pandas.read_csv(infilename)
df['key'] = df.apply(lambda x: get_key(x), axis=1)
sub_df = df[['key', 'zone', 'mean']]
pivot = sub_df.pivot(index='zone', columns='key')
# Need to manually create the CSV instead of letting pandas do it, due to composite header
# we don't want
with open('/tmp/pivot.csv', 'w') as outfile:
header = ','.join( ['zone'] + pivot.columns.levels[1].tolist())
csv_data = pivot.to_csv(None, index=True, header=False)
outfile.write(header + '\n' + csv_data)
print('Elapsed: {0:.2f}'.format(time.time() - start)) | [
"pandas.read_csv",
"time.time"
] | [((397, 408), 'time.time', 'time.time', ([], {}), '()\n', (406, 408), False, 'import time\n'), ((447, 474), 'pandas.read_csv', 'pandas.read_csv', (['infilename'], {}), '(infilename)\n', (462, 474), False, 'import pandas\n'), ((977, 988), 'time.time', 'time.time', ([], {}), '()\n', (986, 988), False, 'import time\n')] |
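The manual header surgery above is needed because DataFrame.pivot keeps the leftover value column as an extra column level; a sketch of an alternative using pivot_table with an explicit values argument avoids the composite header (assuming one row per (zone, key) pair, since pivot_table would otherwise average duplicates):

pivot_flat = sub_df.pivot_table(index='zone', columns='key', values='mean')
pivot_flat.to_csv('/tmp/pivot_flat.csv')   # columns are the key values directly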
import abc
import asyncio
from typing import Collection
class Job(abc.ABC):
__slots__ = ()
@property
@abc.abstractmethod
def is_running(self) -> bool:
...
@abc.abstractmethod
async def close(self, *, timeout: float = 0.5) -> bool:
...
class SingleTaskJob(Job):
__slots__ = ("_task",)
def __init__(self, task: asyncio.Task[None]):
self._task = task
@property
def is_running(self) -> bool:
return not self._task.done()
async def close(self, *, timeout: float = 0.5) -> bool:
if self._task.done():
return True
self._task.cancel()
await asyncio.wait({self._task}, timeout=timeout)
return self._task.done()
class CombinedJob(Job):
__slots__ = ("_jobs",)
def __init__(self, jobs: Collection[Job]):
self._jobs = jobs
@property
def is_running(self) -> bool:
return all(job.is_running for job in self._jobs)
async def close(self, *, timeout: float = 0.5) -> bool:
tasks = [asyncio.create_task(job.close(timeout=timeout)) for job in self._jobs]
closed = True
for task in tasks:
closed &= await task
return closed
| [
"asyncio.wait"
] | [((655, 698), 'asyncio.wait', 'asyncio.wait', (['{self._task}'], {'timeout': 'timeout'}), '({self._task}, timeout=timeout)\n', (667, 698), False, 'import asyncio\n')] |
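A runnable sketch of how the SingleTaskJob/CombinedJob wrappers above might be driven; the worker coroutine is a stand-in.

import asyncio

async def _worker() -> None:             # stand-in for a real background loop
    while True:
        await asyncio.sleep(0.1)

async def main() -> None:
    job = CombinedJob([SingleTaskJob(asyncio.create_task(_worker())) for _ in range(3)])
    print(job.is_running)                # True while all wrapped tasks are alive
    print(await job.close(timeout=0.5))  # cancels the tasks; True once they are done

asyncio.run(main())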
# provide status of all jobs
import ulmodb
dbname = "ulmodb.db"
db = ulmodb.UlmoDB(dbname)
| [
"ulmodb.UlmoDB"
] | [((74, 95), 'ulmodb.UlmoDB', 'ulmodb.UlmoDB', (['dbname'], {}), '(dbname)\n', (87, 95), False, 'import ulmodb\n')] |
import numpy as np
import nimfa
V = np.random.rand(40, 100)
nmf = nimfa.Nmf(V, seed="nndsvd", rank=10, max_iter=12, update='euclidean',
objective='fro')
nmf_fit = nmf()
| [
"numpy.random.rand",
"nimfa.Nmf"
] | [((38, 61), 'numpy.random.rand', 'np.random.rand', (['(40)', '(100)'], {}), '(40, 100)\n', (52, 61), True, 'import numpy as np\n'), ((68, 158), 'nimfa.Nmf', 'nimfa.Nmf', (['V'], {'seed': '"""nndsvd"""', 'rank': '(10)', 'max_iter': '(12)', 'update': '"""euclidean"""', 'objective': '"""fro"""'}), "(V, seed='nndsvd', rank=10, max_iter=12, update='euclidean',\n objective='fro')\n", (77, 158), False, 'import nimfa\n')] |
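The fitted object returned above exposes the factor matrices; retrieving them uses what should be nimfa's documented accessors:

W = nmf_fit.basis()   # 40 x 10 basis matrix
H = nmf_fit.coef()    # 10 x 100 mixture coefficients
print(W.shape, H.shape)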
#!/usr/bin/env python
import os
import json
import sys
import argparse
def _find_config_file():
config = 'etc/minicondas.json'
while not os.path.isfile(config):
config = '../{}'.format(config)
if len(config) > 70:
raise Exception('Cannot locate config file "etc/minicondas.json".')
return config
def _get(py_version, miniconda_version, attribute):
config = _find_config_file()
with open(config) as reader:
data = json.load(reader)
if miniconda_version == 'latest':
_all_versions = [i.split('-')[1] for i in data['minicondas'][py_version].keys()]
m_start = 'm' + py_version.replace('py', '')[0]
_av_ints = sorted([[int(i) for i in item.split('.')] for item in _all_versions])
_all_versions = ['.'.join([str(item) for item in items]) for items in _av_ints]
miniconda_version = m_start + '-' + _all_versions[-1]
    try:
        attr = data['minicondas'][py_version][miniconda_version][attribute]
    except KeyError:
        print('Could not find {} attribute for python version: "{}"'.format(attribute, py_version))
        sys.exit(1)
    return attr
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("py_version", type=str, help="Python version")
parser.add_argument("attribute", type=str, choices=['url', 'md5', 'short_id'],
help="Attribute")
parser.add_argument('-m', '--miniconda-version', default='latest',
help='Add Miniconda version (or use "latest").',
type=str)
args = parser.parse_args()
print(_get(args.py_version, args.miniconda_version, args.attribute))
| [
"os.path.isfile",
"json.load",
"argparse.ArgumentParser"
] | [((1189, 1214), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1212, 1214), False, 'import argparse\n'), ((150, 172), 'os.path.isfile', 'os.path.isfile', (['config'], {}), '(config)\n', (164, 172), False, 'import os\n'), ((484, 501), 'json.load', 'json.load', (['reader'], {}), '(reader)\n', (493, 501), False, 'import json\n')] |
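Hypothetical shell invocations of the script above; the script filename, the python-version key (e.g. "py36") and the Miniconda version string are placeholders whose real values depend on the repository layout and on etc/minicondas.json. The --miniconda-version flag defaults to "latest".

python minicondas_lookup.py py36 url
python minicondas_lookup.py py36 md5 --miniconda-version m3-4.5.11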
from transformers import GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|pad|>')
tokenizer.pad_token = tokenizer.eos_token
| [
"transformers.GPT2Tokenizer.from_pretrained"
] | [((51, 169), 'transformers.GPT2Tokenizer.from_pretrained', 'GPT2Tokenizer.from_pretrained', (['"""gpt2"""'], {'bos_token': '"""<|startoftext|>"""', 'eos_token': '"""<|endoftext|>"""', 'pad_token': '"""<|pad|>"""'}), "('gpt2', bos_token='<|startoftext|>',\n eos_token='<|endoftext|>', pad_token='<|pad|>')\n", (80, 169), False, 'from transformers import GPT2Tokenizer\n')] |
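A short usage sketch of the tokenizer configured above (batch encoding with padding; PyTorch is assumed to be installed for the 'pt' tensors):

batch = tokenizer(
    ['<|startoftext|>first example<|endoftext|>',
     '<|startoftext|>a somewhat longer second example<|endoftext|>'],
    padding=True, truncation=True, return_tensors='pt')
print(batch['input_ids'].shape)   # (2, longest_sequence_in_batch); padding uses the eos token id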
import matplotlib.pyplot as plt
from CuteFlower2.data_loading import cd
import os
def save_hist_plot(history, name="test", path=None):
train_errors = history.history['loss']
val_errors = history.history['val_loss']
plt.style.use('bmh')
plt.plot(range(len(train_errors)), train_errors, 'g-', label="Train")
plt.plot(range(len(val_errors)), val_errors, 'r-', label="Val")
plt.legend()
if path is None:
path = os.getcwd()+"/Data"
with cd(path):
plt.savefig("Train_val_graph_{}".format(name))
plt.clf()
def intermediate_drawer(name, path=None, draw=False):
train_loss = []
val_loss = []
plt.style.use('bmh')
def drawer(logs):
train_loss.append(logs['loss'])
val_loss.append(logs['val_loss'])
loss_range = range(len(train_loss))
        plt.ion()  # Not sure whether this needs to happen in every loop; it is probably fine outside, but whatever
train_loss_plot, = plt.plot(
loss_range, train_loss, label='Training Loss')
val_loss_plot, = plt.plot(
loss_range, val_loss, label='Validation loss')
plt.legend(handles=[train_loss_plot, val_loss_plot])
if not draw:
plt.show()
plt.pause(0.001)
if path is not None:
with cd(path):
plt.savefig("Train_val_graph_{}".format(name))
plt.clf()
return drawer
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.getcwd",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.style.use",
"CuteFlower2.data_loading.cd",
"matplotlib.pyplot.pause"
] | [((231, 251), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""bmh"""'], {}), "('bmh')\n", (244, 251), True, 'import matplotlib.pyplot as plt\n'), ((398, 410), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (408, 410), True, 'import matplotlib.pyplot as plt\n'), ((659, 679), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""bmh"""'], {}), "('bmh')\n", (672, 679), True, 'import matplotlib.pyplot as plt\n'), ((478, 486), 'CuteFlower2.data_loading.cd', 'cd', (['path'], {}), '(path)\n', (480, 486), False, 'from CuteFlower2.data_loading import cd\n'), ((551, 560), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (558, 560), True, 'import matplotlib.pyplot as plt\n'), ((840, 849), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (847, 849), True, 'import matplotlib.pyplot as plt\n'), ((969, 1024), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_range', 'train_loss'], {'label': '"""Training Loss"""'}), "(loss_range, train_loss, label='Training Loss')\n", (977, 1024), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1118), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_range', 'val_loss'], {'label': '"""Validation loss"""'}), "(loss_range, val_loss, label='Validation loss')\n", (1071, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1193), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[train_loss_plot, val_loss_plot]'}), '(handles=[train_loss_plot, val_loss_plot])\n', (1151, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1406), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((448, 459), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (457, 459), False, 'import os\n'), ((1228, 1238), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1236, 1238), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1267), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (1260, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1323), 'CuteFlower2.data_loading.cd', 'cd', (['path'], {}), '(path)\n', (1317, 1323), False, 'from CuteFlower2.data_loading import cd\n')] |
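One way (an assumption about the surrounding training code, which is not shown here) to hook the drawer above into a Keras run is via LambdaCallback:

from keras.callbacks import LambdaCallback

drawer = intermediate_drawer(name="test")            # defaults: path=None, draw=False -> interactive plot
plot_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: drawer(logs))
# model.fit(x_train, y_train, validation_data=(x_val, y_val),
#           epochs=10, callbacks=[plot_cb])        # model and data are placeholders;
#                                                   # validation_data is required for 'val_loss'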
# Imports
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torch.utils.data
from torchvision import datasets, models, transforms
from collections import OrderedDict
import os
import argparse
# Functions
def arg_parser():
'''
Takes in command-line arguments and parses them for usage of our Python functions.
'''
parser = argparse.ArgumentParser(description='ImageClassifier Params')
parser.add_argument('--architecture',
type=str,
help='Architecture and model from torchvision.models as strings: vgg16 and densenet121 supported.')
parser.add_argument('--learning_rate',
type=float,
help='Learning Rate for our Neural Network. Default is 0.001.')
parser.add_argument('--hidden',
type=int,
help='Hidden Units for our Neural Network. Default is 1024.')
parser.add_argument('--dropout',
type=float,
help='Dropout value for our Dropout layers. Default is 0.05.')
parser.add_argument('--epochs',
type=int,
help='Epochs for Neural Network training. Default is 1.')
parser.add_argument('--gpu',
type=str,
help='Use GPU (Y for Yes; N for No). Default is Y.')
args = parser.parse_args()
return(args)
def load(data_dir='./flowers'):
'''
    Loads the train, test and validation datasets and builds a dataloader for each.
    Returns the three dataloaders (train, test, valid) followed by the three
    datasets (train, test, valid), in that order.
'''
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomVerticalFlip(0.5),
transforms.RandomRotation(75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
'valid': transforms.Compose([transforms.RandomResizedCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
'test': transforms.Compose([transforms.RandomResizedCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
}
image_datasets = {
'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']),
'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']),
'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid'])
}
dataloaders = {
'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True),
'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True),
'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True)
}
return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid'])
def set_device(gpu):
'''
Sets the device based on the parameter. Also handles most edge-cases.
Returns the device variable to be used later.
'''
if gpu=='Y':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device=='cpu':
print('CUDA not available; using CPU')
else:
print('Using GPU')
elif gpu=='N':
device = 'cpu'
print('Using CPU')
else:
print('Incorrect Value for GPU entered.')
        print('Fallback to default GPU setting: Y')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device=='cpu':
print('CUDA not available; using CPU')
else:
print('Using GPU')
return(device)
def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001):
'''
    Takes in device, architecture, dropout, hidden units and learning_rate.
    Returns the torch model together with its criterion and optimizer.
'''
if architecture:
if architecture=='vgg16':
model = models.vgg16(pretrained=True)
model.name = architecture
input_ = 25088
elif architecture=='densenet121':
model = models.densenet121(pretrained=True)
model.name = architecture
input_ = 1024
else:
print('Invalid input: Please use \'vgg16\' or \'densenet121\'')
else:
print('No architecture given. Fallback to default architecture: \'vgg16\'')
model = models.vgg16(pretrained=True)
model.name = architecture
input_ = 25088
if hidden:
hidden = hidden
else:
print('No number of hidden inputs specified. Fallback to default inputs: 1024')
hidden = 1024
if learning_rate:
learning_rate = learning_rate
else:
print('No learning_rate specified. Fallback to default learning_rate: 0.001')
learning_rate = 0.001
if dropout:
dropout = dropout
else:
print('No dropout specified. Fallback to default dropout: 0.05')
dropout = 0.05
for parameter in model.parameters():
parameter.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_, hidden)),
('relu', nn.ReLU()),
('dropout1', nn.Dropout(dropout)),
('fc2', nn.Linear(hidden, 102, bias=True)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.to(device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
return(model, criterion, optimizer)
def validation(model, valid_loader, criterion, device):
'''
Validation function for our model.
Returns validation loss and accuracy.
'''
valid_loss = 0
valid_acc = 0
for ii, (inputs, labels) in enumerate(valid_loader):
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
valid_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
valid_acc += equality.type(torch.FloatTensor).mean()
return valid_loss, valid_acc
def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50):
'''
Trains our Neural Network model
'''
steps = 0
if epochs:
epochs = epochs
else:
print('No epochs specified. Fallback to default epochs: 1')
epochs = 1
print('Training Model for {} epochs'.format(epochs))
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(train_loader):
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
model.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
with torch.no_grad():
valid_loss, valid_acc = validation(model, valid_loader, criterion, device)
training_loss = round(float(running_loss/print_every), 3)
valid_loss = round(float(valid_loss/len(valid_loader)), 3)
valid_acc = round(float(valid_acc/len(valid_loader)), 3)
print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss: {} :: Validation Accuracy: {}'
.format(e+1, epochs, training_loss, valid_loss, valid_acc))
running_loss = 0
model.train()
print('Model training complete!')
return(model)
def validate(model, test_loader, device):
'''
Prints validation accuracy of model
'''
correct = 0
total = 0
with torch.no_grad():
model.eval()
for data in test_loader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = round(100 * correct / total, 2)
print('Accuracy: {}'.format(accuracy))
def save(model, train_data, epochs, architecture):
'''
Saves the model to the given path.
'''
model.class_to_idx = train_data.class_to_idx
if epochs:
epochs = epochs
else:
epochs = 1
checkpoint = {'state_dict': model.state_dict(),
'classifier': model.classifier,
'class_to_idx': model.class_to_idx,
'epochs': epochs,
'architecture': architecture}
file = 'checkpoint.pth'
torch.save(checkpoint, file)
print('Model saved to {}!'.format(file))
# Main
def main():
args = arg_parser()
if args.gpu:
gpu=args.gpu
else:
        print('No input given. Fallback to default GPU setting: Y')
gpu='Y'
device = set_device(gpu)
train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load()
model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate)
model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs)
validate(model=model, test_loader=test_loader, device=device)
save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture)
if __name__ == '__main__':
main()
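
# Example invocation (hypothetical script name and values; the flags mirror arg_parser above):
#   python train.py --architecture vgg16 --learning_rate 0.001 --hidden 1024 --dropout 0.05 --epochs 1 --gpu Y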
| [
"torch.nn.Dropout",
"argparse.ArgumentParser",
"torch.nn.NLLLoss",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomRotation",
"torch.exp",
"torch.nn.Linear",
"torchvision.models.vgg16",
"torch.nn.LogSoftmax",
"torchvision.datasets.ImageFolder",
"torch.max",
"torch.cuda.is_available",
"torch.nn.ReLU",
"torchvision.models.densenet121",
"torchvision.transforms.RandomVerticalFlip",
"torch.save",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.ToTensor"
] | [((435, 496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ImageClassifier Params"""'}), "(description='ImageClassifier Params')\n", (458, 496), False, 'import argparse\n'), ((6141, 6153), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (6151, 6153), False, 'from torch import nn\n'), ((9731, 9759), 'torch.save', 'torch.save', (['checkpoint', 'file'], {}), '(checkpoint, file)\n', (9741, 9759), False, 'import torch\n'), ((2929, 2996), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': "data_transforms['train']"}), "(train_dir, transform=data_transforms['train'])\n", (2949, 2996), False, 'from torchvision import datasets, models, transforms\n'), ((3014, 3079), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': "data_transforms['test']"}), "(test_dir, transform=data_transforms['test'])\n", (3034, 3079), False, 'from torchvision import datasets, models, transforms\n'), ((3098, 3165), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valid_dir'], {'transform': "data_transforms['valid']"}), "(valid_dir, transform=data_transforms['valid'])\n", (3118, 3165), False, 'from torchvision import datasets, models, transforms\n'), ((3210, 3280), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['train']", '(64)'], {'shuffle': '(True)'}), "(image_datasets['train'], 64, shuffle=True)\n", (3237, 3280), False, 'import torch\n'), ((3298, 3367), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['test']", '(32)'], {'shuffle': '(True)'}), "(image_datasets['test'], 32, shuffle=True)\n", (3325, 3367), False, 'import torch\n'), ((3386, 3456), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['valid']", '(32)'], {'shuffle': '(True)'}), "(image_datasets['valid'], 32, shuffle=True)\n", (3413, 3456), False, 'import torch\n'), ((5115, 5144), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (5127, 5144), False, 'from torchvision import datasets, models, transforms\n'), ((6734, 6751), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (6743, 6751), False, 'import torch\n'), ((8782, 8797), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8795, 8797), False, 'import torch\n'), ((4658, 4687), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4670, 4687), False, 'from torchvision import datasets, models, transforms\n'), ((9016, 9042), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (9025, 9042), False, 'import torch\n'), ((1916, 1949), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1944, 1949), False, 'from torchvision import datasets, models, transforms\n'), ((1982, 2016), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', (['(0.5)'], {}), '(0.5)\n', (2011, 2016), False, 'from torchvision import datasets, models, transforms\n'), ((2049, 2078), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(75)'], {}), '(75)\n', (2074, 2078), False, 'from torchvision import datasets, models, transforms\n'), ((2111, 2132), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2130, 2132), False, 'from torchvision import datasets, models, transforms\n'), ((2165, 2231), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', 
'[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2185, 2231), False, 'from torchvision import datasets, models, transforms\n'), ((2336, 2369), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (2364, 2369), False, 'from torchvision import datasets, models, transforms\n'), ((2403, 2424), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2422, 2424), False, 'from torchvision import datasets, models, transforms\n'), ((2458, 2524), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2478, 2524), False, 'from torchvision import datasets, models, transforms\n'), ((2628, 2661), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (2656, 2661), False, 'from torchvision import datasets, models, transforms\n'), ((2693, 2714), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2712, 2714), False, 'from torchvision import datasets, models, transforms\n'), ((2746, 2812), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2766, 2812), False, 'from torchvision import datasets, models, transforms\n'), ((3833, 3858), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3856, 3858), False, 'import torch\n'), ((4815, 4850), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (4833, 4850), False, 'from torchvision import datasets, models, transforms\n'), ((4206, 4231), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4229, 4231), False, 'import torch\n'), ((5866, 5891), 'torch.nn.Linear', 'nn.Linear', (['input_', 'hidden'], {}), '(input_, hidden)\n', (5875, 5891), False, 'from torch import nn\n'), ((5911, 5920), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5918, 5920), False, 'from torch import nn\n'), ((5944, 5963), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5954, 5963), False, 'from torch import nn\n'), ((5982, 6015), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(102)'], {'bias': '(True)'}), '(hidden, 102, bias=True)\n', (5991, 6015), False, 'from torch import nn\n'), ((6037, 6057), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6050, 6057), False, 'from torch import nn\n'), ((7938, 7953), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7951, 7953), False, 'import torch\n')] |
import argparse
import sys
from os.path import join
from os import chdir
import subprocess
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--sge', type=str, default='nosge')
parser.add_argument('-l', '--filelist', type=str, default='')
parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools')
args = parser.parse_args()
chdir(args.zr_root)
command = './run_disc {} {}'.format(args.sge,args.filelist)
print(command)
subprocess.call(command.split())
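    # Example invocation (hypothetical file list; flags mirror the argparse setup above):
    #   python run_disc_wrapper.py -s nosge -l my_wavs.lst -zr_root /home/korhan/Desktop/zerospeech2017/track2/src/ZRTools
    # which changes into the ZRTools directory and runs `./run_disc nosge my_wavs.lst`.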
| [
"argparse.ArgumentParser",
"os.chdir"
] | [((131, 156), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (154, 156), False, 'import argparse\n'), ((438, 457), 'os.chdir', 'chdir', (['args.zr_root'], {}), '(args.zr_root)\n', (443, 457), False, 'from os import chdir\n')] |
#!/usr/bin/env python3
import re
from gensim.models import word2vec
from gensim.models import KeyedVectors
from operator import itemgetter
filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt'
fileTrainRead = []
#read the file by line
with open(filePath) as fileTrainRaw:
for line in fileTrainRaw:
fileTrainRead.append(line)
#load the pre-trained word2vec vector set
model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True)
#for each word, look up its 10 nearest neighbours in the embedding space and count how often each neighbour occurs
wordFreq = {}
for i in range(len(fileTrainRead)):
    words = fileTrainRead[i].split()  # each element of fileTrainRead is one pre-segmented line of words
for j, word in enumerate(words):
# word = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+".decode("utf8"), "",word)
# word = re.sub("[【】╮╯▽╰╭★→「」]+".decode("utf8"),"",word)
# word = re.sub("!,❤。~《》:()【】「」?”“;:、".decode("utf8"),"",word)
if not re.match(r"[【】╮╯▽╰╭★→「」\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+", word):
try:
similarWords = model.most_similar(word, topn=10)
for idx, similarWord in enumerate(similarWords):
if similarWord[0] not in wordFreq:
wordFreq[similarWord[0]] = 1
else:
wordFreq[similarWord[0]] += 1
            except KeyError:
                # word not in the embedding vocabulary; skip it
                pass
top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]]
for _, word in enumerate(top10Words):
print (word[0])
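
# For reference, a sketch of how a compatible corpusWord2Vec.bin could be trained with the
# word2vec module imported above (corpus path reused; training parameters are assumptions):
#   sentences = word2vec.LineSentence(filePath)
#   w2v = word2vec.Word2Vec(sentences, size=100, window=5, min_count=5)
#   w2v.wv.save_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True)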
| [
"operator.itemgetter",
"re.match",
"gensim.models.KeyedVectors.load_word2vec_format"
] | [((393, 484), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['"""/home/ubuntu/danmu/corpusWord2Vec.bin"""'], {'binary': '(True)'}), "('/home/ubuntu/danmu/corpusWord2Vec.bin',\n binary=True)\n", (426, 484), False, 'from gensim.models import KeyedVectors\n'), ((931, 1053), 're.match', 're.match', (['"""[【】╮╯▽╰╭★→「」\\\\s+\\\\.\\\\!\\\\/_,$%^*(+\\\\"\\\\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+"""', 'word'], {}), '(\n \'[【】╮╯▽╰╭★→「」\\\\s+\\\\.\\\\!\\\\/_,$%^*(+\\\\"\\\\\\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\'\n , word)\n', (939, 1053), False, 'import re\n'), ((1470, 1483), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1480, 1483), False, 'from operator import itemgetter\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
from Param import nc, nz, device
class Model512(nn.Module):
def __init__(self,nz=nz,nef=8,ngf=8,nc=nc):
super(Model512, self).__init__()
self.nz=nz
self.nc=nc
## Encoder Part ##
self.encode = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True),
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
nn.Sigmoid()
)
## #####
## Decoder Part ##
self.decode3 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True),
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True),
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True),
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
# state size. (ngf*4) x 64 x 64
self.conv_layer128 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
# state size. (ngf*2) x 128 x 128
self.conv_layer256 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
#nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
self.output_layer = nn.Tanh() #nn.Sigmoid()
def forward(self, input):
x = self.encode(input)
x = self.decode3(x)
out128 = self.output_layer(self.conv_layer128(x))
x = self.decode2(x)
out256 = self.output_layer(self.conv_layer256(x))
out512 = self.decode1(x)
return out128, out256, out512
""" VAE with three losses at three scales of the decoder """
class VAE_Model512(nn.Module):
def __init__(self,nz=nz,ngf=8,nef=8,nc=3):
super(VAE_Model512, self).__init__()
self.nz=nz
self.nc=nc
## Encoder Part ##
self.encode = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True),
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
nn.Sigmoid()
)
## #####
## Decoder Part ##
self.decode3 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True),
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True),
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True),
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
# state size. (ngf*4) x 64 x 64
self.conv_layer128 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
# state size. (ngf*2) x 128 x 128
self.conv_layer256 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
#nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
self.output_layer = nn.Tanh() #nn.Sigmoid()
self.fc1 = nn.Linear(nz, 64)
self.fc2 = nn.Linear(nz, 64)
self.fc3 = nn.Linear(64, nz)
def reparametrize(self, mu, logvar):
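        # Reparameterization trick: draw eps ~ N(0, I) and return z = mu + sigma * eps,
        # where sigma = exp(0.5 * logvar), so gradients can flow back through mu and logvar.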
std = logvar.mul(0.5).exp_()
eps = torch.FloatTensor(std.size()).normal_().to(device)
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, input):
b_size = input.shape[0]
x = self.encode(input).view(b_size, nz)
mu = self.fc1(x) #fc1
logvar = self.fc2(x) #fc2
z = self.reparametrize(mu, logvar)
z = self.fc3(z).reshape(-1, self.nz, 1, 1) #fc3
#del x
x = self.decode3(z)
out128 = self.output_layer(self.conv_layer128(x))
x = self.decode2(x)
out256 = self.output_layer(self.conv_layer256(x))
out512 = self.decode1(x)
return out128, out256, out512, mu, logvar
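

if __name__ == "__main__":
    # Quick shape check (a sketch, not part of the original module; assumes Param's `device`
    # is usable and uses the default nc=3 channels at 512x512 resolution).
    vae = VAE_Model512().to(device)
    x = torch.randn(2, 3, 512, 512, device=device)
    out128, out256, out512, mu, logvar = vae(x)
    # Expected: (2, 3, 128, 128), (2, 3, 256, 256), (2, 3, 512, 512)
    print(out128.shape, out256.shape, out512.shape)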
| [
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.Tanh",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid"
] | [((4241, 4250), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (4248, 4250), True, 'import torch.nn as nn\n'), ((8780, 8789), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8787, 8789), True, 'import torch.nn as nn\n'), ((8824, 8841), 'torch.nn.Linear', 'nn.Linear', (['nz', '(64)'], {}), '(nz, 64)\n', (8833, 8841), True, 'import torch.nn as nn\n'), ((8861, 8878), 'torch.nn.Linear', 'nn.Linear', (['nz', '(64)'], {}), '(nz, 64)\n', (8870, 8878), True, 'import torch.nn as nn\n'), ((8898, 8915), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'nz'], {}), '(64, nz)\n', (8907, 8915), True, 'import torch.nn as nn\n'), ((9076, 9089), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (9084, 9089), False, 'from torch.autograd import Variable\n'), ((382, 421), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'nef', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nc, nef, 4, 2, 1, bias=False)\n', (391, 421), True, 'import torch.nn as nn\n'), ((435, 454), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nef'], {}), '(nef)\n', (449, 454), True, 'import torch.nn as nn\n'), ((468, 499), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (480, 499), True, 'import torch.nn as nn\n'), ((559, 603), 'torch.nn.Conv2d', 'nn.Conv2d', (['nef', '(nef * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef, nef * 2, 4, 2, 1, bias=False)\n', (568, 603), True, 'import torch.nn as nn\n'), ((617, 640), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 2)'], {}), '(nef * 2)\n', (631, 640), True, 'import torch.nn as nn\n'), ((654, 685), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (666, 685), True, 'import torch.nn as nn\n'), ((745, 793), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 2)', '(nef * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 2, nef * 4, 4, 2, 1, bias=False)\n', (754, 793), True, 'import torch.nn as nn\n'), ((805, 828), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 4)'], {}), '(nef * 4)\n', (819, 828), True, 'import torch.nn as nn\n'), ((842, 873), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (854, 873), True, 'import torch.nn as nn\n'), ((931, 979), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 4)', '(nef * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 4, nef * 8, 4, 2, 1, bias=False)\n', (940, 979), True, 'import torch.nn as nn\n'), ((993, 1016), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 8)'], {}), '(nef * 8)\n', (1007, 1016), True, 'import torch.nn as nn\n'), ((1030, 1061), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1042, 1061), True, 'import torch.nn as nn\n'), ((1119, 1168), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 8)', '(nef * 16)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 8, nef * 16, 4, 2, 1, bias=False)\n', (1128, 1168), True, 'import torch.nn as nn\n'), ((1180, 1204), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 16)'], {}), '(nef * 16)\n', (1194, 1204), True, 'import torch.nn as nn\n'), ((1218, 1249), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1230, 1249), True, 'import torch.nn as nn\n'), ((1308, 1358), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 16)', '(nef * 32)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 16, nef * 32, 4, 2, 1, bias=False)\n', (1317, 1358), True, 'import torch.nn as nn\n'), ((1372, 1396), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 32)'], {}), 
'(nef * 32)\n', (1386, 1396), True, 'import torch.nn as nn\n'), ((1410, 1441), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1422, 1441), True, 'import torch.nn as nn\n'), ((1498, 1548), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 32)', '(nef * 64)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 32, nef * 64, 4, 2, 1, bias=False)\n', (1507, 1548), True, 'import torch.nn as nn\n'), ((1562, 1586), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 64)'], {}), '(nef * 64)\n', (1576, 1586), True, 'import torch.nn as nn\n'), ((1600, 1631), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1612, 1631), True, 'import torch.nn as nn\n'), ((1688, 1739), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 64)', '(nef * 128)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(nef * 64, nef * 128, 4, 1, 0, bias=False)\n', (1697, 1739), True, 'import torch.nn as nn\n'), ((1753, 1778), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 128)'], {}), '(nef * 128)\n', (1767, 1778), True, 'import torch.nn as nn\n'), ((1792, 1823), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1804, 1823), True, 'import torch.nn as nn\n'), ((1837, 1881), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 128)', 'nz', '(1)', '(1)', '(0)'], {'bias': '(True)'}), '(nef * 128, nz, 1, 1, 0, bias=True)\n', (1846, 1881), True, 'import torch.nn as nn\n'), ((1895, 1907), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1905, 1907), True, 'import torch.nn as nn\n'), ((2030, 2084), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nz', '(ngf * 128)', '(2)', '(1)', '(0)'], {'bias': '(False)'}), '(nz, ngf * 128, 2, 1, 0, bias=False)\n', (2048, 2084), True, 'import torch.nn as nn\n'), ((2098, 2123), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 128)'], {}), '(ngf * 128)\n', (2112, 2123), True, 'import torch.nn as nn\n'), ((2137, 2150), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2144, 2150), True, 'import torch.nn as nn\n'), ((2197, 2257), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 128)', '(ngf * 64)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 128, ngf * 64, 4, 2, 1, bias=False)\n', (2215, 2257), True, 'import torch.nn as nn\n'), ((2271, 2295), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 64)'], {}), '(ngf * 64)\n', (2285, 2295), True, 'import torch.nn as nn\n'), ((2309, 2322), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2316, 2322), True, 'import torch.nn as nn\n'), ((2368, 2427), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 64)', '(ngf * 32)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 64, ngf * 32, 4, 2, 1, bias=False)\n', (2386, 2427), True, 'import torch.nn as nn\n'), ((2441, 2465), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 32)'], {}), '(ngf * 32)\n', (2455, 2465), True, 'import torch.nn as nn\n'), ((2479, 2492), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2486, 2492), True, 'import torch.nn as nn\n'), ((2538, 2597), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 32)', '(ngf * 16)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 32, ngf * 16, 4, 2, 1, bias=False)\n', (2556, 2597), True, 'import torch.nn as nn\n'), ((2609, 2633), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 16)'], {}), '(ngf * 16)\n', (2623, 2633), True, 'import torch.nn as nn\n'), ((2647, 2660), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2654, 2660), True, 'import 
torch.nn as nn\n'), ((2718, 2776), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 16)', '(ngf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 16, ngf * 8, 4, 2, 1, bias=False)\n', (2736, 2776), True, 'import torch.nn as nn\n'), ((2790, 2813), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 8)'], {}), '(ngf * 8)\n', (2804, 2813), True, 'import torch.nn as nn\n'), ((2827, 2840), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2834, 2840), True, 'import torch.nn as nn\n'), ((2898, 2955), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 8)', '(ngf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 8, ngf * 4, 4, 2, 1, bias=False)\n', (2916, 2955), True, 'import torch.nn as nn\n'), ((2969, 2992), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 4)'], {}), '(ngf * 4)\n', (2983, 2992), True, 'import torch.nn as nn\n'), ((3006, 3019), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3013, 3019), True, 'import torch.nn as nn\n'), ((3122, 3179), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (3140, 3179), True, 'import torch.nn as nn\n'), ((3193, 3216), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (3207, 3216), True, 'import torch.nn as nn\n'), ((3230, 3243), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3237, 3243), True, 'import torch.nn as nn\n'), ((3257, 3310), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 3, 1, 1, bias=False)\n', (3275, 3310), True, 'import torch.nn as nn\n'), ((3324, 3372), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 3, 1, 1, bias=False)\n', (3342, 3372), True, 'import torch.nn as nn\n'), ((3425, 3482), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (3443, 3482), True, 'import torch.nn as nn\n'), ((3496, 3519), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (3510, 3519), True, 'import torch.nn as nn\n'), ((3533, 3546), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3540, 3546), True, 'import torch.nn as nn\n'), ((3651, 3704), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (3669, 3704), True, 'import torch.nn as nn\n'), ((3718, 3737), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (3732, 3737), True, 'import torch.nn as nn\n'), ((3751, 3764), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3758, 3764), True, 'import torch.nn as nn\n'), ((3778, 3826), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 3, 1, 1, bias=False)\n', (3796, 3826), True, 'import torch.nn as nn\n'), ((3880, 3933), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (3898, 3933), True, 'import torch.nn as nn\n'), ((3947, 3966), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (3961, 3966), True, 'import torch.nn as nn\n'), ((3980, 3993), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3987, 3993), True, 'import 
torch.nn as nn\n'), ((4051, 4099), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 4, 2, 1, bias=False)\n', (4069, 4099), True, 'import torch.nn as nn\n'), ((4113, 4122), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (4120, 4122), True, 'import torch.nn as nn\n'), ((4922, 4961), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'nef', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nc, nef, 4, 2, 1, bias=False)\n', (4931, 4961), True, 'import torch.nn as nn\n'), ((4975, 4994), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nef'], {}), '(nef)\n', (4989, 4994), True, 'import torch.nn as nn\n'), ((5008, 5039), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5020, 5039), True, 'import torch.nn as nn\n'), ((5099, 5143), 'torch.nn.Conv2d', 'nn.Conv2d', (['nef', '(nef * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef, nef * 2, 4, 2, 1, bias=False)\n', (5108, 5143), True, 'import torch.nn as nn\n'), ((5157, 5180), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 2)'], {}), '(nef * 2)\n', (5171, 5180), True, 'import torch.nn as nn\n'), ((5194, 5225), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5206, 5225), True, 'import torch.nn as nn\n'), ((5285, 5333), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 2)', '(nef * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 2, nef * 4, 4, 2, 1, bias=False)\n', (5294, 5333), True, 'import torch.nn as nn\n'), ((5345, 5368), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 4)'], {}), '(nef * 4)\n', (5359, 5368), True, 'import torch.nn as nn\n'), ((5382, 5413), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5394, 5413), True, 'import torch.nn as nn\n'), ((5471, 5519), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 4)', '(nef * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 4, nef * 8, 4, 2, 1, bias=False)\n', (5480, 5519), True, 'import torch.nn as nn\n'), ((5533, 5556), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 8)'], {}), '(nef * 8)\n', (5547, 5556), True, 'import torch.nn as nn\n'), ((5570, 5601), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5582, 5601), True, 'import torch.nn as nn\n'), ((5659, 5708), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 8)', '(nef * 16)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 8, nef * 16, 4, 2, 1, bias=False)\n', (5668, 5708), True, 'import torch.nn as nn\n'), ((5720, 5744), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 16)'], {}), '(nef * 16)\n', (5734, 5744), True, 'import torch.nn as nn\n'), ((5758, 5789), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5770, 5789), True, 'import torch.nn as nn\n'), ((5848, 5898), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 16)', '(nef * 32)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 16, nef * 32, 4, 2, 1, bias=False)\n', (5857, 5898), True, 'import torch.nn as nn\n'), ((5912, 5936), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 32)'], {}), '(nef * 32)\n', (5926, 5936), True, 'import torch.nn as nn\n'), ((5950, 5981), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5962, 5981), True, 'import torch.nn as nn\n'), ((6038, 6088), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 32)', '(nef * 64)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nef * 32, nef * 64, 4, 2, 1, 
bias=False)\n', (6047, 6088), True, 'import torch.nn as nn\n'), ((6102, 6126), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 64)'], {}), '(nef * 64)\n', (6116, 6126), True, 'import torch.nn as nn\n'), ((6140, 6171), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (6152, 6171), True, 'import torch.nn as nn\n'), ((6228, 6279), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 64)', '(nef * 128)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(nef * 64, nef * 128, 4, 1, 0, bias=False)\n', (6237, 6279), True, 'import torch.nn as nn\n'), ((6293, 6318), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nef * 128)'], {}), '(nef * 128)\n', (6307, 6318), True, 'import torch.nn as nn\n'), ((6332, 6363), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (6344, 6363), True, 'import torch.nn as nn\n'), ((6377, 6421), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nef * 128)', 'nz', '(1)', '(1)', '(0)'], {'bias': '(True)'}), '(nef * 128, nz, 1, 1, 0, bias=True)\n', (6386, 6421), True, 'import torch.nn as nn\n'), ((6435, 6447), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6445, 6447), True, 'import torch.nn as nn\n'), ((6570, 6624), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nz', '(ngf * 128)', '(2)', '(1)', '(0)'], {'bias': '(False)'}), '(nz, ngf * 128, 2, 1, 0, bias=False)\n', (6588, 6624), True, 'import torch.nn as nn\n'), ((6638, 6663), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 128)'], {}), '(ngf * 128)\n', (6652, 6663), True, 'import torch.nn as nn\n'), ((6677, 6690), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6684, 6690), True, 'import torch.nn as nn\n'), ((6737, 6797), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 128)', '(ngf * 64)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 128, ngf * 64, 4, 2, 1, bias=False)\n', (6755, 6797), True, 'import torch.nn as nn\n'), ((6811, 6835), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 64)'], {}), '(ngf * 64)\n', (6825, 6835), True, 'import torch.nn as nn\n'), ((6849, 6862), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6856, 6862), True, 'import torch.nn as nn\n'), ((6908, 6967), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 64)', '(ngf * 32)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 64, ngf * 32, 4, 2, 1, bias=False)\n', (6926, 6967), True, 'import torch.nn as nn\n'), ((6981, 7005), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 32)'], {}), '(ngf * 32)\n', (6995, 7005), True, 'import torch.nn as nn\n'), ((7019, 7032), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7026, 7032), True, 'import torch.nn as nn\n'), ((7078, 7137), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 32)', '(ngf * 16)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 32, ngf * 16, 4, 2, 1, bias=False)\n', (7096, 7137), True, 'import torch.nn as nn\n'), ((7149, 7173), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 16)'], {}), '(ngf * 16)\n', (7163, 7173), True, 'import torch.nn as nn\n'), ((7187, 7200), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7194, 7200), True, 'import torch.nn as nn\n'), ((7258, 7316), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 16)', '(ngf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 16, ngf * 8, 4, 2, 1, bias=False)\n', (7276, 7316), True, 'import torch.nn as nn\n'), ((7330, 7353), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 8)'], {}), '(ngf * 8)\n', (7344, 7353), True, 'import 
torch.nn as nn\n'), ((7367, 7380), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7374, 7380), True, 'import torch.nn as nn\n'), ((7438, 7495), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 8)', '(ngf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 8, ngf * 4, 4, 2, 1, bias=False)\n', (7456, 7495), True, 'import torch.nn as nn\n'), ((7509, 7532), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 4)'], {}), '(ngf * 4)\n', (7523, 7532), True, 'import torch.nn as nn\n'), ((7546, 7559), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7553, 7559), True, 'import torch.nn as nn\n'), ((7662, 7719), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (7680, 7719), True, 'import torch.nn as nn\n'), ((7733, 7756), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (7747, 7756), True, 'import torch.nn as nn\n'), ((7770, 7783), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7777, 7783), True, 'import torch.nn as nn\n'), ((7797, 7850), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 3, 1, 1, bias=False)\n', (7815, 7850), True, 'import torch.nn as nn\n'), ((7864, 7912), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 3, 1, 1, bias=False)\n', (7882, 7912), True, 'import torch.nn as nn\n'), ((7965, 8022), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (7983, 8022), True, 'import torch.nn as nn\n'), ((8036, 8059), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (8050, 8059), True, 'import torch.nn as nn\n'), ((8073, 8086), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8080, 8086), True, 'import torch.nn as nn\n'), ((8191, 8244), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (8209, 8244), True, 'import torch.nn as nn\n'), ((8258, 8277), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (8272, 8277), True, 'import torch.nn as nn\n'), ((8291, 8304), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8298, 8304), True, 'import torch.nn as nn\n'), ((8318, 8366), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 3, 1, 1, bias=False)\n', (8336, 8366), True, 'import torch.nn as nn\n'), ((8419, 8472), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (8437, 8472), True, 'import torch.nn as nn\n'), ((8486, 8505), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (8500, 8505), True, 'import torch.nn as nn\n'), ((8519, 8532), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8526, 8532), True, 'import torch.nn as nn\n'), ((8590, 8638), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 4, 2, 1, bias=False)\n', (8608, 8638), True, 'import torch.nn as nn\n'), ((8652, 8661), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8659, 8661), True, 'import torch.nn as nn\n')] |
import time
import sys
import ibmiotf.application
import ibmiotf.device
import random
import json
#Provide your IBM Watson Device Credentials
organization = "1tzgh7"
deviceType = "iotdevice"
deviceId = "0000"
authMethod = "token"
authToken = "<PASSWORD>"
# Initialize the device client
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data['command'])
if cmd.data['command']=='EXPIRED':
print("PRODUCT EXPIRED IS RECIEVED")
elif cmd.data['command']=='lightoff':
print("PRODUCT NOT EXPIRED IS RECIEVED")
if cmd.command == "setInterval":
if 'interval' not in cmd.data:
print("Error - command is missing required information: 'interval'")
else:
interval = cmd.data['interval']
elif cmd.command == "print":
if 'message' not in cmd.data:
print("Error - command is missing required information: 'message'")
else:
print(cmd.data['message'])
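# Illustrative payloads (assumptions for documentation only): the first branch reads
# cmd.data['command'], e.g. {"command": "EXPIRED"} or {"command": "lightoff"}; the later
# branches read cmd.command together with cmd.data, e.g. command "print" with data
# {"message": "hello"} or command "setInterval" with data {"interval": 10}.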
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
# Connect and repeatedly publish the product data (names, ids, expiry dates) to the cloud as "Data" events
deviceCli.connect()
while True:
products = "Pasta","bread","butter","panner"
product_ids = 12345,3413,2341,4501
expiry_dates = "20-02-2021","22-02-2021","12-05-2021","12-05-2021"
data = {"prod_name":products, "pro_id":product_ids, "expiry_date":expiry_dates}
#print data
def myOnPublishCallback():
print ("Published Data to IBM Watson")
success = deviceCli.publishEvent("Data", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(1)
deviceCli.commandCallback = myCommandCallback
# Disconnect the device and application from the cloud
deviceCli.disconnect()
| [
"sys.exit",
"time.sleep"
] | [((2220, 2233), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2230, 2233), False, 'import time\n'), ((1492, 1502), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1500, 1502), False, 'import sys\n')] |
import numpy as np
import pandas as pd
from sklearn import model_selection
import tensorflow as tf
from pathlib import Path
"""
<NAME>, WAK2116, ELEN-E6889, Spring 2019
Final Project
This python file trains a neural network that predicts an activity level
based on a jpg image from a traffic camera
This is an initial attempt at doing regression based on image data.
It is loosely based on TF image classification examples and
"Deep Leaning: Image Recognition" on Lynda.com
"""
# paths to the labeled traffic-camera images and their label file
img_path = "./labeled_data/"
df = pd.read_csv('./labeled_data/labels.txt')
#print(df)
df_train, df_valid = model_selection.train_test_split(df, test_size=.1)
#print(df_train)
#print("---")
#print(df_valid)
def keras_data(data):
# Output arrays
x = np.empty([0, 160, 160, 3], dtype=np.float32)
y = np.empty([data.datetime.count()], dtype=np.float32)
#print(x.shape)
#print(y.shape)
# Read in and preprocess a batch of images
sess = tf.Session()
for i in range(0, data.datetime.count()):
#print(data.people[data.index[i]])
y_value = data.vehicles[data.index[i]] + data.people[data.index[i]]
#print(y_value)
#y = np.append(y, [y_value])
y[i] = y_value
# convert image to a tensor
# img_raw = tf.read_file(sample_img_path)
image_file = img_path + data.datetime[data.index[i]]
img_raw = tf.read_file(image_file)
#print(repr(img_raw)[:200]+"...")
img_tensor = tf.image.decode_image(img_raw)
#print(img_tensor.shape)
cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220)
#print(cropped_tensor.shape)
#output_image = tf.image.encode_png(cropped_tensor)
#file = tf.write_file("text.png",output_image)
img_tensor = tf.image.resize(cropped_tensor, [160, 160])
#img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a bit
img_tensor /= 255.0 # normalize to [0,1] range
# print(img_tensor)
#print(img_tensor.shape)
# print(img_tensor.dtype)
        # reuse the tf.Session created before the loop instead of constructing a new one per image
        with sess.as_default():
np_array = img_tensor.eval()
#print("np from tensor", np_array.shape)
indexed_array = np.expand_dims(np_array, axis=0)
#print("np from tensor with index",indexed_array.shape)
x = np.append(x, indexed_array, axis=0)
#print("x shape", x.shape)
#print(y.shape)
return x, y
x_train, y_train = keras_data(df_train)
x_test, y_test = keras_data(df_valid)
#y_train = tf.keras.utils.to_categorical(y_train, 16)
#y_test = tf.keras.utils.to_categorical(y_test, 16)
y_train = y_train / 16
y_test = y_test / 16
#(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data()
#x_train = x_train.astype("float32")
#x_test = x_test.astype("float32")
#x_train = x_train / 255
#x_test = x_test / 255
#y_train = tf.keras.utils.to_categorical(y_train, 10)
#y_test = tf.keras.utils.to_categorical(y_test, 10)
model = tf.keras.Sequential()
#model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3)))
model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2,2))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2,2))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation="relu"))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(100, activation='relu'))
model.add(tf.keras.layers.Dropout(.25))
#model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.add(tf.keras.layers.Dense(10, activation="relu"))
model.add(tf.keras.layers.Dense(1))
model.compile(
#loss='categorical_crossentropy',
loss='mse',
optimizer='adam',
metrics=["accuracy", "mae"]
)
model.summary()
model.fit(
x_train,
y_train,
batch_size=10,
epochs=30,
validation_data=[x_test, y_test],
shuffle=True #,
#steps_per_epoch=1000
)
# save structure
model_structure = model.to_json()
f = Path("model_structure.json")
f.write_text(model_structure)
# save weights
model.save_weights("model_weights.h5")
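
# Round-trip check (a sketch): reload the structure and weights that were just saved,
# using standard Keras helpers, and confirm the network rebuilds.
reloaded_model = tf.keras.models.model_from_json(Path("model_structure.json").read_text())
reloaded_model.load_weights("model_weights.h5")
print("Reloaded model parameters:", reloaded_model.count_params())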
| [
"tensorflow.image.crop_to_bounding_box",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.empty",
"tensorflow.Session",
"numpy.expand_dims",
"numpy.append",
"pathlib.Path",
"tensorflow.image.decode_image",
"tensorflow.keras.Sequential",
"tensorflow.read_file",
"tensorflow.image.resize",
"tensorflow.keras.layers.Flatten"
] | [((568, 608), 'pandas.read_csv', 'pd.read_csv', (['"""./labeled_data/labels.txt"""'], {}), "('./labeled_data/labels.txt')\n", (579, 608), True, 'import pandas as pd\n'), ((642, 693), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['df'], {'test_size': '(0.1)'}), '(df, test_size=0.1)\n', (674, 693), False, 'from sklearn import model_selection\n'), ((3077, 3098), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (3096, 3098), True, 'import tensorflow as tf\n'), ((4436, 4464), 'pathlib.Path', 'Path', (['"""model_structure.json"""'], {}), "('model_structure.json')\n", (4440, 4464), False, 'from pathlib import Path\n'), ((794, 838), 'numpy.empty', 'np.empty', (['[0, 160, 160, 3]'], {'dtype': 'np.float32'}), '([0, 160, 160, 3], dtype=np.float32)\n', (802, 838), True, 'import numpy as np\n'), ((998, 1010), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1008, 1010), True, 'import tensorflow as tf\n'), ((3214, 3314), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(160, 160, 3)'}), "(32, (3, 3), padding='same', activation='relu',\n input_shape=(160, 160, 3))\n", (3236, 3314), True, 'import tensorflow as tf\n'), ((3321, 3374), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (3343, 3374), True, 'import tensorflow as tf\n'), ((3384, 3418), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3412, 3418), True, 'import tensorflow as tf\n'), ((3429, 3458), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (3452, 3458), True, 'import tensorflow as tf\n'), ((3471, 3540), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='same', activation='relu')\n", (3493, 3540), True, 'import tensorflow as tf\n'), ((3550, 3603), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3572, 3603), True, 'import tensorflow as tf\n'), ((3613, 3647), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3641, 3647), True, 'import tensorflow as tf\n'), ((3658, 3687), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (3681, 3687), True, 'import tensorflow as tf\n'), ((3700, 3725), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (3723, 3725), True, 'import tensorflow as tf\n'), ((3738, 3783), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (3759, 3783), True, 'import tensorflow as tf\n'), ((3795, 3823), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (3818, 3823), True, 'import tensorflow as tf\n'), ((3836, 3881), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3857, 3881), True, 'import tensorflow as tf\n'), ((3893, 3922), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (3916, 3922), True, 'import tensorflow as tf\n'), ((3994, 4038), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', 
(['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (4015, 4038), True, 'import tensorflow as tf\n'), ((4050, 4074), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (4071, 4074), True, 'import tensorflow as tf\n'), ((1427, 1451), 'tensorflow.read_file', 'tf.read_file', (['image_file'], {}), '(image_file)\n', (1439, 1451), True, 'import tensorflow as tf\n'), ((1516, 1546), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['img_raw'], {}), '(img_raw)\n', (1537, 1546), True, 'import tensorflow as tf\n'), ((1605, 1664), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['img_tensor', '(80)', '(80)', '(160)', '(220)'], {}), '(img_tensor, 80, 80, 160, 220)\n', (1634, 1664), True, 'import tensorflow as tf\n'), ((1839, 1882), 'tensorflow.image.resize', 'tf.image.resize', (['cropped_tensor', '[160, 160]'], {}), '(cropped_tensor, [160, 160])\n', (1854, 1882), True, 'import tensorflow as tf\n'), ((2136, 2148), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2146, 2148), True, 'import tensorflow as tf\n'), ((2303, 2335), 'numpy.expand_dims', 'np.expand_dims', (['np_array'], {'axis': '(0)'}), '(np_array, axis=0)\n', (2317, 2335), True, 'import numpy as np\n'), ((2420, 2455), 'numpy.append', 'np.append', (['x', 'indexed_array'], {'axis': '(0)'}), '(x, indexed_array, axis=0)\n', (2429, 2455), True, 'import numpy as np\n')] |
"""
Tools for Some Platformer Game
Created by sheepy0125
02/10/2021
"""
from pathlib import Path

import pygame  # needed by the Animations class below
###############
### Globals ###
###############
ROOT_PATH: Path = Path(__file__).parent.parent
####################
### Logger class ###
####################
class Logger:
"""Log messages with ease"""
colors: dict = {
"log": "\033[92m",
"warn": "\033[93m",
"fatal": "\033[91m",
"normal": "\033[0m",
}
@staticmethod
def log(message: str):
print(f"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}")
@staticmethod
def warn(message: str):
print(f"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}")
@staticmethod
def fatal(message: str):
print(f"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}")
@staticmethod
def log_error(error: Exception):
Logger.fatal(
f"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})"
)
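# --- Usage sketch (not part of the original file) ---
# A minimal illustration of the Logger helpers above; the messages are made
# up for the example.
# Logger.log("Loaded the tile map")
# Logger.warn("Missing texture, falling back to the default tile")
# try:
#     1 / 0
# except ZeroDivisionError as error:
#     Logger.log_error(error)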
#############################
### Scroll handling class ###
#############################
class Scrolling:
scroll_x: float = 0
scroll_y: float = 0
max_scroll_x: float = 0
max_scroll_y: float = 0
@staticmethod
def setup_scrolling(map_size, tile_size, screen_size):
"""Setup scrolling"""
Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0])
Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50
Logger.log(
f"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})"
)
@staticmethod
def update_scrolling(player_pos, scroll_offset):
"""Update scrolling"""
# Center player
Scrolling.scroll_x += (
player_pos[0] - Scrolling.scroll_x - scroll_offset[0]
) / 10
Scrolling.scroll_y += (
player_pos[1] - Scrolling.scroll_y - scroll_offset[1]
) / 10
# Don't allow scrolling off the map
# X axis
if Scrolling.scroll_x <= 0:
Scrolling.scroll_x = 0
elif Scrolling.scroll_x >= Scrolling.max_scroll_x:
Scrolling.scroll_x = Scrolling.max_scroll_x
# Y axis
if Scrolling.scroll_y <= 0:
Scrolling.scroll_y = 0
elif Scrolling.scroll_y >= Scrolling.max_scroll_y:
Scrolling.scroll_y = Scrolling.max_scroll_y
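# --- Usage sketch (not part of the original file) ---
# A rough illustration of how the Scrolling helpers above are meant to be
# driven from a game loop; the map size, tile size, screen size, player
# position, and scroll offset below are assumptions for the example.
# Scrolling.setup_scrolling(map_size=(100, 30), tile_size=32, screen_size=(800, 600))
# while game_is_running:
#     Scrolling.update_scrolling(player_center, scroll_offset=(400, 300))
#     screen.blit(tile_image, (tile_x - Scrolling.scroll_x, tile_y - Scrolling.scroll_y))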
########################
### Animations class ###
########################
class Animations:
    """Load animation frames from a spritesheet and look them up by name"""

    def __init__(self, image_path, cols, rows, dict_names):
        self.sprites = self.get_images_from_spritesheet(image_path, cols, rows)
        self.dict = self.load_dict(dict_names)

    def load_dict(self, dict_names):
        """Map each animation name to its list of frames"""

        sprite_dict = {}
        for i in range(len(self.sprites)):
            sprite_dict[dict_names[i]] = self.sprites[i]
        return sprite_dict

    def add_extra_sprites(self):
        """Add squashed and stretched copies of every animation.

        The original implementation left the scaling unfinished; the 25%
        squash / stretch factors and the `<name>_squashed` / `<name>_stretched`
        keys used here are assumptions.
        """

        extra_sprites = {}
        for name in self.dict:
            copied_sprites = self.dict[name].copy()
            squashed_sprites = []
            stretched_sprites = []
            for sprite in copied_sprites:
                width, height = sprite.get_size()
                # Squashed: wider and shorter / stretched: narrower and taller
                squashed_sprites.append(
                    pygame.transform.scale(
                        sprite, (int(width * 1.25), int(height * 0.75))
                    )
                )
                stretched_sprites.append(
                    pygame.transform.scale(
                        sprite, (int(width * 0.75), int(height * 1.25))
                    )
                )
            extra_sprites[f"{name}_squashed"] = squashed_sprites
            extra_sprites[f"{name}_stretched"] = stretched_sprites
        self.dict.update(extra_sprites)

    @staticmethod
    def get_images_from_spritesheet(image_path, cols, rows):
        """
        Get the images from the spritesheet.
        cols is the number of columns (one animation per column)
        rows is the number of rows (frames per animation)
        """

        spritesheet = pygame.image.load(image_path)
        sprite_width = spritesheet.get_width() // cols
        sprite_height = spritesheet.get_height() // rows
        # Raw pixel data of a blank surface, used to detect empty cells
        empty_image = pygame.Surface((sprite_width, sprite_height)).get_buffer().raw

        columns = []
        # Loop through the number of columns
        for col_num in range(cols):
            # Get the x position of the sprite by multiplying
            # the column that it's on by the width
            x_pos = col_num * sprite_width
            column_images = []
            # Loop through the number of rows
            for row_num in range(rows):
                y_pos = row_num * sprite_height
                sprite_rect = (x_pos, y_pos, sprite_width, sprite_height)
                sprite = spritesheet.subsurface(sprite_rect)
                # Skip cells that are completely empty
                if sprite.get_buffer().raw == empty_image:
                    continue
                column_images.append(sprite)
            columns.append(column_images)
        return columns
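# --- Usage sketch (not part of the original file) ---
# A rough illustration of constructing Animations; the spritesheet path, grid
# layout, and animation names are assumptions for the example. Each column of
# the sheet is treated as one animation with `rows` frames.
# player_animations = Animations(
#     image_path="assets/player_spritesheet.png",
#     cols=2,
#     rows=4,
#     dict_names=["walk_left", "walk_right"],
# )
# player_animations.add_extra_sprites()
# first_walk_left_frame = player_animations.dict["walk_left"][0]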
| [
"pathlib.Path"
] | [((165, 179), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (169, 179), False, 'from pathlib import Path\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.