# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import os
import copy
import numpy as np
import tensorflow as tf
from . import utils
from os.path import isdir, join
tf.NotDifferentiable("Spans")
tf.NotDifferentiable("Antecedents")
tf.NotDifferentiable("ExtractMentions")
tf.NotDifferentiable("DistanceBins")
seed = 5
tf.set_random_seed(seed)
class CorefModel(object):
"""
End-to-end neural model for coreference resolution.
    Class that creates the model from https://homes.cs.washington.edu/~kentonl/pub/lhlz-emnlp.2017.pdf
"""
def __init__(self, opt):
"""Initialize the class and model according to the given parameters in opt."""
self.opt = copy.deepcopy(opt)
tf.set_random_seed(opt['random_seed'])
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
coref_op_library = tf.load_op_library(join(opt['model_file'], "coref_kernels.so"))
self.spans = coref_op_library.spans
self.distance_bins = coref_op_library.distance_bins
self.extract_mentions = coref_op_library.extract_mentions
self.get_antecedents = coref_op_library.antecedents
dpath = join(self.opt['model_file'], self.opt['language'], 'agent')
self.log_root = join(dpath, 'logs')
self.char_embedding_size = self.opt["char_embedding_size"]
self.char_vocab_path = join(dpath, 'vocab', 'char_vocab.russian.txt')
self.char_dict = utils.load_char_dict(self.char_vocab_path)
if opt['emb_format'] == 'vec':
self.embedding_path = join(dpath, 'embeddings', 'embeddings_lenta_100.vec')
elif opt['emb_format'] == 'bin':
self.embedding_path = join(dpath, 'embeddings', 'ft_0.8.3_nltk_yalen_sg_300.bin')
else:
raise ValueError('Not supported embeddings format {}'.format(opt['emb_format']))
self.embedding_info = (self.opt["embedding_size"], self.opt["emb_lowercase"])
self.embedding_size = self.opt['embedding_size']
self.embedding_dicts = utils.load_embedding_dict(self.embedding_path, self.embedding_size,
self.opt["emb_format"])
self.max_mention_width = self.opt["max_mention_width"]
self.genres = {g: i for i, g in enumerate(self.opt["genres"])}
input_props = list()
input_props.append((tf.float64, [None, None, self.embedding_size])) # Text embeddings.
input_props.append((tf.int32, [None, None, None])) # Character indices.
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=1, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
# train type trigger
if self.opt['train_on_gold']:
self.predictions, self.loss = self.get_predictions_and_loss_on_gold(*self.input_tensors)
else:
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.opt["learning_rate"], self.global_step,
self.opt["decay_frequency"], self.opt["decay_rate"],
staircase=True)
learning_rate = tf.cond(learning_rate < opt['final_rate'],
lambda: tf.Variable(opt['final_rate'], tf.float32),
lambda: learning_rate)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
# gradients = [g if g is None else tf.cast(g, tf.float64) for g in gradients]
# gradients, _ = tf.clip_by_global_norm(gradients, self.opt["max_gradient_norm"])
optimizers = {
"adam": tf.train.AdamOptimizer,
"sgd": tf.train.GradientDescentOptimizer
}
optimizer = optimizers[self.opt["optimizer"]](learning_rate)
self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
self.sess = tf.Session(config=config)
self.init_op = tf.global_variables_initializer()
self.sess.run(self.init_op)
def start_enqueue_thread(self, train_example, is_training, returning=False):
"""
        Initialize the queue of tensors that are fed to the model input one at a time.
Args:
train_example: modified dict from agent
is_training: training flag
returning: returning flag
Returns:
if returning is True, return list of variables:
[word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids]
"""
tensorized_example = self.tensorize_example(train_example, is_training=is_training)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
self.sess.run(self.enqueue_op, feed_dict=feed_dict)
if returning:
return tensorized_example
def tensorize_mentions(self, mentions):
"""
        Create two np.arrays with the start and end positions of the gold mentions.
Args:
mentions: list of tuple
Returns:
np.array(starts positions), np.array(ends positions)
"""
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
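    # Illustrative example (added for clarity, not from the original source): for gold mentions
    # given as (start, end) tuples, e.g. [(0, 2), (5, 5)], tensorize_mentions returns
    # (np.array([0, 5]), np.array([2, 5])); for an empty list it returns two empty arrays.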
def tensorize_example(self, example, is_training):
"""
Takes a dictionary from the observation and transforms it into a set of tensors
for tensorflow placeholders.
Args:
example: dict from observation
            is_training: True or False, passed through as a returned parameter/flag
        Returns: word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids;
            numpy tensors for the placeholders (is_training is a bool)
            If the number of sentences in the document is greater than the "max_training_sentences" parameter,
            the method returns the result of the 'truncate_example' function instead.
"""
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in utils.flatten(clusters))
gold_mention_map = {m: i for i, m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id
sentences = example["sentences"]
num_words = sum(len(s) for s in sentences)
speakers = utils.flatten(example["speakers"])
assert num_words == len(speakers)
max_sentence_length = max(len(s) for s in sentences)
max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.opt["filter_widths"]))
word_emb = np.zeros([len(sentences), max_sentence_length, self.embedding_size])
char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
text_len = np.array([len(s) for s in sentences])
for i, sentence in enumerate(sentences):
for j, word in enumerate(sentence):
current_dim = 0
d = self.embedding_dicts
(s, l) = self.embedding_info
current_word = word
if l:
                    current_word = word.lower()
if self.opt['emb_format'] == 'vec':
word_emb[i, j, current_dim:current_dim + s] = utils.normalize(d[current_word])
else:
word_emb[i, j, current_dim:current_dim + s] = utils.normalize(np.array(d[current_word]))
current_dim += s
char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
speaker_dict = {s: i for i, s in enumerate(set(speakers))}
speaker_ids = np.array([speaker_dict[s] for s in speakers]) # numpy
doc_key = example["doc_key"]
genre = self.genres[doc_key[:2]] # int 1
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions) # numpy of unicode str
if is_training and len(sentences) > self.opt["max_training_sentences"]:
return self.truncate_example(word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts,
gold_ends, cluster_ids)
else:
return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def truncate_example(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends,
cluster_ids):
"""
It takes the output of the function "tensorize_example" and cuts off the excess part of the tensor.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns: word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids;
The same set of tensors as in the input, but with a corrected shape.
Additional Information:
"None" in some form-size tensors, for example "word_emb", means that this axis measurement can vary
from document to document.
"""
max_training_sentences = self.opt["max_training_sentences"]
num_sentences = word_emb.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences)
word_offset = text_len[:sentence_offset].sum()
        # it is not clear what exactly is happening here:
        # why is the first part of the tensor cut off instead of the second?
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
word_emb = word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
speaker_ids = speaker_ids[word_offset: word_offset + num_words]
assert len(gold_ends) == len(gold_starts)
Gold_starts = np.zeros((len(gold_starts)))
Gold_ends = np.zeros((len(gold_ends)))
for i in range(len(gold_ends)):
Gold_ends[i] = int(gold_ends[i])
Gold_starts[i] = int(gold_starts[i])
gold_starts = Gold_starts
gold_ends = Gold_ends
        # this part looks questionable
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
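    # Illustrative example (added for clarity, not from the original source): with 5 sentences
    # and max_training_sentences = 3, sentence_offset is drawn from {0, 1, 2}. If
    # sentence_offset = 1 and text_len = [7, 5, 6, 4, 8], then word_offset = 7 and
    # num_words = 5 + 6 + 4 = 15, so only gold mentions with end >= 7 and start < 22 are kept
    # and their indices are shifted down by word_offset.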
def get_mention_emb(self, text_emb, text_outputs, mention_starts, mention_ends):
"""
        Forms a tensor that contains the embeddings of specific mentions.
        Args:
            text_emb: tf.float64, [num_words, emb], flattened text embeddings
text_outputs: tf.float64, [num_sentences, max_sentence_length, emb]
mention_starts: tf.int32, [Amount of mentions]
mention_ends: tf.int32, [Amount of mentions]
Returns: tf.float64, [num_mentions, emb]
Mentions embeddings tensor.
"""
mention_emb_list = []
mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
mention_emb_list.append(mention_start_emb)
mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_emb_list.append(mention_end_emb)
mention_width = 1 + mention_ends - mention_starts # [num_mentions]
if self.opt["use_features"]:
mention_width_index = mention_width - 1 # [num_mentions]
mention_width_emb = tf.gather(tf.get_variable("mention_width_embeddings", [self.opt["max_mention_width"],
self.opt["feature_size"]],
dtype=tf.float64),
mention_width_index) # [num_mentions, emb]
mention_width_emb = tf.nn.dropout(mention_width_emb, self.dropout)
mention_emb_list.append(mention_width_emb)
if self.opt["model_heads"]:
mention_indices = tf.expand_dims(tf.range(self.opt["max_mention_width"]), 0) + tf.expand_dims(
mention_starts, 1) # [num_mentions, max_mention_width]
mention_indices = tf.minimum(utils.shape(text_outputs, 0) - 1,
mention_indices) # [num_mentions, max_mention_width]
mention_text_emb = tf.gather(text_emb, mention_indices) # [num_mentions, max_mention_width, emb]
self.head_scores = utils.projection(text_outputs, 1) # [num_words, 1]
mention_head_scores = tf.gather(self.head_scores, mention_indices) # [num_mentions, max_mention_width, 1]
mention_mask = tf.expand_dims(
tf.sequence_mask(mention_width, self.opt["max_mention_width"], dtype=tf.float64),
2) # [num_mentions, max_mention_width, 1]
mention_attention = tf.nn.softmax(mention_head_scores + tf.log(mention_mask),
dim=1) # [num_mentions, max_mention_width, 1]
mention_head_emb = tf.reduce_sum(mention_attention * mention_text_emb, 1) # [num_mentions, emb]
mention_emb_list.append(mention_head_emb)
mention_emb = tf.concat(mention_emb_list, 1) # [num_mentions, emb]
return mention_emb
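    # Note (added for clarity): the returned mention embedding is a concatenation of up to four
    # parts: the bi-LSTM output at the mention start, the bi-LSTM output at the mention end, an
    # optional learned width embedding (if "use_features" is set) and an optional
    # attention-weighted head embedding over the mention tokens (if "model_heads" is set).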
def get_mention_scores(self, mention_emb):
"""
        Passes the mention embeddings through a fully connected network and returns its output.
        It computes the mention scores.
Args:
            mention_emb: tf.float64, [num_mentions, emb], a tensor that contains the embeddings of specific mentions
        Returns: [num_mentions, 1]
            Output of the fully-connected network that computes the mention scores.
"""
with tf.variable_scope("mention_scores"):
return utils.ffnn(mention_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1,
self.dropout) # [num_mentions, 1]
def softmax_loss(self, antecedent_scores, antecedent_labels):
"""
Computes the value of the loss function using antecedent_scores and antecedent_labels.
        In practice, a standard softmax (marginal log-likelihood) loss.
Args:
            antecedent_scores: tf.float64, [num_mentions, max_ant + 1], output of the fully-connected network that computes
antecedent scores.
antecedent_labels: True labels for antecedent.
Returns: [num_mentions]
The value of loss function.
"""
gold_scores = antecedent_scores + tf.log(tf.cast(antecedent_labels, tf.float64)) # [num_mentions, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [num_mentions]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [num_mentions]
return log_norm - marginalized_gold_scores # [num_mentions]
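    # Note (added for clarity): for each mention i this is the negative marginal log-likelihood
    # of its gold antecedents:
    #   loss_i = log(sum_j exp(s_ij)) - log(sum_{j in GOLD(i)} exp(s_ij)),
    # where s_ij are the antecedent_scores and GOLD(i) is encoded by antecedent_labels.
    # E.g. with scores [0., 0., 0.] and labels [True, False, False] the loss for that mention
    # is log(3) - log(1) = log(3).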
def get_antecedent_scores(self, mention_emb, mention_scores, antecedents, antecedents_len, mention_speaker_ids,
genre_emb):
"""
        Forms a new tensor using special features, mention embeddings, mention scores, etc.
        and passes it through a fully-connected network that computes antecedent scores.
        Args:
            mention_emb: [num_mentions, emb], a tensor that contains the embeddings of specific mentions
            mention_scores: [num_mentions], output of the fully-connected network that computes the mention scores
            antecedents: [num_mentions, max_ant], obtained from the custom C++ op
            antecedents_len: [num_mentions], obtained from the custom C++ op
            mention_speaker_ids: [num_mentions], tf.int32, Speaker IDs.
genre_emb: [genre_emb_size], tf.float64, Genre
Returns: tf.float64, [num_mentions, max_ant + 1], antecedent scores.
"""
num_mentions = utils.shape(mention_emb, 0)
max_antecedents = utils.shape(antecedents, 1)
feature_emb_list = []
if self.opt["use_metadata"]:
antecedent_speaker_ids = tf.gather(mention_speaker_ids, antecedents) # [num_mentions, max_ant]
same_speaker = tf.equal(tf.expand_dims(mention_speaker_ids, 1),
antecedent_speaker_ids) # [num_mentions, max_ant]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.opt["feature_size"]],
dtype=tf.float64),
tf.to_int32(same_speaker)) # [num_mentions, max_ant, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0),
[num_mentions, max_antecedents, 1]) # [num_mentions, max_ant, emb]
feature_emb_list.append(tiled_genre_emb)
if self.opt["use_features"]:
target_indices = tf.range(num_mentions) # [num_mentions]
mention_distance = tf.expand_dims(target_indices, 1) - antecedents # [num_mentions, max_ant]
mention_distance_bins = self.distance_bins(mention_distance) # [num_mentions, max_ant]
mention_distance_bins.set_shape([None, None])
mention_distance_emb = tf.gather(tf.get_variable("mention_distance_emb", [10, self.opt["feature_size"]],
dtype=tf.float64),
mention_distance_bins) # [num_mentions, max_ant]
feature_emb_list.append(mention_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [num_mentions, max_ant, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [num_mentions, max_ant, emb]
antecedent_emb = tf.gather(mention_emb, antecedents) # [num_mentions, max_ant, emb]
target_emb_tiled = tf.tile(tf.expand_dims(mention_emb, 1),
[1, max_antecedents, 1]) # [num_mentions, max_ant, emb]
similarity_emb = antecedent_emb * target_emb_tiled # [num_mentions, max_ant, emb]
pair_emb = tf.concat([target_emb_tiled, antecedent_emb, similarity_emb, feature_emb], 2)
# [num_mentions, max_ant, emb]
with tf.variable_scope("iteration"):
with tf.variable_scope("antecedent_scoring"):
antecedent_scores = utils.ffnn(pair_emb, self.opt["ffnn_depth"], self.opt["ffnn_size"], 1,
self.dropout) # [num_mentions, max_ant, 1]
antecedent_scores = tf.squeeze(antecedent_scores, 2) # [num_mentions, max_ant]
antecedent_mask = tf.log(
tf.sequence_mask(antecedents_len, max_antecedents, dtype=tf.float64)) # [num_mentions, max_ant]
antecedent_scores += antecedent_mask # [num_mentions, max_ant]
antecedent_scores += tf.expand_dims(mention_scores, 1) + tf.gather(mention_scores,
antecedents) # [num_mentions, max_ant]
antecedent_scores = tf.concat([tf.zeros([utils.shape(mention_scores, 0), 1], dtype=tf.float64),
antecedent_scores],
1) # [num_mentions, max_ant + 1]
return antecedent_scores # [num_mentions, max_ant + 1]
def flatten_emb_by_sentence(self, emb, text_len_mask):
"""
        Flattens the emb tensor by sentence and applies the boolean mask to it.
Args:
emb: Some embeddings tensor with rank 2 or 3
text_len_mask: A mask tensor representing the first N positions of each row.
        Returns: emb tensor after the mask is applied.
"""
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, utils.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, text_len_mask)
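    # Illustrative example (added for clarity, not from the original source): for emb of shape
    # [num_sentences=2, max_sentence_length=3, emb] and a flattened text_len_mask with four True
    # entries, the result has shape [4, emb]; the padding positions are dropped.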
def encode_sentences(self, text_emb, text_len, text_len_mask):
"""
Passes the input tensor through bi_LSTM.
Args:
text_emb: [num_sentences, max_sentence_length, emb], text code in tensor
text_len: tf.int32, [Amount of sentences]
text_len_mask: boolean mask for text_emb
Returns: [num_sentences, max_sentence_length, emb], output of bi-LSTM after boolean mask application
"""
num_sentences = tf.shape(text_emb)[0]
max_sentence_length = tf.shape(text_emb)[1]
# Transpose before and after for efficiency.
inputs = tf.transpose(text_emb, [1, 0, 2]) # [max_sentence_length, num_sentences, emb]
with tf.variable_scope("fw_cell"):
cell_fw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout)
preprocessed_inputs_fw = cell_fw.preprocess_input(inputs)
with tf.variable_scope("bw_cell"):
cell_bw = utils.CustomLSTMCell(self.opt["lstm_size"], num_sentences, self.dropout)
preprocessed_inputs_bw = cell_bw.preprocess_input(inputs)
preprocessed_inputs_bw = tf.reverse_sequence(preprocessed_inputs_bw,
seq_lengths=text_len,
seq_dim=0,
batch_dim=1)
state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
with tf.variable_scope("lstm"):
with tf.variable_scope("fw_lstm"):
fw_outputs, fw_states = tf.nn.dynamic_rnn(cell=cell_fw,
inputs=preprocessed_inputs_fw,
sequence_length=text_len,
initial_state=state_fw,
time_major=True)
with tf.variable_scope("bw_lstm"):
bw_outputs, bw_states = tf.nn.dynamic_rnn(cell=cell_bw,
inputs=preprocessed_inputs_bw,
sequence_length=text_len,
initial_state=state_bw,
time_major=True)
bw_outputs = tf.reverse_sequence(bw_outputs,
seq_lengths=text_len,
seq_dim=0,
batch_dim=1)
text_outputs = tf.concat([fw_outputs, bw_outputs], 2)
text_outputs = tf.transpose(text_outputs, [1, 0, 2]) # [num_sentences, max_sentence_length, emb]
return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
"""
        Forms a list of predicted antecedent indices.
        Args:
            antecedents: [num_mentions, max_ant], obtained from the custom C++ op
            antecedent_scores: [num_mentions, max_ant + 1], output of the fully-connected network
                that computes antecedent scores
        Returns: a list of predicted antecedent indices (-1 means no antecedent)
"""
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
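    # Illustrative example (added for clarity, not from the original source): if
    # antecedent_scores[i] is [0.1, 2.0, 0.5], the argmax is 1, so index = 0 and the predicted
    # antecedent for mention i is antecedents[i, 0]; if the dummy column 0 wins, index = -1 and
    # -1 is appended, meaning no antecedent is predicted for mention i.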
def get_predictions_and_loss(self, word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts,
gold_ends, cluster_ids):
"""
        Connects all elements of the network into one complete graph that computes mention spans independently,
        and passes through it the tensors that came to the placeholder inputs.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
List of predictions and scores, and Loss function value
"""
self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"])
self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"])
num_sentences = tf.shape(word_emb)[0]
max_sentence_length = tf.shape(word_emb)[1]
text_emb_list = [word_emb]
if self.opt["char_embedding_size"] > 0:
            char_emb = tf.gather(
                tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]],
                                dtype=tf.float64),
                char_index)  # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2),
utils.shape(char_emb, 3)])
# [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[
"filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb,
[num_sentences,
max_sentence_length,
utils.shape(flattened_aggregated_char_emb, 1)])
# [num_sentences, max_sentence_length, emb]
text_emb_list.append(aggregated_char_emb)
text_emb = tf.concat(text_emb_list, 2)
text_emb = tf.nn.dropout(text_emb, self.lexical_dropout)
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)
text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length])
text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask)
text_outputs = tf.nn.dropout(text_outputs, self.dropout)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]],
dtype=tf.float64),
genre) # [emb]
sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
[1, max_sentence_length]) # [num_sentences, max_sentence_length]
flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask) # [num_words]
candidate_starts, candidate_ends = self.spans(
sentence_indices=flattened_sentence_indices,
max_width=self.max_mention_width)
candidate_starts.set_shape([None])
candidate_ends.set_shape([None])
candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, candidate_starts,
candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1]
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(text_outputs)[0]) * self.opt["mention_ratio"]))
predicted_mention_indices = self.extract_mentions(candidate_mention_scores, candidate_starts,
candidate_ends, k) # ([k], [k])
predicted_mention_indices.set_shape([None])
mention_starts = tf.gather(candidate_starts, predicted_mention_indices) # [num_mentions]
mention_ends = tf.gather(candidate_ends, predicted_mention_indices) # [num_mentions]
mention_emb = tf.gather(candidate_mention_emb, predicted_mention_indices) # [num_mentions, emb]
mention_scores = tf.gather(candidate_mention_scores, predicted_mention_indices) # [num_mentions]
# mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
# mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions]
max_antecedents = self.opt["max_antecedents"]
antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends,
gold_starts, gold_ends, cluster_ids,
max_antecedents)
# ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions]
antecedents.set_shape([None, None])
antecedent_labels.set_shape([None, None])
antecedents_len.set_shape([None])
antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len,
mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1]
loss = self.softmax_loss(antecedent_scores, antecedent_labels) # [num_mentions]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
def get_predicted_clusters(self, mention_starts, mention_ends, predicted_antecedents):
"""
        Creates a list of clusters (as in the dict from the observation) and a dict that maps each mention
        to the cluster it belongs to. They are needed in inference mode and for annotating a new conll
        document that lacks the last column.
Args:
            mention_starts: numpy array, [Amount of mentions]
            mention_ends: numpy array, [Amount of mentions]
            predicted_antecedents: list whose length equals the number of mentions
Returns:
predicted_clusters = [[(),(),()],[(),()]] list like, with mention id
mention_to_predicted = {mentions id: [(),(),()], ...}
"""
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(mention_starts[predicted_index]), int(mention_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(mention_starts[i]), int(mention_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[i] for m, i in mention_to_predicted.items()}
return predicted_clusters, mention_to_predicted
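    # Illustrative example (added for clarity, not from the original source): with mention spans
    # [(0, 1), (3, 3), (5, 6)] and predicted_antecedents = [-1, -1, 0], mention (5, 6) is linked
    # to mention 0, giving predicted_clusters = [((0, 1), (5, 6))] and mention_to_predicted
    # mapping both spans to that cluster; mention (3, 3) ends up in no cluster because it has no
    # predicted antecedent and is no one's antecedent.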
def init_from_saved(self, saver):
"""
Load model from saved checkpoint.
Args:
saver: tf.saver
Returns: Nothing
"""
# checkpoint_path = join(self.log_root, self.opt['name'])
checkpoint_path = self.opt['model_file']
if os.path.isfile(join(checkpoint_path, "model.max.ckpt.meta")):
saver.restore(self.sess, join(checkpoint_path, "model.max.ckpt"))
else:
print('{0} not found'.format(checkpoint_path))
print('Init from scratch')
def shutdown(self):
"""Reset the model"""
tf.reset_default_graph()
def save(self, saver):
"""Save model checkpoint"""
# log_dir = self.log_root
# if isdir(log_dir):
# if isdir(join(log_dir, self.opt['name'])):
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt['name'])
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt["log_root"])
# if isdir(join(log_dir, self.opt['name'])):
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# else:
# os.mkdir(self.opt['name'])
# print('saving path ' + join(log_dir, self.opt['name'], 'model.max.ckpt'))
# saver.save(self.sess, join(log_dir, self.opt['name'], 'model.max.ckpt'))
# save in root folder
print('saving path ' + join(self.opt['model_file'], 'model.max.ckpt'))
saver.save(self.sess, join(self.opt['model_file'], 'model.max.ckpt'))
def train(self, batch):
"""
Run train operation on one batch/document
Args:
batch: list of tensors for placeholders, output of "tensorize_example" function
        Returns: Loss function value and tf.global_step
"""
self.start_enqueue_thread(batch, True)
self.tf_loss, tf_global_step, _ = self.sess.run([self.loss, self.global_step, self.train_op])
return self.tf_loss, tf_global_step
def predict(self, batch, out_file):
"""
        Predicts new coreference clusters and writes them into a conll document.
Args:
batch: list of tensors for placeholders, output of "tensorize_example" function
out_file: original conll document
Returns: str with new conll document, with new coreference clusters
"""
self.start_enqueue_thread(batch, False)
if self.opt['train_on_gold']:
_, mention_starts, mention_ends, antecedents, antecedent_scores = self.sess.run(self.predictions)
else:
_, _, _, mention_starts, mention_ends, antecedents, antecedent_scores = self.sess.run(self.predictions)
predicted_antecedents = self.get_predicted_antecedents(antecedents, antecedent_scores)
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(mention_starts, mention_ends,
predicted_antecedents)
        new_clusters = dict()
        new_clusters[batch['doc_key']] = predicted_clusters
        outconll = utils.output_conll(out_file, new_clusters)
return outconll
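    # Hedged usage sketch (added for illustration; "opt", "train_batch", "test_batch" and
    # "conll_file" are hypothetical placeholders, not part of the original code):
    #
    #   model = CorefModel(opt)
    #   saver = tf.train.Saver()
    #   model.init_from_saved(saver)
    #   loss, step = model.train(train_batch)             # one document per call
    #   new_conll = model.predict(test_batch, conll_file)
    #   model.save(saver)
    #   model.shutdown()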
def get_predictions_and_loss_on_gold(self, word_emb, char_index, text_len, speaker_ids, genre, is_training,
gold_starts, gold_ends, cluster_ids):
"""
        Connects all elements of the network into one complete graph that uses gold mentions,
        and passes through it the tensors that came to the placeholder inputs.
Args:
word_emb: [Amount of sentences, Amount of words in sentence (max len), self.embedding_size],
float64, Text embeddings.
char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
tf.int32, Character indices.
text_len: tf.int32, [Amount of sentences]
speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
genre: [Amount of independent genres], tf.int32, Genre
is_training: tf.bool
gold_starts: tf.int32, [Amount of gold mentions]
gold_ends: tf.int32, [Amount of gold mentions]
cluster_ids: tf.int32, [Amount of independent clusters]
Returns:[candidate_starts, candidate_ends, candidate_mention_scores, mention_starts, mention_ends, antecedents,
antecedent_scores], loss
List of predictions and scores, and Loss function value
"""
self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["dropout_rate"])
self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.opt["lexical_dropout_rate"])
# assert gold_ends.shape == gold_starts.shape,\
# ('Amount of starts and ends of gold mentions are not equal: '
# 'Length of gold starts: {1}; Length of gold ends: {0}'.format(gold_ends.shape, gold_starts.shape))
num_sentences = tf.shape(word_emb)[0]
max_sentence_length = tf.shape(word_emb)[1]
text_emb_list = [word_emb]
if self.opt["char_embedding_size"] > 0:
char_emb = tf.gather(
tf.get_variable("char_embeddings", [len(self.char_dict), self.opt["char_embedding_size"]],
dtype=tf.float64),
char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, utils.shape(char_emb, 2),
utils.shape(char_emb,
3)])
# [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = utils.cnn(flattened_char_emb, self.opt["filter_widths"], self.opt[
"filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb,
[num_sentences,
max_sentence_length,
utils.shape(flattened_aggregated_char_emb, 1)])
# [num_sentences, max_sentence_length, emb]
text_emb_list.append(aggregated_char_emb)
text_emb = tf.concat(text_emb_list, 2)
text_emb = tf.nn.dropout(text_emb, self.lexical_dropout)
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length)
text_len_mask = tf.reshape(text_len_mask, [num_sentences * max_sentence_length])
text_outputs = self.encode_sentences(text_emb, text_len, text_len_mask)
text_outputs = tf.nn.dropout(text_outputs, self.dropout)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.opt["feature_size"]],
dtype=tf.float64),
genre) # [emb]
# sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
# [1, max_sentence_length]) # [num_sentences, max_sentence_length]
# flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
flattened_text_emb = self.flatten_emb_by_sentence(text_emb, text_len_mask) # [num_words]
candidate_mention_emb = self.get_mention_emb(flattened_text_emb, text_outputs, gold_starts,
gold_ends) # [num_candidates, emb]
# candidate_mention_scores = self.get_mention_scores(candidate_mention_emb) # [num_mentions, 1]
# candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [num_mentions]
gold_len = tf.shape(gold_ends)
candidate_mention_scores = tf.ones(gold_len, dtype=tf.float64)
mention_starts = gold_starts
mention_ends = gold_ends
mention_emb = candidate_mention_emb
mention_scores = candidate_mention_scores
# mention_start_emb = tf.gather(text_outputs, mention_starts) # [num_mentions, emb]
# mention_end_emb = tf.gather(text_outputs, mention_ends) # [num_mentions, emb]
mention_speaker_ids = tf.gather(speaker_ids, mention_starts) # [num_mentions]
max_antecedents = self.opt["max_antecedents"]
antecedents, antecedent_labels, antecedents_len = self.get_antecedents(mention_starts, mention_ends,
gold_starts, gold_ends, cluster_ids,
max_antecedents)
# ([num_mentions, max_ant], [num_mentions, max_ant + 1], [num_mentions]
antecedents.set_shape([None, None])
antecedent_labels.set_shape([None, None])
antecedents_len.set_shape([None])
antecedent_scores = self.get_antecedent_scores(mention_emb, mention_scores, antecedents, antecedents_len,
mention_speaker_ids, genre_emb) # [num_mentions, max_ant + 1]
loss = self.softmax_loss(tf.cast(antecedent_scores, tf.float64), antecedent_labels) # [num_mentions]
loss = tf.reduce_sum(loss) # []
return [candidate_mention_scores, mention_starts, mention_ends, antecedents, antecedent_scores], loss
[
"tensorflow.tile",
"tensorflow.NotDifferentiable",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.boolean_mask",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.gradients",
"numpy.array",
"tensorflow.nn.dropout",
"tensorflow.reverse_sequence",
"copy.deepcopy",
"tensorflow.PaddingFIFOQueue",
"tensorflow.set_random_seed",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.nn.dynamic_rnn",
"tensorflow.assign",
"tensorflow.concat",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"tensorflow.trainable_variables",
"random.randint",
"tensorflow.variable_scope",
"tensorflow.Variable",
"numpy.argmax",
"tensorflow.to_int32",
"tensorflow.range",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.reset_default_graph",
"numpy.logical_and",
"tensorflow.ones",
"tensorflow.reduce_logsumexp",
"os.path.join",
"tensorflow.global_variables_initializer",
"tensorflow.sequence_mask",
"tensorflow.squeeze"
]
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 17:12, 09/07/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import platform
from matplotlib import pyplot as plt
from numpy import arange
from pathlib import Path
import re
LIST_LINESTYLES = [
'-', # solid line style
'--', # dashed line style
'-.', # dash-dot line style
    ':',  # dotted line style
's', # square marker
'*', # star marker
'p', # pentagon marker
'+', # plus marker
'x', # x marker
'd', # thin diamond marker
]
LIST_COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
def __clean_filename__(filename):
chars_to_remove = ["`", "~", "!", "@", "#", "$", "%", "^", "&", "*", ":", ",", "<", ">", ";", "+", "|"]
regular_expression = '[' + re.escape(''.join(chars_to_remove)) + ']'
temp = filename.encode("ascii", "ignore")
fname = temp.decode() # Removed all non-ascii characters
fname = re.sub(regular_expression, '', fname) # Removed all special characters
fname.replace("_", "-") # Replaced _ by -
return fname
def __check_filepath__(filename):
    filename = filename.replace("\\", "/")     # Normalise path separators to better handle the parent folder
if "/" in filename:
list_names = filename.split("/")[:-1] # Remove last element because it is filename
filepath = "/".join(list_names)
print(f"Fucking for real? {filepath}")
Path(filepath).mkdir(parents=True, exist_ok=True)
return filename
def _draw_line_(data=None, title=None, linestyle='-', color='b', x_label="#Iteration", y_label="Function Value",
filename=None, exts=(".png", ".pdf"), verbose=True):
x = arange(0, len(data))
y = data
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.plot(x, y, linestyle=linestyle, color=color,)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def _draw_multi_line_(data=None, title=None, list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Function Value", filename=None, exts=(".png", ".pdf"), verbose=True):
x = arange(0, len(data[0]))
for idx, y in enumerate(data):
plt.plot(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def _draw_multi_line_in_same_figure_(data=None, title=None, list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Objective", filename=None, exts=(".png", ".pdf"), verbose=True):
n_lines = len(data)
len_lines = len(data[0])
x = arange(0, len_lines)
if n_lines == 1:
fig, ax = plt.subplots()
if list_legends is None:
ax.plot(x, data[0])
else:
ax.plot(x, data[0], label=list_legends[0])
ax.set_title(title)
elif n_lines > 1:
fig, ax_list = plt.subplots(n_lines, sharex=True)
fig.suptitle(title)
for idx, ax in enumerate(ax_list):
if list_legends is None:
ax.plot(x, data[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
else:
ax.plot(x, data[idx], label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
ax.set_ylabel(f"Objective {idx + 1}")
if idx == (n_lines - 1):
ax.set_xlabel(x_label)
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
def export_convergence_chart(data=None, title="Convergence Chart", linestyle='-', color='b', x_label="#Iteration",
y_label="Function Value", filename="convergence_chart", exts=(".png", ".pdf"), verbose=True):
_draw_line_(data, title=title, linestyle=linestyle, color=color, x_label=x_label, y_label=y_label,
filename=filename, exts=exts, verbose=verbose)
def export_explore_exploit_chart(data=None, title="Exploration vs Exploitation Percentages", list_legends=("Exploration %", "Exploitation %"),
list_styles=('-', '-'), list_colors=('blue', 'orange'), x_label="#Iteration", y_label="Percentage",
filename="explore_exploit_chart", exts=(".png", ".pdf"), verbose=True):
_draw_multi_line_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_diversity_chart(data=None, title='Diversity Measurement Chart', list_legends=None,
list_styles=None, list_colors=None, x_label="#Iteration", y_label="Diversity Measurement",
filename="diversity_chart", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
_draw_multi_line_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_objectives_chart(data=None, title="Objectives chart", list_legends=None, list_styles=None, list_colors=None,
x_label="#Iteration", y_label="Function Value", filename="Objective-chart", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
_draw_multi_line_in_same_figure_(data=data, title=title, list_legends=list_legends, list_styles=list_styles, list_colors=list_colors,
x_label=x_label, y_label=y_label, filename=filename, exts=exts, verbose=verbose)
def export_trajectory_chart(data=None, n_dimensions=1, title="Trajectory of some first agents after generations", list_legends=None,
list_styles=None, list_colors=None, x_label="#Iteration", y_label="X1",
filename="1d_trajectory", exts=(".png", ".pdf"), verbose=True):
if list_styles is None:
list_styles = LIST_LINESTYLES[:len(data)]
if list_colors is None:
list_colors = LIST_COLORS[:len(data)]
if n_dimensions == 1:
x = arange(0, len(data[0]))
for idx, y in enumerate(data):
plt.plot(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
elif n_dimensions == 2:
for idx, point in enumerate(data):
plt.plot(point[0], point[1], label=list_legends[idx], markerfacecolor=list_colors[idx], linestyle=list_styles[idx])
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend() # show a legend on the plot
if filename is not None:
filepath = __check_filepath__(__clean_filename__(filename))
for idx, ext in enumerate(exts):
plt.savefig(f"{filepath}{ext}", bbox_inches='tight')
if platform.system() != "Linux" and verbose:
plt.show()
plt.close()
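# A minimal usage sketch (not part of the original module): export a convergence
# chart and an exploration/exploitation chart for a fake optimisation run. The
# output folder "demo" is an illustrative assumption.
if __name__ == "__main__":
    iterations = 50
    fitness_history = [100.0 / (it + 1) for it in range(iterations)]
    export_convergence_chart(fitness_history, title="Demo convergence",
                             filename="demo/convergence_chart", verbose=False)
    explore = [100.0 * (1.0 - it / iterations) for it in range(iterations)]
    exploit = [100.0 * it / iterations for it in range(iterations)]
    export_explore_exploit_chart([explore, exploit],
                                 filename="demo/explore_exploit_chart", verbose=False)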
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"platform.system",
"re.sub",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1732, 1769), 're.sub', 're.sub', (['regular_expression', '""""""', 'fname'], {}), "(regular_expression, '', fname)\n", (1738, 1769), False, 'import re\n'), ((2541, 2557), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2550, 2557), True, 'from matplotlib import pyplot as plt\n'), ((2562, 2581), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2572, 2581), True, 'from matplotlib import pyplot as plt\n'), ((2586, 2605), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2596, 2605), True, 'from matplotlib import pyplot as plt\n'), ((2610, 2658), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linestyle': 'linestyle', 'color': 'color'}), '(x, y, linestyle=linestyle, color=color)\n', (2618, 2658), True, 'from matplotlib import pyplot as plt\n'), ((2664, 2676), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2674, 2676), True, 'from matplotlib import pyplot as plt\n'), ((2981, 2992), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2990, 2992), True, 'from matplotlib import pyplot as plt\n'), ((3400, 3416), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3409, 3416), True, 'from matplotlib import pyplot as plt\n'), ((3421, 3440), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3431, 3440), True, 'from matplotlib import pyplot as plt\n'), ((3445, 3464), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3455, 3464), True, 'from matplotlib import pyplot as plt\n'), ((3469, 3481), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3479, 3481), True, 'from matplotlib import pyplot as plt\n'), ((3786, 3797), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3795, 3797), True, 'from matplotlib import pyplot as plt\n'), ((4109, 4129), 'numpy.arange', 'arange', (['(0)', 'len_lines'], {}), '(0, len_lines)\n', (4115, 4129), False, 'from numpy import arange\n'), ((5180, 5191), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5189, 5191), True, 'from matplotlib import pyplot as plt\n'), ((8456, 8472), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8465, 8472), True, 'from matplotlib import pyplot as plt\n'), ((8477, 8496), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (8487, 8496), True, 'from matplotlib import pyplot as plt\n'), ((8501, 8520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (8511, 8520), True, 'from matplotlib import pyplot as plt\n'), ((8525, 8537), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8535, 8537), True, 'from matplotlib import pyplot as plt\n'), ((8842, 8853), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8851, 8853), True, 'from matplotlib import pyplot as plt\n'), ((2966, 2976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2974, 2976), True, 'from matplotlib import pyplot as plt\n'), ((3293, 3398), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'list_legends[idx]', 'markerfacecolor': 'list_colors[idx]', 'linestyle': 'list_styles[idx]'}), '(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx],\n linestyle=list_styles[idx])\n', (3301, 3398), True, 'from matplotlib import pyplot as plt\n'), ((3771, 3781), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3779, 3781), True, 'from matplotlib import pyplot as plt\n'), ((4170, 4184), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4182, 4184), True, 
'from matplotlib import pyplot as plt\n'), ((5165, 5175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5173, 5175), True, 'from matplotlib import pyplot as plt\n'), ((8827, 8837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8835, 8837), True, 'from matplotlib import pyplot as plt\n'), ((2856, 2908), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (2867, 2908), True, 'from matplotlib import pyplot as plt\n'), ((2916, 2933), 'platform.system', 'platform.system', ([], {}), '()\n', (2931, 2933), False, 'import platform\n'), ((3661, 3713), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (3672, 3713), True, 'from matplotlib import pyplot as plt\n'), ((3721, 3738), 'platform.system', 'platform.system', ([], {}), '()\n', (3736, 3738), False, 'import platform\n'), ((4392, 4426), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_lines'], {'sharex': '(True)'}), '(n_lines, sharex=True)\n', (4404, 4426), True, 'from matplotlib import pyplot as plt\n'), ((5055, 5107), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (5066, 5107), True, 'from matplotlib import pyplot as plt\n'), ((5115, 5132), 'platform.system', 'platform.system', ([], {}), '()\n', (5130, 5132), False, 'import platform\n'), ((8150, 8255), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'list_legends[idx]', 'markerfacecolor': 'list_colors[idx]', 'linestyle': 'list_styles[idx]'}), '(x, y, label=list_legends[idx], markerfacecolor=list_colors[idx],\n linestyle=list_styles[idx])\n', (8158, 8255), True, 'from matplotlib import pyplot as plt\n'), ((8717, 8769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{filepath}{ext}"""'], {'bbox_inches': '"""tight"""'}), "(f'{filepath}{ext}', bbox_inches='tight')\n", (8728, 8769), True, 'from matplotlib import pyplot as plt\n'), ((8777, 8794), 'platform.system', 'platform.system', ([], {}), '()\n', (8792, 8794), False, 'import platform\n'), ((2236, 2250), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (2240, 2250), False, 'from pathlib import Path\n'), ((8335, 8455), 'matplotlib.pyplot.plot', 'plt.plot', (['point[0]', 'point[1]'], {'label': 'list_legends[idx]', 'markerfacecolor': 'list_colors[idx]', 'linestyle': 'list_styles[idx]'}), '(point[0], point[1], label=list_legends[idx], markerfacecolor=\n list_colors[idx], linestyle=list_styles[idx])\n', (8343, 8455), True, 'from matplotlib import pyplot as plt\n')]
|
#encoding: UTF-8
# Copyright (C) 2016 <NAME>
# This file is distributed under the terms of the MIT License.
# See the file `License' in the root directory of the present distribution.
"""
An earlier and now obsolete implementation of functions for computing the
thermal expansion tensor as a function of temperature from the Gruneisen
parameters, the mode contributions to the heat capacity, the elastic tensor
and the unit cell volume.
Use :py:mod:`alphagruneisenp` instead.
"""
import numpy as np
import time
import math
import sys
from .read import read_Etot, read_freq, read_freq_ext, read_elastic_constants, \
read_elastic_constants_geo, read_freq_ext_geo
from .write import write_freq, write_freq_ext, write_alphaT, write_qha_C, write_qha_CT
from .constants import RY_KBAR, K_BOLTZMANN_RY, kb1
from .fitutils import fit_anis
from .minutils import find_min, fquadratic, fquartic
from .fitfreqgrun import fitfreq, fitfreqxx, freqmingrun, rearrange_freqx
from .fitFvib import fitFvib
from .fitC import rearrange_Cx, fitCxx
from .grunc import c_qvc # This is the same routine c_qv implemented in C to speed it up
################################################################################
#
# Compute the volume given the celldms, only for ibrav=4 for now
def compute_volume(celldms,ibrav=4):
if ibrav==4:
return 0.866025404*celldms[0]*celldms[0]*celldms[2]
#return 0.866025404*celldms[0]*celldms[0]*celldms[0]*celldms[2]
################################################################################
#
# Function to calculate the mode contribution to the heat capacity at a given T
# and omega
# This is a possible bottleneck as it is implemented in Python. It would be
# better to write it in C and link it to CPython or similar
#
#
def c_qv(T,omega):
if (T<1E-9 or omega<1E-9):
return 0.0
x = omega * kb1 / T
expx = math.exp(-x) # exponential term
x2 = math.pow(x,2)
if expx>1E-3: # compute normally
return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)
else: # Taylor series
return K_BOLTZMANN_RY*expx* (x/math.pow(x-0.5*math.pow(x,2)+
0.16666666666666667*math.pow(x,3)+0.04166666666666666667*math.pow(x,4),2))
# Same as c_qv but no if. Slightly more efficient, roughly a 30% faster
def c_qv2(T,omega):
x = omega * kb1 / T
expx = math.exp(-x) # exponential term
x2 = math.pow(x,2)
return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)
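# A hedged, vectorised alternative (not part of the original module) to the C
# routine c_qvc imported above: the same mode heat capacity evaluated for a
# whole array of frequencies at once with numpy, avoiding the per-mode Python
# loop. It mirrors c_qv2 (no Taylor fallback for extreme arguments).
def c_qv_vectorized(T, omega):
    """Vectorised c_qv: `omega` may be a numpy array of mode frequencies."""
    omega = np.asarray(omega, dtype=float)
    cqv = np.zeros_like(omega)
    if T < 1E-9:
        return cqv
    mask = omega > 1E-9
    x = omega[mask] * kb1 / T
    expx = np.exp(-x)
    cqv[mask] = x * x * K_BOLTZMANN_RY * expx / np.square(expx - 1.0)
    return cqv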
################################################################################
#
# This function computes the thermal expansion tensor alpha using the Gruneisen
# parameters
# more comments to be added
# First with min0, freq and grun T-independent
#
# More ibrav types to be implemented
def compute_alpha_grun(T,V,S,weights,freq,grun,ibrav=4):
nq = freq.shape[0] # total number of q points
modes = freq.shape[1] # number of frequency modes
alpha = np.zeros(6) # inizializations
alphaaux = np.zeros(6)
# compute the Cqv*gruneisen terms, weights for each q-point, and sum
# for each ibrav (crystalline system) proceed in the proper way
if ibrav ==1:
for iq in range(0,nq):
for mode in range(0,modes):
alphaaux[0] += c_qv(T,freq[iq,mode]) * weights[iq] * grun[0,iq,mode]
alphaaux[0] = alphaaux[0] / 3.0
alphaaux[1] = alphaaux[0]
alphaaux[2] = alphaaux[0]
    elif ibrav == 4:
for iq in range(0,nq):
for mode in range(0,modes):
temp = c_qvc(T,freq[iq,mode]) * weights[iq] # should be quicker with this additional variable
alphaaux[0] += temp * grun[0,iq,mode]
alphaaux[2] += temp * grun[2,iq,mode]
alphaaux[0] = alphaaux[0] / 2.0
alphaaux[1] = alphaaux[0]
else:
print ("Not implemented yet")
# multiply for the elastic compliances
for i in range(0,6):
for j in range(0,6):
alpha[i] += alphaaux[j]*S[i,j]
alpha = -alpha/V
return alpha
def compute_alpha_gruneisein(inputfileEtot,inputfileC,inputfilefreq,rangeT,typeEtot,typefreq,ibrav):
# Read the energies
celldmsx, Ex = read_Etot(inputfileEtot)
    # Fit and find the minimum at 0 K
a0, chia0 = fit_anis(celldmsx, Ex, ibrav, out=True, type=typeEtot)
if chia0!=None:
min0, fmin0 = find_min(a0, ibrav, type=typeEtot, guess=guess)
    # First read the elastic compliances, which are needed for the thermal expansions
print ("Reading elastic constants and compliances from file "+inputfileC+"...")
C, S = read_elastic_constants(inputfileC)
#print (S)
# Compute the Gruneisen parameters
weights, freq, grun = fitfreq(celldmsx, min0, inputfilefreq, ibrav, typefreq="quadratic", compute_grun=True)
# Alternatively, we can read the gruneisen parameters from files (already written before)
#weights, freq = read_freq_ext("average_freq0K")
#weights, gruntemp1 = read_freq_ext("output_grun_along_a_ext3Dfit1.0")
#weights, gruntemp2 = read_freq_ext("output_grun_along_c_ext3Dfit1.0")
#nq = gruntemp1.shape[0]
#modes = gruntemp1.shape[1]
#grun = np.zeros((6,nq,modes))
#grun[0] = gruntemp1
#grun[1] = gruntemp1
#grun[2] = gruntemp2
V=compute_volume(min0,ibrav) # eq. volume at 0 K
print ("V = ",str(V))
S = S * RY_KBAR # convert elastic compliances in (Ryd/au)^-1
alphaT= np.zeros((len(rangeT),6))
counterT=0
for T in rangeT:
alpha = compute_alpha_grun(T,V,S,weights,freq,grun)
alphaT[counterT]=alpha
counterT += 1
print ("T= "+str(T)+"\t"+str(alpha[0])+"\t"+str(alpha[2]))
write_alphaT("alpha_gruneisen",rangeT,alphaT,4)
def compute_alpha_gruneiseinT(inputfileEtot,inputfileFvib,inputfileC,inputfilefreq,typeEtot,typeFvib,typefreq,ibrav,guess):
# Read the energies
celldmsx, Ex = read_Etot(inputfileEtot)
T, minT, fminT = fitFvib(inputfileEtot,inputfileFvib,ibrav,typeEtot,typeFvib,guess)
    # First read the elastic compliances, which are needed for the thermal expansions
print ("Reading elastic constants and compliances from file "+inputfileC+"...")
C, S = read_elastic_constants(inputfileC)
print (S)
S = S * RY_KBAR # convert elastic compliances in (Ryd/au)^-1
# get the weigths and the frequencies from files
weightsx, freqx = read_freq_ext_geo(inputfilefreq,range(1,celldmsx.shape[0]+1))
weights = weightsx[0,:]
print ("Rearranging frequencies...")
freqxx = rearrange_freqx(freqx)
print ("Done!")
del freqx
print ("Fitting frequencies...")
afreq, chifreq = fitfreqxx(celldmsx, freqxx, ibrav, True, typefreq)
print ("Done!")
alphaT= np.zeros((len(T),6))
for i in range(0,len(T)):
# Compute the Gruneisen parameters, the average frequencies and alpha at each T
V=compute_volume(minT[i],ibrav)
print ("V = ",str(V))
freq, grun = freqmingrun(afreq, minT[i], freqxx.shape[0],freqxx.shape[1], ibrav, typefreq)
#write_freq_ext(weights,freq,"average_freqPython"+str(T[i]))
#write_freq_ext(weights,grun[0],"output_grun_along_a_ext3Dfit"+str(T[i]))
#write_freq_ext(weights,grun[2],"output_grun_along_c_ext3Dfit"+str(T[i]))
alpha = compute_alpha_grun(T[i],V,S,weights,freq,grun)
print ("T= "+str(T[i]))
print (alpha)
alphaT[i,:] = alpha
write_alphaT("alpha_gruneisenT",T,alphaT,4)
################################################################################
#
# This function is only meant to test the Cqv modes. It has to be removed later...
#
def testCqv(inputfilefreq, rangeT, out="Cqvtest"):
weights, freq = read_freq_ext(inputfilefreq)
nq = freq.shape[0] # total number of q points read
modes = freq.shape[1] # number of frequency modes
for T in rangeT:
Cqv = []
for iq in range(0,nq):
Cqvq=[]
for ifreq in range(0,modes):
temp = c_qv2(T,freq[iq,ifreq])
Cqvq.append(temp)
Cqv.append(Cqvq)
Cqv = np.array(Cqv)
outT = out+str(T)
write_freq_ext(weights,Cqv,outT)
################################################################################
# An auxiliary function for fitting the elastic constant elements of Sxx
#
#
def fitS(inputfileEtot, inputpathCx, ibrav, typeSx="quadratic"):
# Read the energies (this is necessary to read the celldmsx)
celldmsx, Ex = read_Etot(inputfileEtot)
ngeo = len(Ex)
Cx, Sx = read_elastic_constants_geo(ngeo, inputpathCx)
# This function works for both C and S, here I use it for S
Sxx = rearrange_Cx(Sx,ngeo)
write_qha_C(celldmsx, Sxx, ibrav, inputpathCx) # Write the S as a function of T for reference
aS, chiS = fitCxx(celldmsx, Sxx, ibrav, True, typeSx)
return aS, chiS
def fitST(aS,mintemp,typeCx):
S = np.zeros((6,6))
for i in range(0,6):
for j in range(0,6):
if typeCx=="quadratic":
S[i,j] = fquadratic(mintemp,aS[i,j],ibrav=4)
elif typeCx=="quartic":
S[i,j] = fquartic(mintemp,aS[i,j],ibrav=4)
return S
def compute_alpha_gruneiseinCT(inputfileEtot,inputfileFvib,inputpathCx,inputfilefreq,typeEtot,typeFvib,typeSx,typefreq,ibrav,guess):
# Read the energies
celldmsx, Ex = read_Etot(inputfileEtot)
T, minT, fminT = fitFvib(inputfileEtot,inputfileFvib,ibrav,typeEtot,typeFvib,guess)
# Get the polynomial coefficients aS from fitting the elastic compliances (to be used later to get S(T))
aS, chiS = fitS(inputfileEtot, inputpathCx, ibrav, typeSx)
# Now get the polynomial coeffients afreq from fitting the frequencies (to be used later to get average frequencies and
# gruneisen parameters as a function of T)
weightsx, freqx = read_freq_ext_geo(inputfilefreq,range(1,celldmsx.shape[0]+1))
weights = weightsx[0,:]
print ("Rearranging frequencies...")
freqxx = rearrange_freqx(freqx)
print ("Done!")
del freqx
print ("Fitting frequencies...")
afreq, chifreq = fitfreqxx(celldmsx, freqxx, ibrav, True, typefreq)
print ("Done!")
alphaT= np.zeros((len(T),6))
for i in range(0,len(T)):
# Compute the Gruneisen parameters, the average frequencies and alpha at each T
V=compute_volume(minT[i],ibrav)
print ("V = ",str(V))
S = fitST(aS,minT[i],typeSx)
print (S)
S = S * RY_KBAR # convert elastic compliances in (Ryd/au)^-1
freq, grun = freqmingrun(afreq, minT[i], freqxx.shape[0],freqxx.shape[1], ibrav, typefreq)
#write_freq_ext(weights,freq,"average_freqPython"+str(T[i]))
#write_freq_ext(weights,grun[0],"output_grun_along_a_ext3Dfit"+str(T[i]))
#write_freq_ext(weights,grun[2],"output_grun_along_c_ext3Dfit"+str(T[i]))
alpha = compute_alpha_grun(T[i],V,S,weights,freq,grun)
print ("T= "+str(T[i]))
print (alpha)
alphaT[i,:] = alpha
write_alphaT("alpha_gruneisenT",T,alphaT,4)
|
[
"math.pow",
"numpy.array",
"math.exp",
"numpy.zeros"
] |
[((1910, 1922), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (1918, 1922), False, 'import math\n'), ((1953, 1967), 'math.pow', 'math.pow', (['x', '(2)'], {}), '(x, 2)\n', (1961, 1967), False, 'import math\n'), ((2399, 2411), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (2407, 2411), False, 'import math\n'), ((2442, 2456), 'math.pow', 'math.pow', (['x', '(2)'], {}), '(x, 2)\n', (2450, 2456), False, 'import math\n'), ((2985, 2996), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (2993, 2996), True, 'import numpy as np\n'), ((3034, 3045), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3042, 3045), True, 'import numpy as np\n'), ((9113, 9129), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (9121, 9129), True, 'import numpy as np\n'), ((2491, 2514), 'math.pow', 'math.pow', (['(expx - 1.0)', '(2)'], {}), '(expx - 1.0, 2)\n', (2499, 2514), False, 'import math\n'), ((8291, 8304), 'numpy.array', 'np.array', (['Cqv'], {}), '(Cqv)\n', (8299, 8304), True, 'import numpy as np\n'), ((2052, 2075), 'math.pow', 'math.pow', (['(expx - 1.0)', '(2)'], {}), '(expx - 1.0, 2)\n', (2060, 2075), False, 'import math\n'), ((2252, 2266), 'math.pow', 'math.pow', (['x', '(4)'], {}), '(x, 4)\n', (2260, 2266), False, 'import math\n'), ((2215, 2229), 'math.pow', 'math.pow', (['x', '(3)'], {}), '(x, 3)\n', (2223, 2229), False, 'import math\n'), ((2171, 2185), 'math.pow', 'math.pow', (['x', '(2)'], {}), '(x, 2)\n', (2179, 2185), False, 'import math\n')]
|
from __future__ import division
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import *
"""
Module with different fitness functions implemented to be used by the CRO algorithm.
Each fitness function must take a single argument, an individual (coral), and return its fitness as a number.
The fitness might require other arguments; in that case the partial function in Python's functools module is a very good option (see the usage sketch after feature_selection below).
"""
def max_ones(coral):
"""
    Description: Returns the percentage of 1's in the coral. This function assumes 'coral' is a list;
    it could be further improved if it were a numpy array.
Input:
- coral
Output:
- fitness
"""
return 100*(sum(coral) / len(coral))
def feature_selection(coral, X, y, model,
get_prediction = lambda model, X: model.predict(X),
metric=roc_auc_score, random_seed=None):
"""
Description: Returns the fitness (given by metric) of the selected features given by coral,
    when using X and y for training the given model
Input:
- coral : an individual
- X: Data input
- y: Data output
- model: instance of the model to be trained
- get_prediction: function that accepts the model and X and outputs the vector
that will be used in the metric (predictions, scores...)
- metric: metric that will be used as fitness
Output:
- fitness
"""
    # use the first 90% of the shuffled data for training, the rest for testing
offset = int(X.shape[0] * 0.9)
Xs, ys = shuffle(X, y, random_state=random_seed)
Xs = np.multiply(Xs, coral)
X_train, y_train = Xs[:offset], ys[:offset]
X_test, y_test = Xs[offset:], ys[offset:]
# train model
model.fit(X_train, y_train)
# Compute metric
y_pred = get_prediction(model, X_test)
fitness = metric(y_test, y_pred)
return fitness
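# A minimal usage sketch (not part of the original module): the extra arguments
# of `feature_selection` are bound with functools.partial, as suggested in the
# module docstring, so the resulting callable only takes a coral. The dataset
# and model below are illustrative assumptions.
if __name__ == "__main__":
    from functools import partial
    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import LogisticRegression

    X_demo, y_demo = load_breast_cancer(return_X_y=True)
    fitness_fn = partial(feature_selection, X=X_demo, y=y_demo,
                         model=LogisticRegression(max_iter=1000),
                         metric=roc_auc_score, random_seed=0)
    coral_all_features = np.ones(X_demo.shape[1], dtype=int)   # keep every feature
    print("max_ones:", max_ones(list(coral_all_features)))
    print("feature_selection fitness:", fitness_fn(coral_all_features))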
|
[
"sklearn.utils.shuffle",
"numpy.multiply"
] |
[((1584, 1623), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {'random_state': 'random_seed'}), '(X, y, random_state=random_seed)\n', (1591, 1623), False, 'from sklearn.utils import shuffle\n'), ((1633, 1655), 'numpy.multiply', 'np.multiply', (['Xs', 'coral'], {}), '(Xs, coral)\n', (1644, 1655), True, 'import numpy as np\n')]
|
import numpy as np
from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .runge_kutta import AbstractESDIRK, ButcherTableau
γ = 0.26
a21 = γ
a31 = 0.13
a32 = 0.84033320996790809
a41 = 0.22371961478320505
a42 = 0.47675532319799699
a43 = -0.06470895363112615
a51 = 0.16648564323248321
a52 = 0.10450018841591720
a53 = 0.03631482272098715
a54 = -0.13090704451073998
a61 = 0.13855640231268224
a62 = 0
a63 = -0.04245337201752043
a64 = 0.02446657898003141
a65 = 0.61943039072480676
a71 = 0.13659751177640291
a72 = 0
a73 = -0.05496908796538376
a74 = -0.04118626728321046
a75 = 0.62993304899016403
a76 = 0.06962479448202728
# Predictors taken from
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/tableaus/sdirk_tableaus.jl#L1444 # noqa: E501
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/perform_step/kencarp_kvaerno_perform_step.jl#L1123 # noqa: E501
# This is with the exception of α21, which is mistakenly set to zero.
#
# See also /devdocs/predictor_dirk.md
α21 = 1.0
α31 = -1.366025403784441
α32 = 2.3660254037844357
α41 = -0.19650552613122207
α42 = 0.8113579546496623
α43 = 0.38514757148155954
α51 = 0.10375304369958693
α52 = 0.937994698066431
α53 = -0.04174774176601781
α61 = -0.17281112873898072
α62 = 0.6235784481025847
α63 = 0.5492326806363959
α71 = a61
α72 = a62
α73 = a63
α74 = a64
α75 = a65
α76 = γ
_kvaerno5_tableau = ButcherTableau(
a_lower=(
np.array([a21]),
np.array([a31, a32]),
np.array([a41, a42, a43]),
np.array([a51, a52, a53, a54]),
np.array([a61, a62, a63, a64, a65]),
np.array([a71, a72, a73, a74, a75, a76]),
),
a_diagonal=np.array([0, γ, γ, γ, γ, γ, γ]),
a_predictor=(
np.array([α21]),
np.array([α31, α32]),
np.array([α41, α42, α43]),
np.array([α51, α52, α53, 0]),
np.array([α61, α62, α63, 0, 0]),
np.array([α71, α72, α73, α74, α75, α76]),
),
b_sol=np.array([a71, a72, a73, a74, a75, a76, γ]),
b_error=np.array(
[a71 - a61, a72 - a62, a73 - a63, a74 - a64, a75 - a65, a76 - γ, γ]
),
c=np.array(
[0.52, 1.230333209967908, 0.8957659843500759, 0.43639360985864756, 1.0, 1.0]
),
)
class Kvaerno5(AbstractESDIRK):
r"""Kvaerno's 5/4 method.
A-L stable stiffly accurate 5th order ESDIRK method. Has an embedded 4th order
method for adaptive step sizing. Uses 7 stages.
When solving an ODE over the interval $[t_0, t_1]$, note that this method will make
some evaluations slightly past $t_1$.
??? cite "Reference"
```bibtex
@article{kvaerno2004singly,
title={Singly diagonally implicit Runge--Kutta methods with an explicit first
stage},
author={Kv{\ae}rn{\o}, Anne},
journal={BIT Numerical Mathematics},
volume={44},
number={3},
pages={489--502},
year={2004},
publisher={Springer}
}
```
"""
tableau = _kvaerno5_tableau
interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k
def order(self, terms):
return 5
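# Hedged usage sketch (illustrative; it assumes the public diffrax API rather
# than anything defined in this file, so treat the names and signatures below as
# assumptions): integrating a stiff linear ODE dy/dt = -50*y with Kvaerno5 and
# an adaptive PID step-size controller.
if __name__ == "__main__":
    import diffrax

    term = diffrax.ODETerm(lambda t, y, args: -50.0 * y)
    solution = diffrax.diffeqsolve(
        term,
        diffrax.Kvaerno5(),
        t0=0.0,
        t1=1.0,
        dt0=0.01,
        y0=1.0,
        stepsize_controller=diffrax.PIDController(rtol=1e-5, atol=1e-8),
    )
    print(solution.ts, solution.ys)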
|
[
"numpy.array"
] |
[((1729, 1760), 'numpy.array', 'np.array', (['[0, γ, γ, γ, γ, γ, γ]'], {}), '([0, γ, γ, γ, γ, γ, γ])\n', (1737, 1760), True, 'import numpy as np\n'), ((2016, 2059), 'numpy.array', 'np.array', (['[a71, a72, a73, a74, a75, a76, γ]'], {}), '([a71, a72, a73, a74, a75, a76, γ])\n', (2024, 2059), True, 'import numpy as np\n'), ((2073, 2150), 'numpy.array', 'np.array', (['[a71 - a61, a72 - a62, a73 - a63, a74 - a64, a75 - a65, a76 - γ, γ]'], {}), '([a71 - a61, a72 - a62, a73 - a63, a74 - a64, a75 - a65, a76 - γ, γ])\n', (2081, 2150), True, 'import numpy as np\n'), ((2172, 2262), 'numpy.array', 'np.array', (['[0.52, 1.230333209967908, 0.8957659843500759, 0.43639360985864756, 1.0, 1.0]'], {}), '([0.52, 1.230333209967908, 0.8957659843500759, 0.43639360985864756,\n 1.0, 1.0])\n', (2180, 2262), True, 'import numpy as np\n'), ((1490, 1505), 'numpy.array', 'np.array', (['[a21]'], {}), '([a21])\n', (1498, 1505), True, 'import numpy as np\n'), ((1515, 1535), 'numpy.array', 'np.array', (['[a31, a32]'], {}), '([a31, a32])\n', (1523, 1535), True, 'import numpy as np\n'), ((1545, 1570), 'numpy.array', 'np.array', (['[a41, a42, a43]'], {}), '([a41, a42, a43])\n', (1553, 1570), True, 'import numpy as np\n'), ((1580, 1610), 'numpy.array', 'np.array', (['[a51, a52, a53, a54]'], {}), '([a51, a52, a53, a54])\n', (1588, 1610), True, 'import numpy as np\n'), ((1620, 1655), 'numpy.array', 'np.array', (['[a61, a62, a63, a64, a65]'], {}), '([a61, a62, a63, a64, a65])\n', (1628, 1655), True, 'import numpy as np\n'), ((1665, 1705), 'numpy.array', 'np.array', (['[a71, a72, a73, a74, a75, a76]'], {}), '([a71, a72, a73, a74, a75, a76])\n', (1673, 1705), True, 'import numpy as np\n'), ((1788, 1803), 'numpy.array', 'np.array', (['[α21]'], {}), '([α21])\n', (1796, 1803), True, 'import numpy as np\n'), ((1813, 1833), 'numpy.array', 'np.array', (['[α31, α32]'], {}), '([α31, α32])\n', (1821, 1833), True, 'import numpy as np\n'), ((1843, 1868), 'numpy.array', 'np.array', (['[α41, α42, α43]'], {}), '([α41, α42, α43])\n', (1851, 1868), True, 'import numpy as np\n'), ((1878, 1906), 'numpy.array', 'np.array', (['[α51, α52, α53, 0]'], {}), '([α51, α52, α53, 0])\n', (1886, 1906), True, 'import numpy as np\n'), ((1916, 1947), 'numpy.array', 'np.array', (['[α61, α62, α63, 0, 0]'], {}), '([α61, α62, α63, 0, 0])\n', (1924, 1947), True, 'import numpy as np\n'), ((1957, 1997), 'numpy.array', 'np.array', (['[α71, α72, α73, α74, α75, α76]'], {}), '([α71, α72, α73, α74, α75, α76])\n', (1965, 1997), True, 'import numpy as np\n')]
|
import unittest
import torch
import numpy as np
from spectralgp.samplers import MeanEllipticalSlice
class TestMeanEllipticalSlice(unittest.TestCase):
def test_m_ess(self, nsamples=10000):
pmean = torch.zeros(2)
pmean[0] = -2.
prior_dist = torch.distributions.MultivariateNormal(pmean, covariance_matrix=torch.eye(2))
lmean = torch.zeros(2)
lmean[0] = 2.
likelihood = torch.distributions.MultivariateNormal(lmean, covariance_matrix=torch.eye(2))
prior_inv = torch.inverse(prior_dist.covariance_matrix)
lik_inv = torch.inverse(likelihood.covariance_matrix)
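        # Conjugate-Gaussian reference values: with a Gaussian prior and Gaussian
        # likelihood, the exact posterior is N(mu, Sigma) with
        # Sigma = (S_prior^-1 + S_lik^-1)^-1 and
        # mu = Sigma (S_prior^-1 mu_prior + S_lik^-1 mu_lik), computed below.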
true_postsigma = torch.inverse(prior_inv + lik_inv)
true_postmu = true_postsigma.matmul(prior_inv.matmul(pmean) + lik_inv.matmul(lmean))
def lfn(x):
lmean = torch.zeros(2)
lmean[0] = 2.
likelihood = torch.distributions.MultivariateNormal(lmean, covariance_matrix=torch.eye(2))
return likelihood.log_prob(x)
#lfn = lambda x: likelihood.log_prob(x)
init = torch.zeros(2)
m_ess_runner = MeanEllipticalSlice(init, prior_dist, lfn, nsamples)
samples, _ = m_ess_runner.run()
samples = samples.numpy()
samples = samples[:, int(nsamples/2):]
est_mean = np.mean(samples,1)
print(est_mean)
est_cov = np.cov(samples)
print(np.linalg.norm(est_mean - true_postmu.numpy()))
print(np.linalg.norm(est_cov - true_postsigma.numpy()))
# import matplotlib.pyplot as plt
# N = 60
# X = np.linspace(-3, 3, N)
# Y = np.linspace(-3, 4, N)
# X, Y = np.meshgrid(X, Y)
# # Pack X and Y into a single 3-dimensional array
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X
# pos[:, :, 1] = Y
# pos = torch.tensor(pos).float()
# posterior_dist = torch.distributions.MultivariateNormal(true_postmu, true_postsigma)
# Z = posterior_dist.log_prob(pos).numpy()
# plt.contourf(X, Y, Z)
# plt.scatter(samples[0,:], samples[1,:], color='black', alpha = 0.3)
# plt.show()
if __name__ == "__main__":
unittest.main()
|
[
"numpy.mean",
"spectralgp.samplers.MeanEllipticalSlice",
"torch.eye",
"unittest.main",
"numpy.cov",
"torch.zeros",
"torch.inverse"
] |
[((2193, 2208), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2206, 2208), False, 'import unittest\n'), ((210, 224), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (221, 224), False, 'import torch\n'), ((372, 386), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (383, 386), False, 'import torch\n'), ((529, 572), 'torch.inverse', 'torch.inverse', (['prior_dist.covariance_matrix'], {}), '(prior_dist.covariance_matrix)\n', (542, 572), False, 'import torch\n'), ((591, 634), 'torch.inverse', 'torch.inverse', (['likelihood.covariance_matrix'], {}), '(likelihood.covariance_matrix)\n', (604, 634), False, 'import torch\n'), ((661, 695), 'torch.inverse', 'torch.inverse', (['(prior_inv + lik_inv)'], {}), '(prior_inv + lik_inv)\n', (674, 695), False, 'import torch\n'), ((1081, 1095), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (1092, 1095), False, 'import torch\n'), ((1120, 1172), 'spectralgp.samplers.MeanEllipticalSlice', 'MeanEllipticalSlice', (['init', 'prior_dist', 'lfn', 'nsamples'], {}), '(init, prior_dist, lfn, nsamples)\n', (1139, 1172), False, 'from spectralgp.samplers import MeanEllipticalSlice\n'), ((1314, 1333), 'numpy.mean', 'np.mean', (['samples', '(1)'], {}), '(samples, 1)\n', (1321, 1333), True, 'import numpy as np\n'), ((1375, 1390), 'numpy.cov', 'np.cov', (['samples'], {}), '(samples)\n', (1381, 1390), True, 'import numpy as np\n'), ((830, 844), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (841, 844), False, 'import torch\n'), ((333, 345), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (342, 345), False, 'import torch\n'), ((494, 506), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (503, 506), False, 'import torch\n'), ((960, 972), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (969, 972), False, 'import torch\n')]
|
"""Steps up and down"""
import calendar
import numpy as np
from pandas.io.sql import read_sql
from pyiem import network
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
PDICT = {'spring': '1 January - 30 June',
'fall': '1 July - 31 December'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['description'] = """This plot analyzes the number of steps down in
low temperature during the fall season and the number of steps up in
    high temperature during the spring season. A step is counted whenever a
    new coldest low (fall) or warmest high (spring) is recorded for the
    season-to-date period.
"""
desc['arguments'] = [
dict(type='station', name='station', default='IA2203',
label='Select Station', network='IACLIMATE'),
dict(type='select', name='season', options=PDICT,
label='Select which half of year', default='fall'),
]
return desc
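# Illustrative note (not part of the original logic): a "step" is a day on which
# the season-to-date running extreme changes. For example, for fall daily lows of
# [60, 55, 58, 50, 52, 45] the running minimum is [60, 55, 55, 50, 50, 45], so
# four distinct levels (60, 55, 50, 45) are reached; the SQL below counts the
# first day each such level appears.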
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
season = ctx['season']
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
df = read_sql("""
WITH obs as (
SELECT day, year, month, high, low,
case when month > 6 then 'fall' else 'spring' end as season
from """ + table + """ WHERE station = %s),
data as (
SELECT year, day, season,
max(high) OVER (PARTITION by year, season ORDER by day ASC
ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as mh,
min(low) OVER (PARTITION by year, season ORDER by day ASC
ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as ml
from obs),
lows as (
SELECT year, day, ml as level, season,
rank() OVER (PARTITION by year, ml ORDER by day ASC) from data
WHERE season = 'fall'),
highs as (
SELECT year, day, mh as level, season,
rank() OVER (PARTITION by year, mh ORDER by day ASC) from data
WHERE season = 'spring')
(SELECT year, day, extract(doy from day) as doy,
level, season from lows WHERE rank = 1) UNION
(SELECT year, day, extract(doy from day) as doy,
level, season from highs WHERE rank = 1)
""", pgconn, params=[station])
df2 = df[df['season'] == season]
(fig, ax) = plt.subplots(3, 1, figsize=(7, 10))
dyear = df2.groupby(['year']).count()
ax[0].bar(dyear.index, dyear['level'], facecolor='tan', edgecolor='tan')
ax[0].axhline(dyear['level'].mean(), lw=2)
ax[0].set_ylabel("Yearly Events Avg: %.1f" % (dyear['level'].mean(), ))
ax[0].set_xlim(dyear.index.min()-1, dyear.index.max()+1)
title = "%s Steps %s" % (PDICT[season],
"Down" if season == 'fall' else 'Up')
ax[0].set_title("%s [%s]\n%s in Temperature" % (nt.sts[station]['name'],
station, title))
ax[0].grid(True)
ax[1].hist(np.array(df2['level'], 'f'),
bins=np.arange(df2['level'].min(),
df2['level'].max()+1, 2),
normed=True, facecolor='tan')
ax[1].set_ylabel("Probability Density")
ax[1].axvline(32, lw=2)
ax[1].grid(True)
ax[1].set_xlabel(r"Temperature $^\circ$F, 32 degrees highlighted")
ax[2].hist(np.array(df2['doy'], 'f'),
bins=np.arange(df2['doy'].min(),
df2['doy'].max()+1, 3),
normed=True, facecolor='tan')
ax[2].set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274,
305, 335, 365))
ax[2].set_xticklabels(calendar.month_abbr[1:])
ax[2].set_xlim(df2['doy'].min() - 3,
df2['doy'].max() + 3)
ax[2].set_ylabel("Probability Density")
ax[2].grid(True)
ax[2].set_xlabel("Day of Year, 3 Day Bins")
return fig, df
if __name__ == '__main__':
plotter(dict())
|
[
"pyiem.network.Table",
"pandas.io.sql.read_sql",
"pyiem.util.get_dbconn",
"numpy.array",
"pyiem.plot.use_agg.plt.subplots"
] |
[((1072, 1090), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""coop"""'], {}), "('coop')\n", (1082, 1090), False, 'from pyiem.util import get_autoplot_context, get_dbconn\n'), ((1256, 1299), 'pyiem.network.Table', 'network.Table', (["('%sCLIMATE' % (station[:2],))"], {}), "('%sCLIMATE' % (station[:2],))\n", (1269, 1299), False, 'from pyiem import network\n'), ((1310, 2440), 'pandas.io.sql.read_sql', 'read_sql', (['(\n """\n WITH obs as (\n SELECT day, year, month, high, low,\n case when month > 6 then \'fall\' else \'spring\' end as season\n from """\n + table +\n """ WHERE station = %s),\n data as (\n SELECT year, day, season,\n max(high) OVER (PARTITION by year, season ORDER by day ASC\n ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as mh,\n min(low) OVER (PARTITION by year, season ORDER by day ASC\n ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as ml\n from obs),\n lows as (\n SELECT year, day, ml as level, season,\n rank() OVER (PARTITION by year, ml ORDER by day ASC) from data\n WHERE season = \'fall\'),\n highs as (\n SELECT year, day, mh as level, season,\n rank() OVER (PARTITION by year, mh ORDER by day ASC) from data\n WHERE season = \'spring\')\n\n (SELECT year, day, extract(doy from day) as doy,\n level, season from lows WHERE rank = 1) UNION\n (SELECT year, day, extract(doy from day) as doy,\n level, season from highs WHERE rank = 1)\n """\n )', 'pgconn'], {'params': '[station]'}), '(\n """\n WITH obs as (\n SELECT day, year, month, high, low,\n case when month > 6 then \'fall\' else \'spring\' end as season\n from """\n + table +\n """ WHERE station = %s),\n data as (\n SELECT year, day, season,\n max(high) OVER (PARTITION by year, season ORDER by day ASC\n ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as mh,\n min(low) OVER (PARTITION by year, season ORDER by day ASC\n ROWS BETWEEN 183 PRECEDING and CURRENT ROW) as ml\n from obs),\n lows as (\n SELECT year, day, ml as level, season,\n rank() OVER (PARTITION by year, ml ORDER by day ASC) from data\n WHERE season = \'fall\'),\n highs as (\n SELECT year, day, mh as level, season,\n rank() OVER (PARTITION by year, mh ORDER by day ASC) from data\n WHERE season = \'spring\')\n\n (SELECT year, day, extract(doy from day) as doy,\n level, season from lows WHERE rank = 1) UNION\n (SELECT year, day, extract(doy from day) as doy,\n level, season from highs WHERE rank = 1)\n """\n , pgconn, params=[station])\n', (1318, 2440), False, 'from pandas.io.sql import read_sql\n'), ((2475, 2510), 'pyiem.plot.use_agg.plt.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(7, 10)'}), '(3, 1, figsize=(7, 10))\n', (2487, 2510), False, 'from pyiem.plot.use_agg import plt\n'), ((3108, 3135), 'numpy.array', 'np.array', (["df2['level']", '"""f"""'], {}), "(df2['level'], 'f')\n", (3116, 3135), True, 'import numpy as np\n'), ((3468, 3493), 'numpy.array', 'np.array', (["df2['doy']", '"""f"""'], {}), "(df2['doy'], 'f')\n", (3476, 3493), True, 'import numpy as np\n')]
|
"""An environment to skip k frames and return a max between the last two."""
import gym
import numpy as np
class MaxFrameskipEnv(gym.Wrapper):
"""An environment to skip k frames and return a max between the last two."""
def __init__(self, env, skip: int=4) -> None:
"""
Initialize a new max frame skip env around an existing environment.
Args:
env: the environment to wrap around
skip: the number of frames to skip (i.e. hold an action for)
Returns:
None
"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2, *env.observation_space.shape), dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
# the total reward from `skip` frames having `action` held on them
total_reward = 0.0
done = None
# perform the action `skip` times
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
# assign the buffer with the last two frames
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
# break the loop if the game terminated
if done:
break
# Note that the observation on the done=True frame doesn't matter
# (because the next state isn't evaluated when done is true)
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
# explicitly define the outward facing API of this module
__all__ = [MaxFrameskipEnv.__name__]
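# A minimal usage sketch (illustrative, not part of the original module): wrap a
# gym environment so that each action is held for four frames and the returned
# observation is the pixel-wise max of the last two frames. The environment id
# below is an assumption and requires the Atari extras to be installed.
if __name__ == "__main__":
    env = MaxFrameskipEnv(gym.make("PongNoFrameskip-v4"), skip=4)
    initial_obs = env.reset()
    obs, total_reward, done, info = env.step(env.action_space.sample())
    print(initial_obs.shape, obs.shape, total_reward, done)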
|
[
"numpy.zeros",
"gym.Wrapper.__init__"
] |
[((557, 588), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (577, 588), False, 'import gym\n'), ((691, 750), 'numpy.zeros', 'np.zeros', (['(2, *env.observation_space.shape)'], {'dtype': 'np.uint8'}), '((2, *env.observation_space.shape), dtype=np.uint8)\n', (699, 750), True, 'import numpy as np\n')]
|
"""Script containing the DeepLoco environments."""
import gym
import numpy as np
import os
import sys
import cv2
try:
sys.path.append(os.path.join(os.environ["TERRAINRL_PATH"], "simAdapter"))
import terrainRLSim # noqa: F401
except (KeyError, ImportError, ModuleNotFoundError):
pass
class BipedalSoccer(gym.Env):
"""Bipedal Soccer environment.
In this environment, a bipedal agent is placed in an open field with a
soccer ball. The agent is rewarded for moving to the ball, and additionally
dribbling the ball to the target. The reward function is a weighted sum of
the agent's distance from the ball and the distance of the ball from a
desired goal position. This reward is positive to discourage the agent from
falling prematurely.
Attributes
----------
wrapped_env : gym.Env
the original environment, which add more dimensions than wanted here
"""
def __init__(self):
"""Instantiate the environment."""
self.wrapped_env = terrainRLSim.getEnv(
"PD-Biped3D-HLC-Soccer-v1", render=False)
# Add the time horizon.
self.horizon = 512
@property
def observation_space(self):
"""See parent class."""
return self.wrapped_env.observation_space
@property
def action_space(self):
"""See parent class."""
return self.wrapped_env.action_space
def step(self, action):
"""See parent class."""
obs, rew, done, info = self.wrapped_env.step(np.array([action]))
return obs[0], rew[0][0], done, info
def reset(self):
"""See parent class."""
return self.wrapped_env.reset()[0]
def render(self, mode='human'):
"""See parent class."""
return self.wrapped_env.render(mode=mode)
class BipedalObstacles(gym.Env):
"""Bipedal Obstacles environment.
In this environment, a bipedal agent is placed in an open field with
obstacles scattered throughout the world. The goal of the agent is to
walk around the world and reach a goal position.
Attributes
----------
wrapped_env : gym.Env
the original environment, which add more dimensions than wanted here
"""
def __init__(self, render):
"""Instantiate the environment.
Parameters
----------
render : bool
whether to render the environment
"""
self.t = 0
if render:
self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-render-v2")
else:
self.wrapped_env = gym.make("PD-Biped3D-HLC-Obstacles-v2")
# Add the time horizon.
self.horizon = 2000
@property
def observation_space(self):
"""See parent class."""
return gym.spaces.Box(
low=20 * self.wrapped_env.observation_space.low[:-2],
high=20 * self.wrapped_env.observation_space.high[:-2],
dtype=np.float32)
@property
def context_space(self):
"""See parent class."""
return gym.spaces.Box(
low=20 * self.wrapped_env.observation_space.low[-2:],
high=20 * self.wrapped_env.observation_space.high[-2:],
dtype=np.float32)
@property
def action_space(self):
"""See parent class."""
return self.wrapped_env.action_space
@property
def current_context(self):
"""See parent class."""
return self.wrapped_env.env.getObservation()[-2:]
def step(self, action):
"""See parent class."""
self.t += 1
obs, rew, done, info = self.wrapped_env.step(action)
done = done or self.t >= self.horizon
return obs[:-2], rew, done, info
def reset(self):
"""See parent class."""
self.t = 0
return self.wrapped_env.reset()[:-2]
def render(self, mode='human'):
"""See parent class."""
image = self.wrapped_env.env.render(
headless_step=True)
if mode == 'human':
f = np.flip(image.astype(np.float32) / 255.0, axis=0)
f = np.flip(f, axis=2)
cv2.imshow("PD-Biped3D-HLC-Obstacles-v2", f)
cv2.waitKey(1)
elif mode == 'rgb_array':
return image
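# A minimal usage sketch (illustrative, not part of the original module; it
# requires the terrainRL simulator imported at the top to be installed):
# stepping the soccer environment with random actions for one episode.
if __name__ == "__main__":
    env = BipedalSoccer()
    obs = env.reset()
    for _ in range(env.horizon):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            break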
|
[
"numpy.flip",
"terrainRLSim.getEnv",
"os.path.join",
"gym.spaces.Box",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"gym.make"
] |
[((139, 195), 'os.path.join', 'os.path.join', (["os.environ['TERRAINRL_PATH']", '"""simAdapter"""'], {}), "(os.environ['TERRAINRL_PATH'], 'simAdapter')\n", (151, 195), False, 'import os\n'), ((1016, 1077), 'terrainRLSim.getEnv', 'terrainRLSim.getEnv', (['"""PD-Biped3D-HLC-Soccer-v1"""'], {'render': '(False)'}), "('PD-Biped3D-HLC-Soccer-v1', render=False)\n", (1035, 1077), False, 'import terrainRLSim\n'), ((2762, 2909), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(20 * self.wrapped_env.observation_space.low[:-2])', 'high': '(20 * self.wrapped_env.observation_space.high[:-2])', 'dtype': 'np.float32'}), '(low=20 * self.wrapped_env.observation_space.low[:-2], high=\n 20 * self.wrapped_env.observation_space.high[:-2], dtype=np.float32)\n', (2776, 2909), False, 'import gym\n'), ((3033, 3180), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(20 * self.wrapped_env.observation_space.low[-2:])', 'high': '(20 * self.wrapped_env.observation_space.high[-2:])', 'dtype': 'np.float32'}), '(low=20 * self.wrapped_env.observation_space.low[-2:], high=\n 20 * self.wrapped_env.observation_space.high[-2:], dtype=np.float32)\n', (3047, 3180), False, 'import gym\n'), ((1515, 1533), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (1523, 1533), True, 'import numpy as np\n'), ((2474, 2520), 'gym.make', 'gym.make', (['"""PD-Biped3D-HLC-Obstacles-render-v2"""'], {}), "('PD-Biped3D-HLC-Obstacles-render-v2')\n", (2482, 2520), False, 'import gym\n'), ((2566, 2605), 'gym.make', 'gym.make', (['"""PD-Biped3D-HLC-Obstacles-v2"""'], {}), "('PD-Biped3D-HLC-Obstacles-v2')\n", (2574, 2605), False, 'import gym\n'), ((4073, 4091), 'numpy.flip', 'np.flip', (['f'], {'axis': '(2)'}), '(f, axis=2)\n', (4080, 4091), True, 'import numpy as np\n'), ((4104, 4148), 'cv2.imshow', 'cv2.imshow', (['"""PD-Biped3D-HLC-Obstacles-v2"""', 'f'], {}), "('PD-Biped3D-HLC-Obstacles-v2', f)\n", (4114, 4148), False, 'import cv2\n'), ((4161, 4175), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4172, 4175), False, 'import cv2\n')]
|
import numpy as np
class Loss():
def output_gradient(self):
return
class MSE(Loss):
def __call__(self, predicted, labels):
return 0.5 * np.square(predicted - labels)
def output_gradient(self, predicted, labels):
return predicted - labels
class BinaryCrossEntropy(Loss):
def __call__(self, predicted, labels):
return - np.nan_to_num((labels*np.log(predicted) + (1-labels)*np.log(1-predicted)))
def output_gradient(self, predicted, labels):
return np.nan_to_num(-(labels/predicted) + (1-labels)/(1-predicted))
class CategoricalCrossEntropy(Loss):
def __call__(self, predicted, labels):
return -np.nan_to_num(np.sum(labels*np.log(predicted), axis=0, keepdims=True))
def output_gradient(self, predicted, labels):
return -np.nan_to_num(labels/predicted)
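# A small self-check sketch (not part of the original module): the analytic
# output_gradient of MSE should match a central finite-difference estimate of
# the loss, element-wise.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    predicted = rng.random(5)
    labels = rng.random(5)
    mse = MSE()
    eps = 1e-6
    numeric_grad = (mse(predicted + eps, labels) - mse(predicted - eps, labels)) / (2 * eps)
    print(np.allclose(numeric_grad, mse.output_gradient(predicted, labels)))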
|
[
"numpy.log",
"numpy.nan_to_num",
"numpy.square"
] |
[((512, 581), 'numpy.nan_to_num', 'np.nan_to_num', (['(-(labels / predicted) + (1 - labels) / (1 - predicted))'], {}), '(-(labels / predicted) + (1 - labels) / (1 - predicted))\n', (525, 581), True, 'import numpy as np\n'), ((163, 192), 'numpy.square', 'np.square', (['(predicted - labels)'], {}), '(predicted - labels)\n', (172, 192), True, 'import numpy as np\n'), ((809, 842), 'numpy.nan_to_num', 'np.nan_to_num', (['(labels / predicted)'], {}), '(labels / predicted)\n', (822, 842), True, 'import numpy as np\n'), ((393, 410), 'numpy.log', 'np.log', (['predicted'], {}), '(predicted)\n', (399, 410), True, 'import numpy as np\n'), ((424, 445), 'numpy.log', 'np.log', (['(1 - predicted)'], {}), '(1 - predicted)\n', (430, 445), True, 'import numpy as np\n'), ((699, 716), 'numpy.log', 'np.log', (['predicted'], {}), '(predicted)\n', (705, 716), True, 'import numpy as np\n')]
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch_utils import misc, training_stats
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain): # to be overridden by subclass
raise NotImplementedError()
#----------------------------------------------------------------------------
class StyleGANVLoss(Loss):
def __init__(self, device, G_mapping, G_synthesis, D, augment_pipe=None, style_mixing_prob=0,
r1_gamma=0, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2, video_consistent_aug=True,
sync_batch_start_time=False, motion_reg=0, motion_reg_num_frames=128, motion_reg_batch_size=256,
predict_dists_weight=0):
super().__init__()
self.device = device
self.G_mapping = G_mapping
self.G_synthesis = G_synthesis
self.D = D
self.augment_pipe = augment_pipe
self.style_mixing_prob = style_mixing_prob
self.r1_gamma = r1_gamma
self.pl_batch_shrink = pl_batch_shrink
self.pl_decay = pl_decay
self.pl_weight = pl_weight
self.pl_mean = torch.zeros([], device=device)
self.video_consistent_aug = video_consistent_aug
self.sync_batch_start_time = sync_batch_start_time
self.motion_reg = motion_reg
self.motion_reg_num_frames = motion_reg_num_frames
self.motion_reg_batch_size = motion_reg_batch_size
self.predict_dists_weight = predict_dists_weight
def run_G(self, z, c, t, l, sync):
with misc.ddp_sync(self.G_mapping, sync):
ws = self.G_mapping(z, c, l=l)
if self.style_mixing_prob > 0:
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = self.G_mapping(torch.randn_like(z), c, l=l, skip_w_avg_update=True)[:, cutoff:]
with misc.ddp_sync(self.G_synthesis, sync):
out = self.G_synthesis(ws, t=t, c=c, l=l)
return out, ws
def run_D(self, img, c, t, sync):
if self.augment_pipe is not None:
if self.video_consistent_aug:
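                # Stack all frames of a clip along the channel axis before
                # augmentation, so that augment_pipe samples one set of
                # augmentation parameters per clip and applies it identically
                # to every frame (keeps the video temporally consistent).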
nf, ch, h, w = img.shape
f = self.G_synthesis.motion_encoder.num_frames_per_motion
n = nf // f
img = img.view(n, f * ch, h, w) # [n, f * ch, h, w]
img = self.augment_pipe(img) # [n, f * ch, h, w]
if self.video_consistent_aug:
img = img.view(n * f, ch, h, w) # [n * f, ch, h, w]
with misc.ddp_sync(self.D, sync):
outputs = self.D(img, c, t)
return outputs
def accumulate_gradients(self, phase, real_img, real_c, real_t, gen_z, gen_c, gen_t, gen_l, sync, gain):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
do_Gmain = (phase in ['Gmain', 'Gboth'])
do_Dmain = (phase in ['Dmain', 'Dboth'])
do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
real_img = real_img.view(-1, *real_img.shape[2:]) # [batch_size * num_frames, c, h, w]
if self.sync_batch_start_time:
# Syncing the batch to the same start time
if self.sync_batch_start_time == 'random':
offset = gen_t[random.randint(0, len(gen_t) - 1), 0] # [1]
elif self.sync_batch_start_time == 'zero':
offset = 0 # [1]
elif self.sync_batch_start_time == 'min':
offset = gen_t.min() # [1]
else:
offset = None
            if offset is not None:
gen_t = (gen_t - gen_t[:, [0]]) + offset # [batch_size, nf]
# Gmain: Maximize logits for generated images.
if do_Gmain:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, gen_t, gen_l, sync=(sync and not do_Gpl)) # [batch_size * num_frames, c, h, w]
D_out_gen = self.run_D(gen_img, gen_c, gen_t, sync=False) # [batch_size]
training_stats.report('Loss/scores/fake', D_out_gen['image_logits'])
training_stats.report('Loss/signs/fake', D_out_gen['image_logits'].sign())
loss_Gmain = F.softplus(-D_out_gen['image_logits']) # -log(sigmoid(y))
if 'video_logits' in D_out_gen:
loss_Gmain_video = F.softplus(-D_out_gen['video_logits']).mean() # -log(sigmoid(y)) # [1]
training_stats.report('Loss/scores/fake_video', D_out_gen['video_logits'])
training_stats.report('Loss/G/loss_video', loss_Gmain_video)
else:
loss_Gmain_video = 0.0 # [1]
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
(loss_Gmain + loss_Gmain_video).mean().mul(gain).backward()
if self.motion_reg > 0.0:
with torch.autograd.profiler.record_function('Gmotion_reg_forward'):
w = torch.zeros(self.motion_reg_batch_size, self.G_mapping.w_dim, device=self.device) # [batch_size, w_dim]
c = torch.zeros(self.motion_reg_batch_size, self.G_mapping.c_dim) # [batch_size, c_dim]
l = torch.zeros(self.motion_reg_batch_size) # [batch_size]
t = torch.linspace(0, self.G_motion_encoder.max_num_frames, self.motion_reg_num_frames, device=self.device).unsqueeze(0).repeat_interleave(self.motion_reg_batch_size, dim=0) # [batch_size, num_frames]
time_emb_coefs = self.G_motion_encoder(c=c, t=t, l=l, w=w, return_time_embs_coefs=True) # {...}
periods = time_emb_coefs['periods'].view(self.motion_reg_batch_size, self.motion_reg_num_frames, -1) # [batch_size, num_frames, num_feats * num_fourier_feats]
phases = time_emb_coefs['phases'].view(self.motion_reg_batch_size, self.motion_reg_num_frames, -1) # [batch_size, num_frames, num_feats * num_fourier_feats]
periods_logvar = -(periods.var(dim=0) + 1e-8).log() # [num_frames, num_feats * num_fourier_feats]
phases_logvar = -(phases.var(dim=0) + 1e-8).log() # [num_frames, num_feats * num_fourier_feats]
loss_Gmotion_reg = (periods_logvar.mean() + phases_logvar.mean()) * self.motion_reg # [1]
dummy = time_emb_coefs['time_embs'].sum() * 0.0 # [1] <- for DDP consistency
training_stats.report('Loss/G/motion_reg', loss_Gmotion_reg)
with torch.autograd.profiler.record_function('Gmotion_reg_backward'):
(loss_Gmotion_reg + dummy).mul(gain).backward()
# Gpl: Apply path length regularization.
if do_Gpl:
with torch.autograd.profiler.record_function('Gpl_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], gen_t[:batch_size], gen_l[:batch_size], sync=sync) # [batch_size * num_frames, c, h, w]
pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
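                # Track an exponential moving average of the path lengths and penalize deviations from it.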
pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
self.pl_mean.copy_(pl_mean.detach())
pl_penalty = (pl_lengths - pl_mean).square()
training_stats.report('Loss/pl_penalty', pl_penalty)
loss_Gpl = pl_penalty * self.pl_weight
training_stats.report('Loss/G/reg', loss_Gpl)
with torch.autograd.profiler.record_function('Gpl_backward'):
loss_Gpl.mean().mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if do_Dmain:
with torch.autograd.profiler.record_function('Dgen_forward'):
with torch.no_grad():
gen_img, _gen_ws = self.run_G(gen_z, gen_c, gen_t, gen_l, sync=False) # [batch_size * num_frames, c, h, w]
D_out_gen = self.run_D(gen_img, gen_c, gen_t, sync=False) # Gets synced by loss_Dreal.
training_stats.report('Loss/scores/fake', D_out_gen['image_logits'])
training_stats.report('Loss/signs/fake', D_out_gen['image_logits'].sign())
loss_Dgen = F.softplus(D_out_gen['image_logits']) # -log(1 - sigmoid(y))
if self.predict_dists_weight > 0.0:
t_diffs_gen = gen_t[:, 1] - gen_t[:, 0] # [batch_size]
loss_Dgen_dist_preds = F.cross_entropy(D_out_gen['dist_preds'], t_diffs_gen.long()) # [batch_size]
training_stats.report('Loss/D/dist_preds_gen', loss_Dgen_dist_preds)
else:
loss_Dgen_dist_preds = 0.0
if 'video_logits' in D_out_gen:
loss_Dgen_video = F.softplus(D_out_gen['video_logits']).mean() # [1]
training_stats.report('Loss/scores/fake_video', D_out_gen['video_logits'])
else:
loss_Dgen_video = 0.0 # [1]
with torch.autograd.profiler.record_function('Dgen_backward'):
(loss_Dgen + loss_Dgen_video + loss_Dgen_dist_preds).mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if do_Dmain or do_Dr1:
name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
D_out_real = self.run_D(real_img_tmp, real_c, real_t, sync=sync)
training_stats.report('Loss/scores/real', D_out_real['image_logits'])
training_stats.report('Loss/signs/real', D_out_real['image_logits'].sign())
loss_Dreal = 0
loss_Dreal_dist_preds = 0
loss_Dreal_video = 0.0 # [1]
if do_Dmain:
loss_Dreal = F.softplus(-D_out_real['image_logits']) # -log(sigmoid(y))
training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
if 'video_logits' in D_out_gen:
loss_Dreal_video = F.softplus(-D_out_real['video_logits']).mean() # [1]
training_stats.report('Loss/scores/real_video', D_out_real['video_logits'])
training_stats.report('Loss/D/loss_video', loss_Dgen_video + loss_Dreal_video)
if self.predict_dists_weight > 0.0:
t_diffs_real = real_t[:, 1] - real_t[:, 0] # [batch_size]
loss_Dreal_dist_preds = F.cross_entropy(D_out_real['dist_preds'], t_diffs_real.long()) # [batch_size]
training_stats.report('Loss/D/dist_preds_real', loss_Dreal_dist_preds)
loss_Dr1 = 0
if do_Dr1:
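                    # R1 penalty: squared gradient norm of the real-image logits w.r.t. the real images.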
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[D_out_real['image_logits'].sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
r1_penalty = r1_grads.square().sum([1,2,3])
loss_Dr1 = r1_penalty * (self.r1_gamma / 2) # [batch_size * num_frames_per_sample]
loss_Dr1 = loss_Dr1.view(-1, len(real_img_tmp) // len(D_out_real['image_logits'])).mean(dim=1) # [batch_size]
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
dummy_video_logits = (D_out_real["video_logits"].sum() * 0.0) if "video_logits" in D_out_real else 0.0
with torch.autograd.profiler.record_function(name + '_backward'):
(D_out_real["image_logits"] * 0 + dummy_video_logits + loss_Dreal + loss_Dreal_video + loss_Dr1 + loss_Dreal_dist_preds).mean().mul(gain).backward()
#----------------------------------------------------------------------------
|
[
"numpy.sqrt",
"torch.full_like",
"torch.nn.functional.softplus",
"torch.randn_like",
"torch_utils.ops.conv2d_gradfix.no_weight_gradients",
"torch.autograd.profiler.record_function",
"torch.no_grad",
"torch_utils.training_stats.report",
"torch.linspace",
"torch_utils.misc.ddp_sync",
"torch.empty",
"torch.zeros",
"torch.rand"
] |
[((1718, 1748), 'torch.zeros', 'torch.zeros', (['[]'], {'device': 'device'}), '([], device=device)\n', (1729, 1748), False, 'import torch\n'), ((2130, 2165), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.G_mapping', 'sync'], {}), '(self.G_mapping, sync)\n', (2143, 2165), False, 'from torch_utils import misc, training_stats\n'), ((2713, 2750), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.G_synthesis', 'sync'], {}), '(self.G_synthesis, sync)\n', (2726, 2750), False, 'from torch_utils import misc, training_stats\n'), ((3350, 3377), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.D', 'sync'], {}), '(self.D, sync)\n', (3363, 3377), False, 'from torch_utils import misc, training_stats\n'), ((4663, 4719), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmain_forward"""'], {}), "('Gmain_forward')\n", (4702, 4719), False, 'import torch\n'), ((4965, 5033), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake"""', "D_out_gen['image_logits']"], {}), "('Loss/scores/fake', D_out_gen['image_logits'])\n", (4986, 5033), False, 'from torch_utils import misc, training_stats\n'), ((5154, 5192), 'torch.nn.functional.softplus', 'F.softplus', (["(-D_out_gen['image_logits'])"], {}), "(-D_out_gen['image_logits'])\n", (5164, 5192), True, 'import torch.nn.functional as F\n'), ((5633, 5681), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/loss"""', 'loss_Gmain'], {}), "('Loss/G/loss', loss_Gmain)\n", (5654, 5681), False, 'from torch_utils import misc, training_stats\n'), ((5699, 5756), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmain_backward"""'], {}), "('Gmain_backward')\n", (5738, 5756), False, 'import torch\n'), ((7732, 7786), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gpl_forward"""'], {}), "('Gpl_forward')\n", (7771, 7786), False, 'import torch\n'), ((8664, 8716), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/pl_penalty"""', 'pl_penalty'], {}), "('Loss/pl_penalty', pl_penalty)\n", (8685, 8716), False, 'from torch_utils import misc, training_stats\n'), ((8788, 8833), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/reg"""', 'loss_Gpl'], {}), "('Loss/G/reg', loss_Gpl)\n", (8809, 8833), False, 'from torch_utils import misc, training_stats\n'), ((8851, 8906), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gpl_backward"""'], {}), "('Gpl_backward')\n", (8890, 8906), False, 'import torch\n'), ((9077, 9132), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Dgen_forward"""'], {}), "('Dgen_forward')\n", (9116, 9132), False, 'import torch\n'), ((9418, 9486), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake"""', "D_out_gen['image_logits']"], {}), "('Loss/scores/fake', D_out_gen['image_logits'])\n", (9439, 9486), False, 'from torch_utils import misc, training_stats\n'), ((9606, 9643), 'torch.nn.functional.softplus', 'F.softplus', (["D_out_gen['image_logits']"], {}), "(D_out_gen['image_logits'])\n", (9616, 9643), True, 'import torch.nn.functional as F\n'), ((10393, 10449), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Dgen_backward"""'], {}), "('Dgen_backward')\n", (10432, 10449), False, 'import torch\n'), ((10777, 10835), 'torch.autograd.profiler.record_function', 
'torch.autograd.profiler.record_function', (["(name + '_forward')"], {}), "(name + '_forward')\n", (10816, 10835), False, 'import torch\n'), ((11006, 11075), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/real"""', "D_out_real['image_logits']"], {}), "('Loss/scores/real', D_out_real['image_logits'])\n", (11027, 11075), False, 'from torch_utils import misc, training_stats\n'), ((13105, 13164), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (["(name + '_backward')"], {}), "(name + '_backward')\n", (13144, 13164), False, 'import torch\n'), ((2274, 2329), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""style_mixing"""'], {}), "('style_mixing')\n", (2313, 2329), False, 'import torch\n'), ((5390, 5464), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake_video"""', "D_out_gen['video_logits']"], {}), "('Loss/scores/fake_video', D_out_gen['video_logits'])\n", (5411, 5464), False, 'from torch_utils import misc, training_stats\n'), ((5485, 5545), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/loss_video"""', 'loss_Gmain_video'], {}), "('Loss/G/loss_video', loss_Gmain_video)\n", (5506, 5545), False, 'from torch_utils import misc, training_stats\n'), ((5894, 5956), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmotion_reg_forward"""'], {}), "('Gmotion_reg_forward')\n", (5933, 5956), False, 'import torch\n'), ((5982, 6068), 'torch.zeros', 'torch.zeros', (['self.motion_reg_batch_size', 'self.G_mapping.w_dim'], {'device': 'self.device'}), '(self.motion_reg_batch_size, self.G_mapping.w_dim, device=self.\n device)\n', (5993, 6068), False, 'import torch\n'), ((6110, 6171), 'torch.zeros', 'torch.zeros', (['self.motion_reg_batch_size', 'self.G_mapping.c_dim'], {}), '(self.motion_reg_batch_size, self.G_mapping.c_dim)\n', (6121, 6171), False, 'import torch\n'), ((6218, 6257), 'torch.zeros', 'torch.zeros', (['self.motion_reg_batch_size'], {}), '(self.motion_reg_batch_size)\n', (6229, 6257), False, 'import torch\n'), ((7430, 7490), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/motion_reg"""', 'loss_Gmotion_reg'], {}), "('Loss/G/motion_reg', loss_Gmotion_reg)\n", (7451, 7490), False, 'from torch_utils import misc, training_stats\n'), ((7513, 7576), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmotion_reg_backward"""'], {}), "('Gmotion_reg_backward')\n", (7552, 7576), False, 'import torch\n'), ((8056, 8081), 'torch.randn_like', 'torch.randn_like', (['gen_img'], {}), '(gen_img)\n', (8072, 8081), False, 'import torch\n'), ((8084, 8128), 'numpy.sqrt', 'np.sqrt', (['(gen_img.shape[2] * gen_img.shape[3])'], {}), '(gen_img.shape[2] * gen_img.shape[3])\n', (8091, 8128), True, 'import numpy as np\n'), ((8150, 8201), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""pl_grads"""'], {}), "('pl_grads')\n", (8189, 8201), False, 'import torch\n'), ((8203, 8239), 'torch_utils.ops.conv2d_gradfix.no_weight_gradients', 'conv2d_gradfix.no_weight_gradients', ([], {}), '()\n', (8237, 8239), False, 'from torch_utils.ops import conv2d_gradfix\n'), ((9155, 9170), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9168, 9170), False, 'import torch\n'), ((9934, 10002), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/dist_preds_gen"""', 'loss_Dgen_dist_preds'], {}), 
"('Loss/D/dist_preds_gen', loss_Dgen_dist_preds)\n", (9955, 10002), False, 'from torch_utils import misc, training_stats\n'), ((10230, 10304), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake_video"""', "D_out_gen['video_logits']"], {}), "('Loss/scores/fake_video', D_out_gen['video_logits'])\n", (10251, 10304), False, 'from torch_utils import misc, training_stats\n'), ((11349, 11388), 'torch.nn.functional.softplus', 'F.softplus', (["(-D_out_real['image_logits'])"], {}), "(-D_out_real['image_logits'])\n", (11359, 11388), True, 'import torch.nn.functional as F\n'), ((11428, 11488), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/loss"""', '(loss_Dgen + loss_Dreal)'], {}), "('Loss/D/loss', loss_Dgen + loss_Dreal)\n", (11449, 11488), False, 'from torch_utils import misc, training_stats\n'), ((12853, 12905), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/r1_penalty"""', 'r1_penalty'], {}), "('Loss/r1_penalty', r1_penalty)\n", (12874, 12905), False, 'from torch_utils import misc, training_stats\n'), ((12926, 12971), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/reg"""', 'loss_Dr1'], {}), "('Loss/D/reg', loss_Dr1)\n", (12947, 12971), False, 'from torch_utils import misc, training_stats\n'), ((2545, 2581), 'torch.full_like', 'torch.full_like', (['cutoff', 'ws.shape[1]'], {}), '(cutoff, ws.shape[1])\n', (2560, 2581), False, 'import torch\n'), ((11662, 11737), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/real_video"""', "D_out_real['video_logits']"], {}), "('Loss/scores/real_video', D_out_real['video_logits'])\n", (11683, 11737), False, 'from torch_utils import misc, training_stats\n'), ((11762, 11840), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/loss_video"""', '(loss_Dgen_video + loss_Dreal_video)'], {}), "('Loss/D/loss_video', loss_Dgen_video + loss_Dreal_video)\n", (11783, 11840), False, 'from torch_utils import misc, training_stats\n'), ((12130, 12200), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/dist_preds_real"""', 'loss_Dreal_dist_preds'], {}), "('Loss/D/dist_preds_real', loss_Dreal_dist_preds)\n", (12151, 12200), False, 'from torch_utils import misc, training_stats\n'), ((12283, 12334), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""r1_grads"""'], {}), "('r1_grads')\n", (12322, 12334), False, 'import torch\n'), ((12336, 12372), 'torch_utils.ops.conv2d_gradfix.no_weight_gradients', 'conv2d_gradfix.no_weight_gradients', ([], {}), '()\n', (12370, 12372), False, 'from torch_utils.ops import conv2d_gradfix\n'), ((2360, 2412), 'torch.empty', 'torch.empty', (['[]'], {'dtype': 'torch.int64', 'device': 'ws.device'}), '([], dtype=torch.int64, device=ws.device)\n', (2371, 2412), False, 'import torch\n'), ((2478, 2510), 'torch.rand', 'torch.rand', (['[]'], {'device': 'ws.device'}), '([], device=ws.device)\n', (2488, 2510), False, 'import torch\n'), ((2635, 2654), 'torch.randn_like', 'torch.randn_like', (['z'], {}), '(z)\n', (2651, 2654), False, 'import torch\n'), ((5299, 5337), 'torch.nn.functional.softplus', 'F.softplus', (["(-D_out_gen['video_logits'])"], {}), "(-D_out_gen['video_logits'])\n", (5309, 5337), True, 'import torch.nn.functional as F\n'), ((10159, 10196), 'torch.nn.functional.softplus', 'F.softplus', (["D_out_gen['video_logits']"], {}), "(D_out_gen['video_logits'])\n", (10169, 10196), True, 'import torch.nn.functional as F\n'), 
((11585, 11624), 'torch.nn.functional.softplus', 'F.softplus', (["(-D_out_real['video_logits'])"], {}), "(-D_out_real['video_logits'])\n", (11595, 11624), True, 'import torch.nn.functional as F\n'), ((6297, 6405), 'torch.linspace', 'torch.linspace', (['(0)', 'self.G_motion_encoder.max_num_frames', 'self.motion_reg_num_frames'], {'device': 'self.device'}), '(0, self.G_motion_encoder.max_num_frames, self.\n motion_reg_num_frames, device=self.device)\n', (6311, 6405), False, 'import torch\n')]
|
import numpy as np
import pandas as pd
import seaborn as sns
from nninst.backend.tensorflow.model import AlexNet
from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import (
alexnet_imagenet_inter_class_similarity_frequency,
)
from nninst.op import Conv2dOp, DenseOp
np.random.seed(0)
sns.set()
threshold = 0.5
frequency = int(500 * 0.1)
label = "import"
variant = None
base_name = f"alexnet_imagenet_inter_class_similarity_frequency_{frequency}"
cmap = "Greens"
same_class_similarity = []
diff_class_similarity = []
layer_names = []
layers = AlexNet.graph().load().ops_in_layers(Conv2dOp, DenseOp)
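# Compute the inter-class similarity heatmap for the whole network (layer_name=None) and for every conv/dense layer.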
for layer_name in [
None,
*layers,
]:
similarity = alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer_name
).load()
same_class_similarity.append(
np.mean(similarity[np.eye(similarity.shape[0], dtype=bool)])
)
diff_class_similarity.append(
np.mean(
similarity[
np.tri(similarity.shape[0], similarity.shape[1], k=-1, dtype=bool)
]
)
)
if layer_name is None:
file_name = base_name
layer_names.append("All")
else:
file_name = base_name + "_" + layer_name[: layer_name.index("/")]
layer_names.append(layer_name[: layer_name.index("/")])
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
for layer_name, similarity in zip(
["avg", "first_half", "second_half"],
[
np.mean(
[
alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer
).load()
for layer in layers
],
axis=0,
),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[:len(layers) // 2]], axis=0),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[len(layers) // 2:]], axis=0),
],
):
file_name = base_name + "_" + layer_name
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
summary_df = pd.DataFrame(
{
"Same Class": same_class_similarity,
"Diff Class": diff_class_similarity,
"Layer": layer_names,
}
)
summary_df.to_csv(f"{base_name}_summary.csv", index=False)
|
[
"seaborn.set",
"nninst.backend.tensorflow.model.AlexNet.graph",
"numpy.eye",
"nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity.alexnet_imagenet_inter_class_similarity_frequency",
"numpy.around",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.tri"
] |
[((298, 315), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (312, 315), True, 'import numpy as np\n'), ((316, 325), 'seaborn.set', 'sns.set', ([], {}), '()\n', (323, 325), True, 'import seaborn as sns\n'), ((3070, 3184), 'pandas.DataFrame', 'pd.DataFrame', (["{'Same Class': same_class_similarity, 'Diff Class': diff_class_similarity,\n 'Layer': layer_names}"], {}), "({'Same Class': same_class_similarity, 'Diff Class':\n diff_class_similarity, 'Layer': layer_names})\n", (3082, 3184), True, 'import pandas as pd\n'), ((1390, 1423), 'numpy.around', 'np.around', (['similarity'], {'decimals': '(2)'}), '(similarity, decimals=2)\n', (1399, 1423), True, 'import numpy as np\n'), ((2680, 2713), 'numpy.around', 'np.around', (['similarity'], {'decimals': '(2)'}), '(similarity, decimals=2)\n', (2689, 2713), True, 'import numpy as np\n'), ((697, 819), 'nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity.alexnet_imagenet_inter_class_similarity_frequency', 'alexnet_imagenet_inter_class_similarity_frequency', (['threshold', 'frequency', 'label'], {'variant': 'variant', 'layer_name': 'layer_name'}), '(threshold, frequency,\n label, variant=variant, layer_name=layer_name)\n', (746, 819), False, 'from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import alexnet_imagenet_inter_class_similarity_frequency\n'), ((577, 592), 'nninst.backend.tensorflow.model.AlexNet.graph', 'AlexNet.graph', ([], {}), '()\n', (590, 592), False, 'from nninst.backend.tensorflow.model import AlexNet\n'), ((898, 937), 'numpy.eye', 'np.eye', (['similarity.shape[0]'], {'dtype': 'bool'}), '(similarity.shape[0], dtype=bool)\n', (904, 937), True, 'import numpy as np\n'), ((1037, 1103), 'numpy.tri', 'np.tri', (['similarity.shape[0]', 'similarity.shape[1]'], {'k': '(-1)', 'dtype': 'bool'}), '(similarity.shape[0], similarity.shape[1], k=-1, dtype=bool)\n', (1043, 1103), True, 'import numpy as np\n'), ((1897, 2014), 'nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity.alexnet_imagenet_inter_class_similarity_frequency', 'alexnet_imagenet_inter_class_similarity_frequency', (['threshold', 'frequency', 'label'], {'variant': 'variant', 'layer_name': 'layer'}), '(threshold, frequency,\n label, variant=variant, layer_name=layer)\n', (1946, 2014), False, 'from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import alexnet_imagenet_inter_class_similarity_frequency\n')]
|
import sys
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser(
description="""Command-line bin abundance estimator.
Print the median RPKM abundance for each bin in each sample to STDOUT.
Will read the RPKM file into memory - beware.""",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
parser.add_argument('rpkmpath', help='Path to RPKM file')
parser.add_argument('clusterspath', help='Path to clusters.tsv')
parser.add_argument('headerpath', help='Path to list of headers')
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
# Check files
for infile in (args.rpkmpath, args.clusterspath, args.headerpath):
if not os.path.isfile(infile):
raise FileNotFoundError(infile)
# Load Vamb
sys.path.append('../vamb')
import vamb
# Load in files
with open(args.headerpath) as file:
indexof = {line.strip():i for i,line in enumerate(file)}
with open(args.clusterspath) as file:
clusters = vamb.vambtools.read_clusters(file)
# Check that all clusters names are in headers:
for cluster in clusters.values():
for header in cluster:
if header not in indexof:
raise KeyError("Header not found in headerlist: {}".format(header))
# Load RPKM and check it
rpkm = vamb.vambtools.read_npz(args.rpkmpath)
nsamples = rpkm.shape[1]
if len(indexof) != len(rpkm):
raise ValueError("Not the same number of headers as rows in RPKM file")
# Now estimate abundances
for clustername, cluster in clusters.items():
depths = np.empty((len(cluster), nsamples), dtype=np.float32)
for row, header in enumerate(cluster):
index = indexof[header]
depths[row] = rpkm[index]
median_depths = np.median(depths, axis=0)
print(clustername, end='\t')
print('\t'.join([str(i) for i in median_depths]))
|
[
"numpy.median",
"argparse.ArgumentParser",
"vamb.vambtools.read_npz",
"vamb.vambtools.read_clusters",
"os.path.isfile",
"sys.exit",
"sys.path.append"
] |
[((66, 343), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command-line bin abundance estimator.\nPrint the median RPKM abundance for each bin in each sample to STDOUT.\nWill read the RPKM file into memory - beware."""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'add_help': '(False)'}), '(description=\n """Command-line bin abundance estimator.\nPrint the median RPKM abundance for each bin in each sample to STDOUT.\nWill read the RPKM file into memory - beware."""\n , formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)\n', (89, 343), False, 'import argparse\n'), ((798, 824), 'sys.path.append', 'sys.path.append', (['"""../vamb"""'], {}), "('../vamb')\n", (813, 824), False, 'import sys\n'), ((1297, 1335), 'vamb.vambtools.read_npz', 'vamb.vambtools.read_npz', (['args.rpkmpath'], {}), '(args.rpkmpath)\n', (1320, 1335), False, 'import vamb\n'), ((589, 599), 'sys.exit', 'sys.exit', ([], {}), '()\n', (597, 599), False, 'import sys\n'), ((1005, 1039), 'vamb.vambtools.read_clusters', 'vamb.vambtools.read_clusters', (['file'], {}), '(file)\n', (1033, 1039), False, 'import vamb\n'), ((1738, 1763), 'numpy.median', 'np.median', (['depths'], {'axis': '(0)'}), '(depths, axis=0)\n', (1747, 1763), True, 'import numpy as np\n'), ((721, 743), 'os.path.isfile', 'os.path.isfile', (['infile'], {}), '(infile)\n', (735, 743), False, 'import os\n')]
|
import numpy as np
from numpy import linalg as LA
import pickle
from collections import Counter
import csv
class Vocabulary(object):
def __init__(self, vocab_file, emb_file='', dim_emb=0):
with open(vocab_file, 'rb') as f:
self.size, self.word2id, self.id2word = pickle.load(f)
self.dim_emb = dim_emb
self.embedding = np.random.random_sample(
(self.size, self.dim_emb)) - 0.5
if emb_file:
with open(emb_file) as f:
for line in f:
parts = line.split()
word = parts[0]
vec = np.array([float(x) for x in parts[1:]])
if word in self.word2id:
self.embedding[self.word2id[word]] = vec
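        # Normalize every embedding vector to unit L2 norm.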
for i in range(self.size):
self.embedding[i] /= LA.norm(self.embedding[i])
def build_vocab(data, vocab_path, vocab_metadata_path, min_occur=5):
word2id = {'<pad>':0, '<go>':1, '<eos>':2, '<unk>':3}
id2word = ['<pad>', '<go>', '<eos>', '<unk>']
words = [word for sent in data for word in sent]
cnt = Counter(words)
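    # Add a word to the vocabulary only if it occurs at least min_occur times.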
for word in cnt:
if cnt[word] >= min_occur:
word2id[word] = len(word2id)
id2word.append(word)
vocab_size = len(word2id)
with open(vocab_path, 'wb') as f:
pickle.dump((vocab_size, word2id, id2word), f, pickle.HIGHEST_PROTOCOL)
"""Writes metadata file for Tensorboard word embedding visualizer as described here:
https://www.tensorflow.org/get_started/embedding_viz
"""
print("Writing word embedding metadata file to %s" % (vocab_metadata_path))
with open(vocab_metadata_path, "w") as f:
fieldnames = ['word']
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
for w in id2word:
writer.writerow({"word": w})
|
[
"csv.DictWriter",
"pickle.dump",
"numpy.random.random_sample",
"pickle.load",
"collections.Counter",
"numpy.linalg.norm"
] |
[((1050, 1064), 'collections.Counter', 'Counter', (['words'], {}), '(words)\n', (1057, 1064), False, 'from collections import Counter\n'), ((1271, 1342), 'pickle.dump', 'pickle.dump', (['(vocab_size, word2id, id2word)', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '((vocab_size, word2id, id2word), f, pickle.HIGHEST_PROTOCOL)\n', (1282, 1342), False, 'import pickle\n'), ((1673, 1729), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'delimiter': '"""\t"""', 'fieldnames': 'fieldnames'}), "(f, delimiter='\\t', fieldnames=fieldnames)\n", (1687, 1729), False, 'import csv\n'), ((278, 292), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (289, 292), False, 'import pickle\n'), ((341, 391), 'numpy.random.random_sample', 'np.random.random_sample', (['(self.size, self.dim_emb)'], {}), '((self.size, self.dim_emb))\n', (364, 391), True, 'import numpy as np\n'), ((780, 806), 'numpy.linalg.norm', 'LA.norm', (['self.embedding[i]'], {}), '(self.embedding[i])\n', (787, 806), True, 'from numpy import linalg as LA\n')]
|
import os
import numpy as np
import csv
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
from moviepy.editor import *
from matplotlib.image import imsave
# import tensorflow as tf
from stable_baselines.common.callbacks import BaseCallback, EvalCallback
from stable_baselines.common.vec_env import DummyVecEnv
class MonitorCallback(EvalCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
      It must contain the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, eval_env, check_freq: int, save_example_freq: int, log_dir: str,sacred=None, n_eval_episodes=5, render=False, verbose=1):
super(MonitorCallback, self).__init__(verbose=verbose,
eval_env=eval_env,
best_model_save_path=log_dir,
log_path=log_dir,
eval_freq=check_freq,
n_eval_episodes=n_eval_episodes,
deterministic=False,
render=render)
self.render = render
self.verbose = verbose
self.env = eval_env
self.check_freq = check_freq
self.save_example_freq = save_example_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.best_mean_reward = -np.inf
self.sacred = sacred
self.sequence = False
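        # Sequence-style environments expose an image representation at each time step, used to save example videos.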
if self.env.__class__.__name__ in ['DarSeqEnv','DummyVecEnv'] :
self.sequence = True
self.statistics = {
'step_reward': [],
'reward': [],
'std_reward': [],
'duration': [],
'GAP': [],
'GAP*': [],
'fit_solution': [],
'delivered': []
# 'policy_loss': [],
# 'value_loss': [],
# 'policy_entropy': []
}
def _init_callback(self) -> None:
# Create folder if needed
if self.log_dir is not None:
os.makedirs(self.log_dir, exist_ok=True)
def _on_training_start(self) -> None:
"""
This method is called before the first rollout starts.
"""
pass
def _on_training_end(self) -> None:
"""
This event is triggered before exiting the `learn()` method.
"""
pass
def plot_statistics(self, show=False):
# Print them
if self.verbose:
print('\t ->[Epoch %d]<- mean episodic reward: %.3f' % (self.num_timesteps + 1, self.statistics['reward'][-1]))
print('\t * Mean duration : %0.3f' % (self.statistics['duration'][-1]))
print('\t * Mean std_reward : %0.3f' % (self.statistics['std_reward'][-1]))
print('\t * Mean step_reward : %0.3f' % (self.statistics['step_reward'][-1]))
# print('\t ** policy_loss : %0.3f' % (self.statistics['policy_loss'][-1]))
# print('\t ** value_loss : %0.3f' % (self.statistics['value_loss'][-1]))
# print('\t ** policy_entropy : %0.3f' % (self.statistics['policy_entropy'][-1]))
# Create plot of the statiscs, saved in folder
colors = [plt.cm.tab20(0),plt.cm.tab20(1),plt.cm.tab20c(2),
plt.cm.tab20c(3), plt.cm.tab20c(4),
plt.cm.tab20c(5),plt.cm.tab20c(6),plt.cm.tab20c(7)]
fig, (axis) = plt.subplots(1, len(self.statistics), figsize=(20, 10))
fig.suptitle(' - PPO Training: ' + self.log_dir)
for i, key in enumerate(self.statistics):
# Sacred (The one thing to keep here)
if self.sacred :
self.sacred.get_logger().report_scalar(title='Train stats',
series=key, value=self.statistics[key][-1], iteration=self.num_timesteps)
# self.sacred.log_scalar(key, self.statistics[key][-1], len(self.statistics[key]))
axis[i].plot(self.statistics[key], color=colors[i])
axis[i].set_title(' Plot of ' + key)
if show :
fig.show()
fig.savefig(self.log_dir + '/result_figure.jpg')
fig.clf()
plt.close(fig)
# Save the statistics as CSV file
if not self.sacred:
try:
with open(self.log_dir + '/statistics.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.statistics.keys())
writer.writeheader()
# for key in statistics
writer.writerow(self.statistics)
except IOError:
print("I/O error")
def save_image_batch(self, images, rewards, txt='test'):
        ''' Saving some examples of input -> output to see how the model behaves '''
print(' - Saving some examples - ')
number_i = min(len(images), 50)
plt.figure()
        fig, axis = plt.subplots(number_i, 2, figsize=(10, 50))  # number_i rows, 2 columns: input, output
fig.tight_layout()
fig.suptitle(' - examples of network - ')
for i in range(min(self.batch_size, number_i)):
input_map = indices2image(data[0][i], self.image_size)
axis[i, 0].imshow(input_map)
im = indice_map2image(outputs[i], self.image_size).cpu().numpy()
normalized = (im - im.min() ) / (im.max() - im.min())
axis[i, 1].imshow(normalized)
img_name = self.path_name + '/example_' + str(self.num_timesteps) + '.png'
plt.savefig(img_name)
plt.close()
if self.sacred :
self.sacred.add_artifact(img_name, content_type='image')
def save_example(self, observations, rewards, number):
noms = []
dir = self.log_dir + '/example/' + str(self.num_timesteps) + '/ex_number' + str(number)
if dir is not None:
os.makedirs(dir, exist_ok=True)
for i, obs in enumerate(observations):
save_name = dir + '/' + str(i) + '_r=' + str(rewards[i]) + '.png' #[np.array(img) for i, img in enumerate(images)
if self.env.__class__.__name__ == 'DummyVecEnv':
image = self.norm_image(obs[0], scale=1)
else :
image = self.norm_image(obs, scale=1)
# print('SHae after image', np.shape(image))
imsave(save_name, image)
noms.append(save_name)
        # Save the images as a video
video_name = dir + 'r=' + str(np.sum(rewards)) + '.mp4'
clips = [ImageClip(m).set_duration(0.2)
for m in noms]
concat_clip = concatenate_videoclips(clips, method="compose")
concat_clip.write_videofile(video_name, fps=24, verbose=None, logger=None)
if self.sacred :
self.sacred.get_logger().report_media('video', 'Res_' + str(number) + '_Rwd=' + str(np.sum(rewards)),
iteration=self.num_timesteps // self.check_freq,
local_path=video_name)
del concat_clip
del clips
def norm_image(self, image, type=None, scale=10):
image = np.kron(image, np.ones((scale, scale)))
if type=='rgb':
ret = np.empty((image.shape[0], image.shape[0], 3), dtype=np.uint8)
ret[:, :, 0] = image.copy()
ret[:, :, 1] = image.copy()
ret[:, :, 2] = image.copy()
image = ret.copy()
return (255 * (image - np.min(image)) / (np.max(image) - np.min(image))).astype(np.uint8)
def save_gif(self, observations, rewards):
# print(observations)
# print(rewards)
# length = min(len(observations), 10)
# observations = 255 * ((np.array(observations) + 1) / (np.max(observations) + 1)).astype(np.uint8)
save_name = self.log_dir + '/example' + str(self.num_timesteps) + '.gif'
images = [self.norm_image(observations[i]) for i in range(len(observations)) if rewards[i] >= 0] #[np.array(img) for i, img in enumerate(images)]
# imageio.mimsave(save_name, images, fps=1)
if self.sacred :
self.sacred.get_logger().report_media('GIF', 'isgif', iteration=self.num_timesteps, local_path=save_name)
def save_video(self, observations, rewards):
save_name = self.log_dir + '/example' + str(self.num_timesteps) + '.mp4'
images = [self.norm_image(observations[i], type='rgb') for i in range(len(observations)) if rewards[i] >= 0] #[np.array(img) for i, img in enumerate(images)
clips = [ImageClip(m).set_duration(2)
for m in images]
concat_clip = concatenate_videoclips(clips, method="compose").resize(100)
concat_clip.write_videofile(save_name, fps=24, verbose=False)
if self.sacred :
self.sacred.get_logger().report_media('video', 'results', iteration=self.num_timesteps, local_path=save_name)
def _on_step(self) -> bool:
"""
        In addition to EvalCallback we need:
        Examples of environment elements -> save them as a gif, for example
        Statistics to save -> save as plot and in database
            -> reward, length, loss, additional metrics (accuracy, best move?)
"""
# super(MonitorCallback, self)._on_step()
if self.num_timesteps % self.check_freq == 0 :
episode_rewards, episode_lengths = [], []
gap, fit_solution, delivered = [], [], []
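            # Roll out n_eval_episodes evaluation episodes, collecting rewards, lengths and environment metrics (GAP, fit_solution, delivered).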
wrapped_env = DummyVecEnv([lambda: self.env])
for i in range(self.n_eval_episodes):
obs = wrapped_env.reset()
done, state = False, None
last_time = 0
if self.sequence :
if self.env.__class__.__name__ == 'DummyVecEnv':
observations = [self.env.env_method('get_image_representation')]
else :
observations = [self.env.get_image_representation()]
else :
observations = [obs.copy()]
episode_reward = [0.0]
episode_lengths.append(0)
while not done:
# Run of simulation
action, state = self.model.predict(obs, state=state, deterministic=False)
new_obs, reward, done, info = wrapped_env.step(action)
obs = new_obs
# Save observation only if time step evolved
if self.sequence:
if self.env.__class__.__name__ == 'DummyVecEnv':
if self.env.get_attr('time_step')[0] > last_time :
last_time = self.env.get_attr('time_step')[0]
observations.append(self.env.env_method('get_image_representation'))
else :
if self.env.time_step > last_time :
last_time = self.env.time_step
observations.append(self.env.get_image_representation())
else :
observations.append(obs.copy())
episode_reward.append(reward)
episode_lengths[-1] += 1
if self.render:
self.env.render()
info = info[0]
gap.append(info['GAP'])
delivered.append(info['delivered'])
fit_solution.append(info['fit_solution'])
episode_rewards.append(np.sum(episode_reward))
# self.save_gif(observations, episode_reward)
if self.num_timesteps % self.save_example_freq == 0 :
self.save_example(observations, episode_reward,number=i)
del observations
self.statistics['GAP'].append(np.mean(gap))
self.statistics['GAP*'].append(np.min(gap))
self.statistics['fit_solution'].append(np.mean(fit_solution))
self.statistics['delivered'].append(np.mean(delivered))
self.statistics['reward'].append(np.mean(episode_rewards))
self.statistics['std_reward'].append(np.std(episode_rewards))
self.statistics['step_reward'].append(np.mean([episode_rewards[i]/episode_lengths[i] for i in range(len(episode_lengths))]))
self.statistics['duration'].append(np.mean(episode_lengths))
# self.statistics['policy_loss'].append(self.model.pg_loss.numpy())
# self.statistics['value_loss'].append(self.model.vf_loss.numpy())
# self.statistics['policy_entropy'].append(self.model.entropy.numpy())
self.plot_statistics()
# Save best model
if self.statistics['reward'][-1] == np.max(self.statistics['reward']):
save_path = self.log_dir + '/best_model'
if self.verbose > 0:
print("Saving new best model to {}".format(save_path))
self.model.save(save_path)
return True
|
[
"numpy.mean",
"matplotlib.pyplot.savefig",
"numpy.ones",
"os.makedirs",
"matplotlib.use",
"matplotlib.pyplot.cm.tab20",
"numpy.std",
"os.path.join",
"matplotlib.pyplot.cm.tab20c",
"matplotlib.image.imsave",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.empty",
"numpy.min",
"matplotlib.pyplot.subplots"
] |
[((155, 176), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (169, 176), False, 'import matplotlib\n'), ((1675, 1710), 'os.path.join', 'os.path.join', (['log_dir', '"""best_model"""'], {}), "(log_dir, 'best_model')\n", (1687, 1710), False, 'import os\n'), ((4490, 4504), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4499, 4504), True, 'import matplotlib.pyplot as plt\n'), ((5198, 5210), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5208, 5210), True, 'import matplotlib.pyplot as plt\n'), ((5231, 5274), 'matplotlib.pyplot.subplots', 'plt.subplots', (['number_i', '(2)'], {'figsize': '(10, 50)'}), '(number_i, 2, figsize=(10, 50))\n', (5243, 5274), True, 'import matplotlib.pyplot as plt\n'), ((5818, 5839), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_name'], {}), '(img_name)\n', (5829, 5839), True, 'import matplotlib.pyplot as plt\n'), ((5848, 5859), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5857, 5859), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2440), 'os.makedirs', 'os.makedirs', (['self.log_dir'], {'exist_ok': '(True)'}), '(self.log_dir, exist_ok=True)\n', (2411, 2440), False, 'import os\n'), ((3550, 3565), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(0)'], {}), '(0)\n', (3562, 3565), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3581), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(1)'], {}), '(1)\n', (3578, 3581), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3598), 'matplotlib.pyplot.cm.tab20c', 'plt.cm.tab20c', (['(2)'], {}), '(2)\n', (3595, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3634), 'matplotlib.pyplot.cm.tab20c', 'plt.cm.tab20c', (['(3)'], {}), '(3)\n', (3631, 3634), True, 'import matplotlib.pyplot as plt\n'), ((3636, 3652), 'matplotlib.pyplot.cm.tab20c', 'plt.cm.tab20c', (['(4)'], {}), '(4)\n', (3649, 3652), True, 'import matplotlib.pyplot as plt\n'), ((3672, 3688), 'matplotlib.pyplot.cm.tab20c', 'plt.cm.tab20c', (['(5)'], {}), '(5)\n', (3685, 3688), True, 'import matplotlib.pyplot as plt\n'), ((3689, 3705), 'matplotlib.pyplot.cm.tab20c', 'plt.cm.tab20c', (['(6)'], {}), '(6)\n', (3702, 3705), True, 'import matplotlib.pyplot as plt\n'), ((3706, 3722), 'matplotlib.pyplot.cm.tab20c', 'plt.cm.tab20c', (['(7)'], {}), '(7)\n', (3719, 3722), True, 'import matplotlib.pyplot as plt\n'), ((6168, 6199), 'os.makedirs', 'os.makedirs', (['dir'], {'exist_ok': '(True)'}), '(dir, exist_ok=True)\n', (6179, 6199), False, 'import os\n'), ((7502, 7525), 'numpy.ones', 'np.ones', (['(scale, scale)'], {}), '((scale, scale))\n', (7509, 7525), True, 'import numpy as np\n'), ((7569, 7630), 'numpy.empty', 'np.empty', (['(image.shape[0], image.shape[0], 3)'], {'dtype': 'np.uint8'}), '((image.shape[0], image.shape[0], 3), dtype=np.uint8)\n', (7577, 7630), True, 'import numpy as np\n'), ((6667, 6691), 'matplotlib.image.imsave', 'imsave', (['save_name', 'image'], {}), '(save_name, image)\n', (6673, 6691), False, 'from matplotlib.image import imsave\n'), ((12190, 12202), 'numpy.mean', 'np.mean', (['gap'], {}), '(gap)\n', (12197, 12202), True, 'import numpy as np\n'), ((12247, 12258), 'numpy.min', 'np.min', (['gap'], {}), '(gap)\n', (12253, 12258), True, 'import numpy as np\n'), ((12311, 12332), 'numpy.mean', 'np.mean', (['fit_solution'], {}), '(fit_solution)\n', (12318, 12332), True, 'import numpy as np\n'), ((12382, 12400), 'numpy.mean', 'np.mean', (['delivered'], {}), '(delivered)\n', (12389, 12400), True, 'import numpy as np\n'), ((12448, 12472), 'numpy.mean', 'np.mean', 
(['episode_rewards'], {}), '(episode_rewards)\n', (12455, 12472), True, 'import numpy as np\n'), ((12523, 12546), 'numpy.std', 'np.std', (['episode_rewards'], {}), '(episode_rewards)\n', (12529, 12546), True, 'import numpy as np\n'), ((12732, 12756), 'numpy.mean', 'np.mean', (['episode_lengths'], {}), '(episode_lengths)\n', (12739, 12756), True, 'import numpy as np\n'), ((13114, 13147), 'numpy.max', 'np.max', (["self.statistics['reward']"], {}), "(self.statistics['reward'])\n", (13120, 13147), True, 'import numpy as np\n'), ((6804, 6819), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (6810, 6819), True, 'import numpy as np\n'), ((11880, 11902), 'numpy.sum', 'np.sum', (['episode_reward'], {}), '(episode_reward)\n', (11886, 11902), True, 'import numpy as np\n'), ((7183, 7198), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (7189, 7198), True, 'import numpy as np\n'), ((7831, 7844), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (7837, 7844), True, 'import numpy as np\n'), ((7847, 7860), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (7853, 7860), True, 'import numpy as np\n'), ((7813, 7826), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (7819, 7826), True, 'import numpy as np\n')]
|
import logging
import signal
import gevent
import msgpack
from zerorpc import Publisher, Puller, Pusher, Server
import numpy as np
import jsonpickle
from .store import store
from .data import Data
from .operations.operation import Operation
from .utils.singleton import Singleton
__all__ = ['ServerAPI']
class ServerAPI(Server, metaclass=Singleton):
"""
RPC server class.
"""
def __init__(self, publisher=None, *args, **kwargs):
super(ServerAPI, self).__init__(*args, **kwargs)
self.publisher = publisher
def undo(self):
"""
        Undo an operation by popping it from the stack and calling its `undo` method.
"""
Operation.pop().undo()
def redo(self):
"""
        Call the `redo` method on the latest operation added to the stack.
"""
Operation.redo()
def register(self, msg):
pass
# self.publisher.testing("This is a test on client.")
def load_data(self, path, format):
"""
Load a data file given path and format.
"""
import astropy.units as u
# data = Data.read(path, format=format)
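        # Placeholder: build a random 100-point spectrum instead of reading the file from disk.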
data = Data(np.random.sample(100) * u.Jy, spectral_axis=np.linspace(1100, 1200, 100) * u.AA)
self.publisher.data_loaded(data.identifier)
def create_data(self, *args, **kwargs):
data = Data(*args, **kwargs)
self.publisher.data_created(data.identifier)
return data.identifier
def query_loader_formats(self):
"""
Returns a list of available data loader formats.
"""
from specutils import Spectrum1D
from astropy.io import registry as io_registry
all_formats = io_registry.get_formats(Spectrum1D)['Format']
return all_formats
def query_data(self, identifier):
data = store[identifier]
data_dict = {
'name': data.name,
'identifier': data.identifier,
'spectral_axis': data.spectral_axis.value.tolist(),
'spectral_axis_unit': data.spectral_axis.unit.to_string(),
'flux': data.flux.value.tolist(),
'unit': data.flux.unit.to_string()
}
return data_dict
def query_data_attribute(self, identifier, name):
data = store[identifier]
data_attr = getattr(data, name)
packed_data_attr = data.encode(data_attr)
return packed_data_attr
def launch(server_address=None, publisher_address=None, block=True):
server_address = server_address or "tcp://127.0.0.1:4242"
publisher_address = publisher_address or "tcp://127.0.0.1:4243"
# Establish the publisher service. This will send events to any
# subscribed services along the designated address.
publisher = Publisher()
publisher.connect(publisher_address)
# Setup the server service. This will be the api that clients
# will send events to.
server = ServerAPI(publisher)
server.bind(server_address)
logging.info(
"Server is now listening on %s and sending on %s.",
server_address, publisher_address)
# Allow for stopping the server via ctrl-c
gevent.signal(signal.SIGINT, server.stop)
server.run() if block else gevent.spawn(server.run)
|
[
"zerorpc.Publisher",
"gevent.signal",
"numpy.linspace",
"numpy.random.sample",
"astropy.io.registry.get_formats",
"logging.info",
"gevent.spawn"
] |
[((2766, 2777), 'zerorpc.Publisher', 'Publisher', ([], {}), '()\n', (2775, 2777), False, 'from zerorpc import Publisher, Puller, Pusher, Server\n'), ((2984, 3087), 'logging.info', 'logging.info', (['"""Server is now listening on %s and sending on %s."""', 'server_address', 'publisher_address'], {}), "('Server is now listening on %s and sending on %s.',\n server_address, publisher_address)\n", (2996, 3087), False, 'import logging\n'), ((3153, 3194), 'gevent.signal', 'gevent.signal', (['signal.SIGINT', 'server.stop'], {}), '(signal.SIGINT, server.stop)\n', (3166, 3194), False, 'import gevent\n'), ((3227, 3251), 'gevent.spawn', 'gevent.spawn', (['server.run'], {}), '(server.run)\n', (3239, 3251), False, 'import gevent\n'), ((1704, 1739), 'astropy.io.registry.get_formats', 'io_registry.get_formats', (['Spectrum1D'], {}), '(Spectrum1D)\n', (1727, 1739), True, 'from astropy.io import registry as io_registry\n'), ((1165, 1186), 'numpy.random.sample', 'np.random.sample', (['(100)'], {}), '(100)\n', (1181, 1186), True, 'import numpy as np\n'), ((1209, 1237), 'numpy.linspace', 'np.linspace', (['(1100)', '(1200)', '(100)'], {}), '(1100, 1200, 100)\n', (1220, 1237), True, 'import numpy as np\n')]
|
import numpy as np
from GeneralUtils import list_to_sum
class Fourier:
def __init__(self,amp=[1],freq=[1],ph=[0]):
self.amp = amp
self.freq = freq
self.ph = ph
def __str__(self):
out = []
for i in range(len(self.amp)):
if self.amp[i] != 1:
a = f"{self.amp[i]}*"
else:
a = ""
if self.freq[i] != 1:
f = f"*{self.freq[i]}"
else:
f = ""
if self.ph[i] != 0:
p = f"+{self.ph[i]}"
else:
p = ""
out.append(f"{a}sin(x{f}{p})")
return list_to_sum(out)
def __add__(self,other):
a = self.amp + other.amp
f = self.freq + other.freq
p = self.ph + other.ph
return Fourier(a,f,p)
def evaluate_series(F,x):
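    # Evaluate the series as the sum of amp * sin(freq * x + phase) over all stored components.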
    out = np.zeros_like(x, dtype=float)  # float accumulator even when x is an integer array
for i in range(len(F.amp)):
a = F.amp[i]
f = F.freq[i]
p = F.ph[i]
out += a*np.sin(x*f+p)
return out
|
[
"numpy.sin",
"numpy.zeros_like",
"GeneralUtils.list_to_sum"
] |
[((966, 982), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (979, 982), True, 'import numpy as np\n'), ((740, 756), 'GeneralUtils.list_to_sum', 'list_to_sum', (['out'], {}), '(out)\n', (751, 756), False, 'from GeneralUtils import list_to_sum\n'), ((1095, 1112), 'numpy.sin', 'np.sin', (['(x * f + p)'], {}), '(x * f + p)\n', (1101, 1112), True, 'import numpy as np\n')]
|
import numpy as np
import moch
import soch
import os
import sys
import scipy.io
import thorns
def main(parseID):
parseIn = parseID + 'In.mat'
parseOut = parseID + 'Out.mat'
parse = scipy.io.loadmat(parseIn)
os.remove(parseIn)
lagSpace = 1. * parse['lagSpace'] / 1000
parsStruct = parse['pars'][0, 0]
# Parametres
est = {'duration' : 1. * parsStruct['est'][0,0]['dur'][0][0] / 1000,
'loudness' : 1. * parsStruct['est'][0,0]['loud'][0][0],
'intv' : 1. * parsStruct['est'][0,0]['interval'][0] / 1000,
'onset' : 1. * parsStruct['est'][0,0]['onset' ][0][0] / 1000,
'tail' : 1. * parsStruct['est'][0,0]['tail'][0][0] / 1000,
'maskN' : parsStruct['est'][0,0]['maskNoise'][0][0],
'filename' : parsStruct['est'][0,0]['filename'][0],
'bandpass' : parsStruct['est'][0,0]['bandpass'][0],
'save' : parsStruct['est'][0,0]['save'][0]
}
if est['filename'] == -1:
est['type'] = parsStruct['est'][0,0]['type'][0]
est['freq'] = parsStruct['est'][0,0]['f'][0][0]
est['harms'] = parsStruct['est'][0,0]['harms'][0]
est['harmFact'] = parsStruct['est'][0,0]['harmFact'][0][0]
est['shift'] = parsStruct['est'][0,0]['shift'][0][0]
est['nOfIts'] = parsStruct['est'][0,0]['nOfIts'][0][0]
est['notes'] = parsStruct['est'][0,0]['notes'][0]
est['tuning'] = parsStruct['est'][0,0]['tuning'][0]
est['noiseOff'] = 1. * parsStruct['est'][0,0]['noiseOff'][0][0] / 1000
else:
est['type'] = 'external'
par = {'periphFs' : 100000,
'cochChanns' : (125, 10000, 30),
'SACFTau' : 1. * parsStruct['tauSACF'][0,0] / 1000,
'subCortTau' : 1. * parsStruct['tauSubthal'][0,0] / 1000,
'solvOnset' : 1. * parsStruct['solvOnset'][0] / 1000,
'subCortFs' : 100000,
'subCortAff' : parsStruct['subCortAff'][0,0],
'regularise' : parsStruct['regularise'][0,0],
'mu0' : parsStruct['mu0'][0,0],
'SACFGround' : parsStruct['SACFGround'][0,0],
'cortFs' : parsStruct['cortFs'][0,0],
'subDelay' : 1. * parsStruct['subDelay'][0,0] / 1000,
'subDelayDy' : 1. * parsStruct['subDelayDy'][0,0] / 1000,
}
if ('chord' in est['type']) and (est['notes'][0] != est['notes'][1]):
est['onset'] += par['subDelayDy']
par['mu0'] = 2 * par['mu0']
else:
est['onset'] += par['subDelay']
[A, n, b] = thalamicInput(lagSpace, par, est)
duration = 1.* len(A) / par['cortFs']
dti = 1./par['cortFs']
timeSpace = np.arange(start = dti, stop = duration + dti, step = dti)
if 'off' in est.keys():
timeSpace = timeSpace - est['off']
scipy.io.savemat(parseOut, {'A':A, 'n':n, 'b':b, 'timeSpace': timeSpace})
def thalamicInput(lagSpace, par, est, raster = False):
fs = par['periphFs']
# Subcortical processing
sound = soch.createStimulus(est, par['periphFs'])
prob = moch.peripheral(sound, par)
[A, n, b] = moch.subcortical(prob, lagSpace, par)
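    # Draw the remaining afferent responses and average over all subCortAff fibres.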
for i in range(1, par['subCortAff']):
sound = soch.createStimulus(est, par['periphFs'])
prob = moch.peripheral(sound, par)
[A0, n0, b0] = moch.subcortical(prob, lagSpace, par)
A = A + A0
n = n + n0
b = b + b0
A = (1. / par['subCortAff']) * A
n = (1. / par['subCortAff']) * n
b = (1. / par['subCortAff']) * b
if raster:
anfTrains = moch.peripheralSpikes(sound, par, fs = -1)
thorns.plot_raster(anfTrains)
thorns.show()
return [A, n, b]
main(sys.argv[1])
|
[
"moch.peripheral",
"moch.peripheralSpikes",
"thorns.show",
"soch.createStimulus",
"thorns.plot_raster",
"moch.subcortical",
"numpy.arange",
"os.remove"
] |
[((234, 252), 'os.remove', 'os.remove', (['parseIn'], {}), '(parseIn)\n', (243, 252), False, 'import os\n'), ((2726, 2777), 'numpy.arange', 'np.arange', ([], {'start': 'dti', 'stop': '(duration + dti)', 'step': 'dti'}), '(start=dti, stop=duration + dti, step=dti)\n', (2735, 2777), True, 'import numpy as np\n'), ((3065, 3106), 'soch.createStimulus', 'soch.createStimulus', (['est', "par['periphFs']"], {}), "(est, par['periphFs'])\n", (3084, 3106), False, 'import soch\n'), ((3118, 3145), 'moch.peripheral', 'moch.peripheral', (['sound', 'par'], {}), '(sound, par)\n', (3133, 3145), False, 'import moch\n'), ((3163, 3200), 'moch.subcortical', 'moch.subcortical', (['prob', 'lagSpace', 'par'], {}), '(prob, lagSpace, par)\n', (3179, 3200), False, 'import moch\n'), ((3260, 3301), 'soch.createStimulus', 'soch.createStimulus', (['est', "par['periphFs']"], {}), "(est, par['periphFs'])\n", (3279, 3301), False, 'import soch\n'), ((3317, 3344), 'moch.peripheral', 'moch.peripheral', (['sound', 'par'], {}), '(sound, par)\n', (3332, 3344), False, 'import moch\n'), ((3368, 3405), 'moch.subcortical', 'moch.subcortical', (['prob', 'lagSpace', 'par'], {}), '(prob, lagSpace, par)\n', (3384, 3405), False, 'import moch\n'), ((3615, 3655), 'moch.peripheralSpikes', 'moch.peripheralSpikes', (['sound', 'par'], {'fs': '(-1)'}), '(sound, par, fs=-1)\n', (3636, 3655), False, 'import moch\n'), ((3666, 3695), 'thorns.plot_raster', 'thorns.plot_raster', (['anfTrains'], {}), '(anfTrains)\n', (3684, 3695), False, 'import thorns\n'), ((3704, 3717), 'thorns.show', 'thorns.show', ([], {}), '()\n', (3715, 3717), False, 'import thorns\n')]
|
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import types
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_arr_almost
import pytest
import shapely.geometry as sgeom
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
#: Maps Google tile coordinates to native mercator coordinates as defined
#: by https://goo.gl/pgJi.
KNOWN_EXTENTS = {(0, 0, 0): (-20037508.342789244, 20037508.342789244,
-20037508.342789244, 20037508.342789244),
(2, 0, 2): (0., 10018754.17139462,
10018754.17139462, 20037508.342789244),
(0, 2, 2): (-20037508.342789244, -10018754.171394622,
-10018754.171394622, 0),
(2, 2, 2): (0, 10018754.17139462,
-10018754.171394622, 0),
(8, 9, 4): (0, 2504688.542848654,
-5009377.085697312, -2504688.542848654),
}
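# Proj4 5.0.0 appears to compute slightly different web-mercator extents (note the
# clipped y-values near the poles), so the expected numbers are overridden for that version.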
if ccrs.PROJ4_VERSION == (5, 0, 0):
KNOWN_EXTENTS = {
(0, 0, 0): (-20037508.342789244, 20037508.342789244,
-19994827.892149, 19994827.892149),
(2, 0, 2): (0, 10018754.171395,
9997413.946075, 19994827.892149),
(0, 2, 2): (-20037508.342789244, -10018754.171394622,
-9997413.946075, 0),
(2, 2, 2): (0, 10018754.171395,
-9997413.946075, 0),
(8, 9, 4): (0, 2504688.542849,
-4998706.973037, -2499353.486519),
}
def GOOGLE_IMAGE_URL_REPLACEMENT(self, tile):
url = ('https://chart.googleapis.com/chart?chst=d_text_outline&'
'chs=256x256&chf=bg,s,00000055&chld=FFFFFF|16|h|000000|b||||'
'Google:%20%20(' + str(tile[0]) + ',' + str(tile[1]) + ')'
'|Zoom%20' + str(tile[2]) + '||||||______________________'
'______')
return url
def test_google_tile_styles():
"""
Tests that setting the Google Maps tile style works as expected.
    This essentially just ensures that information is properly propagated through
    the class structure.
"""
reference_url = ("https://mts0.google.com/vt/lyrs={style}@177000000&hl=en"
"&src=api&x=1&y=2&z=3&s=G")
tile = ["1", "2", "3"]
# Default is street.
gt = cimgt.GoogleTiles()
url = gt._image_url(tile)
assert reference_url.format(style="m") == url
# Street
gt = cimgt.GoogleTiles(style="street")
url = gt._image_url(tile)
assert reference_url.format(style="m") == url
# Satellite
gt = cimgt.GoogleTiles(style="satellite")
url = gt._image_url(tile)
assert reference_url.format(style="s") == url
# Terrain
gt = cimgt.GoogleTiles(style="terrain")
url = gt._image_url(tile)
assert reference_url.format(style="t") == url
# Streets only
gt = cimgt.GoogleTiles(style="only_streets")
url = gt._image_url(tile)
assert reference_url.format(style="h") == url
# Exception is raised if unknown style is passed.
with pytest.raises(ValueError):
cimgt.GoogleTiles(style="random_style")
def test_google_wts():
gt = cimgt.GoogleTiles()
ll_target_domain = sgeom.box(-15, 50, 0, 60)
multi_poly = gt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
with pytest.raises(AssertionError):
list(gt.find_images(target_domain, -1))
assert (tuple(gt.find_images(target_domain, 0)) ==
((0, 0, 0),))
assert (tuple(gt.find_images(target_domain, 2)) ==
((1, 1, 2), (2, 1, 2)))
assert (list(gt.subtiles((0, 0, 0))) ==
[(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
assert (list(gt.subtiles((1, 0, 1))) ==
[(2, 0, 2), (2, 1, 2), (3, 0, 2), (3, 1, 2)])
with pytest.raises(AssertionError):
gt.tileextent((0, 1, 0))
assert_arr_almost(gt.tileextent((0, 0, 0)), KNOWN_EXTENTS[(0, 0, 0)])
assert_arr_almost(gt.tileextent((2, 0, 2)), KNOWN_EXTENTS[(2, 0, 2)])
assert_arr_almost(gt.tileextent((0, 2, 2)), KNOWN_EXTENTS[(0, 2, 2)])
assert_arr_almost(gt.tileextent((2, 2, 2)), KNOWN_EXTENTS[(2, 2, 2)])
assert_arr_almost(gt.tileextent((8, 9, 4)), KNOWN_EXTENTS[(8, 9, 4)])
def test_tile_bbox_y0_at_south_pole():
tms = cimgt.MapQuestOpenAerial()
# Check the y0_at_north_pole keywords returns the appropriate bounds.
assert_arr_almost(tms.tile_bbox(8, 6, 4, y0_at_north_pole=False),
np.array(KNOWN_EXTENTS[(8, 9, 4)]).reshape([2, 2]))
def test_tile_find_images():
gt = cimgt.GoogleTiles()
# Test the find_images method on a GoogleTiles instance.
ll_target_domain = sgeom.box(-10, 50, 10, 60)
multi_poly = gt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
assert (list(gt.find_images(target_domain, 4)) ==
[(7, 4, 4), (7, 5, 4), (8, 4, 4), (8, 5, 4)])
@pytest.mark.network
def test_image_for_domain():
gt = cimgt.GoogleTiles()
gt._image_url = types.MethodType(GOOGLE_IMAGE_URL_REPLACEMENT, gt)
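    # Rebind _image_url so the test fetches the text-only placeholder tiles defined
    # above rather than real Google map tiles.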
ll_target_domain = sgeom.box(-10, 50, 10, 60)
multi_poly = gt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
_, extent, _ = gt.image_for_domain(target_domain, 6)
ll_extent = ccrs.Geodetic().transform_points(gt.crs,
np.array(extent[:2]),
np.array(extent[2:]))
if ccrs.PROJ4_VERSION == (5, 0, 0):
assert_arr_almost(ll_extent[:, :2],
[[-11.25, 49.033955],
[11.25, 61.687101]])
else:
assert_arr_almost(ll_extent[:, :2],
[[-11.25, 48.92249926],
[11.25, 61.60639637]])
def test_quadtree_wts():
qt = cimgt.QuadtreeTiles()
ll_target_domain = sgeom.box(-15, 50, 0, 60)
multi_poly = qt.crs.project_geometry(ll_target_domain, ccrs.PlateCarree())
target_domain = multi_poly.geoms[0]
with pytest.raises(ValueError):
list(qt.find_images(target_domain, 0))
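    # A quadkey encodes the tile's path from the root tile: one digit (0-3) per zoom level.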
assert qt.tms_to_quadkey((1, 1, 1)) == '1'
assert qt.quadkey_to_tms('1') == (1, 1, 1)
assert qt.tms_to_quadkey((8, 9, 4)) == '1220'
assert qt.quadkey_to_tms('1220') == (8, 9, 4)
assert tuple(qt.find_images(target_domain, 1)) == ('0', '1')
assert tuple(qt.find_images(target_domain, 2)) == ('03', '12')
assert list(qt.subtiles('0')) == ['00', '01', '02', '03']
assert list(qt.subtiles('11')) == ['110', '111', '112', '113']
with pytest.raises(ValueError):
qt.tileextent('4')
assert_arr_almost(qt.tileextent(''), KNOWN_EXTENTS[(0, 0, 0)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((2, 0, 2), google=True)),
KNOWN_EXTENTS[(2, 0, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((0, 2, 2), google=True)),
KNOWN_EXTENTS[(0, 2, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((2, 0, 2), google=True)),
KNOWN_EXTENTS[(2, 0, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((2, 2, 2), google=True)),
KNOWN_EXTENTS[(2, 2, 2)])
assert_arr_almost(qt.tileextent(qt.tms_to_quadkey((8, 9, 4), google=True)),
KNOWN_EXTENTS[(8, 9, 4)])
def test_mapbox_tiles_api_url():
token = 'foo'
map_name = 'bar'
tile = [0, 1, 2]
exp_url = ('https://api.mapbox.com/v4/mapbox.bar'
'/2/0/1.png?access_token=foo')
mapbox_sample = cimgt.MapboxTiles(token, map_name)
url_str = mapbox_sample._image_url(tile)
assert url_str == exp_url
def test_mapbox_style_tiles_api_url():
token = 'foo'
username = 'baz'
map_id = 'bar'
tile = [0, 1, 2]
exp_url = ('https://api.mapbox.com/styles/v1/'
'baz/bar/tiles/256/2/0/1'
'?access_token=foo')
mapbox_sample = cimgt.MapboxStyleTiles(token, username, map_id)
url_str = mapbox_sample._image_url(tile)
assert url_str == exp_url
|
[
"cartopy.io.img_tiles.MapboxStyleTiles",
"numpy.testing.assert_array_almost_equal",
"cartopy.io.img_tiles.GoogleTiles",
"cartopy.io.img_tiles.MapQuestOpenAerial",
"shapely.geometry.box",
"cartopy.io.img_tiles.MapboxTiles",
"cartopy.crs.PlateCarree",
"cartopy.io.img_tiles.QuadtreeTiles",
"numpy.array",
"pytest.raises",
"types.MethodType",
"cartopy.crs.Geodetic"
] |
[((3086, 3105), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {}), '()\n', (3103, 3105), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3209, 3242), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {'style': '"""street"""'}), "(style='street')\n", (3226, 3242), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3349, 3385), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {'style': '"""satellite"""'}), "(style='satellite')\n", (3366, 3385), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3490, 3524), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {'style': '"""terrain"""'}), "(style='terrain')\n", (3507, 3524), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3634, 3673), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {'style': '"""only_streets"""'}), "(style='only_streets')\n", (3651, 3673), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3927, 3946), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {}), '()\n', (3944, 3946), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3971, 3996), 'shapely.geometry.box', 'sgeom.box', (['(-15)', '(50)', '(0)', '(60)'], {}), '(-15, 50, 0, 60)\n', (3980, 3996), True, 'import shapely.geometry as sgeom\n'), ((5088, 5114), 'cartopy.io.img_tiles.MapQuestOpenAerial', 'cimgt.MapQuestOpenAerial', ([], {}), '()\n', (5112, 5114), True, 'import cartopy.io.img_tiles as cimgt\n'), ((5374, 5393), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {}), '()\n', (5391, 5393), True, 'import cartopy.io.img_tiles as cimgt\n'), ((5478, 5504), 'shapely.geometry.box', 'sgeom.box', (['(-10)', '(50)', '(10)', '(60)'], {}), '(-10, 50, 10, 60)\n', (5487, 5504), True, 'import shapely.geometry as sgeom\n'), ((5798, 5817), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {}), '()\n', (5815, 5817), True, 'import cartopy.io.img_tiles as cimgt\n'), ((5838, 5888), 'types.MethodType', 'types.MethodType', (['GOOGLE_IMAGE_URL_REPLACEMENT', 'gt'], {}), '(GOOGLE_IMAGE_URL_REPLACEMENT, gt)\n', (5854, 5888), False, 'import types\n'), ((5913, 5939), 'shapely.geometry.box', 'sgeom.box', (['(-10)', '(50)', '(10)', '(60)'], {}), '(-10, 50, 10, 60)\n', (5922, 5939), True, 'import shapely.geometry as sgeom\n'), ((6687, 6708), 'cartopy.io.img_tiles.QuadtreeTiles', 'cimgt.QuadtreeTiles', ([], {}), '()\n', (6706, 6708), True, 'import cartopy.io.img_tiles as cimgt\n'), ((6733, 6758), 'shapely.geometry.box', 'sgeom.box', (['(-15)', '(50)', '(0)', '(60)'], {}), '(-15, 50, 0, 60)\n', (6742, 6758), True, 'import shapely.geometry as sgeom\n'), ((8409, 8443), 'cartopy.io.img_tiles.MapboxTiles', 'cimgt.MapboxTiles', (['token', 'map_name'], {}), '(token, map_name)\n', (8426, 8443), True, 'import cartopy.io.img_tiles as cimgt\n'), ((8788, 8835), 'cartopy.io.img_tiles.MapboxStyleTiles', 'cimgt.MapboxStyleTiles', (['token', 'username', 'map_id'], {}), '(token, username, map_id)\n', (8810, 8835), True, 'import cartopy.io.img_tiles as cimgt\n'), ((3818, 3843), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3831, 3843), False, 'import pytest\n'), ((3853, 3892), 'cartopy.io.img_tiles.GoogleTiles', 'cimgt.GoogleTiles', ([], {'style': '"""random_style"""'}), "(style='random_style')\n", (3870, 3892), True, 'import cartopy.io.img_tiles as cimgt\n'), ((4056, 4074), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4072, 4074), True, 'import cartopy.crs as ccrs\n'), ((4126, 4155), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), 
'(AssertionError)\n', (4139, 4155), False, 'import pytest\n'), ((4602, 4631), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (4615, 4631), False, 'import pytest\n'), ((5564, 5582), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5580, 5582), True, 'import cartopy.crs as ccrs\n'), ((5999, 6017), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6015, 6017), True, 'import cartopy.crs as ccrs\n'), ((6224, 6244), 'numpy.array', 'np.array', (['extent[:2]'], {}), '(extent[:2])\n', (6232, 6244), True, 'import numpy as np\n'), ((6295, 6315), 'numpy.array', 'np.array', (['extent[2:]'], {}), '(extent[2:])\n', (6303, 6315), True, 'import numpy as np\n'), ((6365, 6443), 'numpy.testing.assert_array_almost_equal', 'assert_arr_almost', (['ll_extent[:, :2]', '[[-11.25, 49.033955], [11.25, 61.687101]]'], {}), '(ll_extent[:, :2], [[-11.25, 49.033955], [11.25, 61.687101]])\n', (6382, 6443), True, 'from numpy.testing import assert_array_almost_equal as assert_arr_almost\n'), ((6515, 6602), 'numpy.testing.assert_array_almost_equal', 'assert_arr_almost', (['ll_extent[:, :2]', '[[-11.25, 48.92249926], [11.25, 61.60639637]]'], {}), '(ll_extent[:, :2], [[-11.25, 48.92249926], [11.25, \n 61.60639637]])\n', (6532, 6602), True, 'from numpy.testing import assert_array_almost_equal as assert_arr_almost\n'), ((6818, 6836), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6834, 6836), True, 'import cartopy.crs as ccrs\n'), ((6888, 6913), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6901, 6913), False, 'import pytest\n'), ((7431, 7456), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7444, 7456), False, 'import pytest\n'), ((6134, 6149), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (6147, 6149), True, 'import cartopy.crs as ccrs\n'), ((5282, 5314), 'numpy.array', 'np.array', (['KNOWN_EXTENTS[8, 9, 4]'], {}), '(KNOWN_EXTENTS[8, 9, 4])\n', (5290, 5314), True, 'import numpy as np\n')]
|
import numpy as np
x = np.array([0,1])
w = np.array([0.5,0.5])
b = -0.7
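# A single perceptron: it outputs 1 when the weighted sum w*x plus the bias b is positive.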
print(w*x)
print(np.sum(w*x))
print(np.sum(w*x)+b)
def AND(x1,x2):
x = np.array([x1,x2])
w = np.array([0.5,0.5])
b = -0.7
tmp = np.sum(w*x)+b
if tmp <= 0:
return 0
else:
return 1
def NAND(x1,x2):
x = np.array([x1,x2])
w = np.array([-0.5,-0.5])
b = 0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def OR(x1,x2):
x = np.array([x1,x2])
w = np.array([0.5,0.5])
b = -0.2
tmp = np.sum(w*x)+b
if tmp <= 0:
return 0
else:
return 1
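# XOR is not linearly separable, so it is built from two layers: NAND and OR feeding into AND.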
def XOR(x1,x2):
s1 = NAND(x1,x2)
s2 = OR(x1,x2)
y = AND(s1,s2)
return y
print(XOR(0,0))
print(XOR(1,0))
print(XOR(0,1))
print(XOR(1,1))
|
[
"numpy.array",
"numpy.sum"
] |
[((23, 39), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (31, 39), True, 'import numpy as np\n'), ((43, 63), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (51, 63), True, 'import numpy as np\n'), ((90, 103), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (96, 103), True, 'import numpy as np\n'), ((149, 167), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (157, 167), True, 'import numpy as np\n'), ((175, 195), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (183, 195), True, 'import numpy as np\n'), ((319, 337), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (327, 337), True, 'import numpy as np\n'), ((345, 367), 'numpy.array', 'np.array', (['[-0.5, -0.5]'], {}), '([-0.5, -0.5])\n', (353, 367), True, 'import numpy as np\n'), ((492, 510), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (500, 510), True, 'import numpy as np\n'), ((518, 538), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (526, 538), True, 'import numpy as np\n'), ((109, 122), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (115, 122), True, 'import numpy as np\n'), ((218, 231), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (224, 231), True, 'import numpy as np\n'), ((389, 402), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (395, 402), True, 'import numpy as np\n'), ((561, 574), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (567, 574), True, 'import numpy as np\n')]
|
print("From python: Within python module")
import os,sys
HERE = os.getcwd()
sys.path.insert(0,HERE)
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
data_array = np.zeros(shape=(2001,258)) # Very important that this matches the number of timesteps in the main solver
x = np.arange(start=0,stop=2.0*np.pi,step=2.0*np.pi/256)
iternum = 0
def collection_func(input_array):
global data_array,iternum
data_array[iternum,:] = input_array[:]
iternum+=1
return None
def analyses_func():
global data_array, x
plt.figure()
for i in range(0,np.shape(data_array)[0],400):
plt.plot(x,data_array[i,1:-1],label='Timestep '+str(i))
plt.legend()
    plt.xlabel('x')
    plt.ylabel('u')
plt.title('Field evolution')
plt.savefig('Field_evolution.png')
plt.close()
# Perform an SVD
data_array = data_array[:,1:-1]
print('Performing SVD')
u,s,v = np.linalg.svd(data_array,full_matrices=False)
# Plot SVD eigenvectors
plt.figure()
plt.plot(x, v[0,:],label='Mode 0')
plt.plot(x, v[1,:],label='Mode 1')
plt.plot(x, v[2,:],label='Mode 2')
plt.legend()
plt.title('SVD Eigenvectors')
    plt.xlabel('x')
    plt.ylabel('u')
plt.savefig('SVD_Eigenvectors.png')
plt.close()
np.save('eigenvectors.npy',v[0:3,:].T)
# Train an LSTM on the coefficients of the eigenvectors
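    # Project the snapshots onto the first three modes to obtain their time coefficients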
time_series = np.matmul(v[0:3,:],data_array.T).T
num_timesteps = np.shape(time_series)[0]
train_series = time_series[:num_timesteps//2]
test_series = time_series[num_timesteps//2:]
# import the LSTM architecture and initialize
from ml_module import standard_lstm
ml_model = standard_lstm(train_series)
# Train the model
ml_model.train_model()
# Restore best weights and perform an inference
print('Performing inference on testing data')
ml_model.model_inference(test_series)
return_data = v[0:3,:].T
return return_data
if __name__ == '__main__':
pass
|
[
"sys.path.insert",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.getcwd",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.matmul",
"numpy.linalg.svd",
"matplotlib.pyplot.title",
"numpy.shape",
"numpy.save",
"ml_module.standard_lstm",
"numpy.arange"
] |
[((65, 76), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (74, 76), False, 'import os, sys\n'), ((77, 101), 'sys.path.insert', 'sys.path.insert', (['(0)', 'HERE'], {}), '(0, HERE)\n', (92, 101), False, 'import os, sys\n'), ((191, 218), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2001, 258)'}), '(shape=(2001, 258))\n', (199, 218), True, 'import numpy as np\n'), ((300, 360), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(2.0 * np.pi)', 'step': '(2.0 * np.pi / 256)'}), '(start=0, stop=2.0 * np.pi, step=2.0 * np.pi / 256)\n', (309, 360), True, 'import numpy as np\n'), ((562, 574), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (572, 574), True, 'import matplotlib.pyplot as plt\n'), ((694, 706), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (704, 706), True, 'import matplotlib.pyplot as plt\n'), ((711, 726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (721, 726), True, 'import matplotlib.pyplot as plt\n'), ((731, 746), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""u"""'], {}), "('u')\n", (741, 746), True, 'import matplotlib.pyplot as plt\n'), ((751, 779), 'matplotlib.pyplot.title', 'plt.title', (['"""Field evolution"""'], {}), "('Field evolution')\n", (760, 779), True, 'import matplotlib.pyplot as plt\n'), ((784, 818), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Field_evolution.png"""'], {}), "('Field_evolution.png')\n", (795, 818), True, 'import matplotlib.pyplot as plt\n'), ((823, 834), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((933, 979), 'numpy.linalg.svd', 'np.linalg.svd', (['data_array'], {'full_matrices': '(False)'}), '(data_array, full_matrices=False)\n', (946, 979), True, 'import numpy as np\n'), ((1012, 1024), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1022, 1024), True, 'import matplotlib.pyplot as plt\n'), ((1029, 1065), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'v[0, :]'], {'label': '"""Mode 0"""'}), "(x, v[0, :], label='Mode 0')\n", (1037, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1104), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'v[1, :]'], {'label': '"""Mode 1"""'}), "(x, v[1, :], label='Mode 1')\n", (1076, 1104), True, 'import matplotlib.pyplot as plt\n'), ((1107, 1143), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'v[2, :]'], {'label': '"""Mode 2"""'}), "(x, v[2, :], label='Mode 2')\n", (1115, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1158), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1156, 1158), True, 'import matplotlib.pyplot as plt\n'), ((1163, 1192), 'matplotlib.pyplot.title', 'plt.title', (['"""SVD Eigenvectors"""'], {}), "('SVD Eigenvectors')\n", (1172, 1192), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1212), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1207, 1212), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1232), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""u"""'], {}), "('u')\n", (1227, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1272), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SVD_Eigenvectors.png"""'], {}), "('SVD_Eigenvectors.png')\n", (1248, 1272), True, 'import matplotlib.pyplot as plt\n'), ((1277, 1288), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1286, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1334), 'numpy.save', 'np.save', (['"""eigenvectors.npy"""', 'v[0:3, :].T'], {}), "('eigenvectors.npy', v[0:3, :].T)\n", (1301, 1334), True, 'import 
numpy as np\n'), ((1698, 1725), 'ml_module.standard_lstm', 'standard_lstm', (['train_series'], {}), '(train_series)\n', (1711, 1725), False, 'from ml_module import standard_lstm\n'), ((1412, 1446), 'numpy.matmul', 'np.matmul', (['v[0:3, :]', 'data_array.T'], {}), '(v[0:3, :], data_array.T)\n', (1421, 1446), True, 'import numpy as np\n'), ((1467, 1488), 'numpy.shape', 'np.shape', (['time_series'], {}), '(time_series)\n', (1475, 1488), True, 'import numpy as np\n'), ((596, 616), 'numpy.shape', 'np.shape', (['data_array'], {}), '(data_array)\n', (604, 616), True, 'import numpy as np\n')]
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
# changes to turn test into imperative mode test
try:
from tensorflow.contrib import imperative
from tensorflow.contrib.imperative.python.imperative import test_util
except:
import imperative
from imperative import test_util
import tensorflow as tf
env = imperative.Env(tf)
math_ops = env.tf
constant_op = env.tf
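# env.tf exposes the TensorFlow API through the imperative environment, so the ops below
# execute immediately instead of being deferred to a separately run graph.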
class ReduceTest(test_util.TensorFlowTestCase):
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with self.test_session():
y_tf = math_ops.reduce_sum(x).eval()
self.assertEqual(y_tf, 21)
class RoundTest(test_util.TensorFlowTestCase):
def testRounding(self):
try:
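      # the assertions run inside try/except so that a failure drops into pdb post-mortem below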
x = [0.49, 0.7, -0.3, -0.8]
for dtype in [np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = y_tf.eval()
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
except:
import sys, pdb, traceback
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
def testSquaredDifference(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
y = np.array([-3, -2, -1], dtype=np.int32)
z = (x - y)*(x - y)
with self.test_session():
z_tf = math_ops.squared_difference(x, y).eval()
self.assertAllClose(z, z_tf)
if __name__ == "__main__":
googletest.main()
|
[
"imperative.Env",
"tensorflow.python.ops.math_ops.round",
"pdb.post_mortem",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.math_ops.squared_difference",
"numpy.array",
"sys.exc_info",
"tensorflow.python.platform.googletest.main",
"numpy.fmod",
"tensorflow.python.ops.math_ops.mod",
"traceback.print_exc",
"numpy.mod",
"numpy.round"
] |
[((1291, 1309), 'imperative.Env', 'imperative.Env', (['tf'], {}), '(tf)\n', (1305, 1309), False, 'import imperative\n'), ((3592, 3609), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (3607, 3609), False, 'from tensorflow.python.platform import googletest\n'), ((1439, 1487), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'np.int32'}), '([[1, 2, 3], [4, 5, 6]], dtype=np.int32)\n', (1447, 1487), True, 'import numpy as np\n'), ((3323, 3371), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'np.int32'}), '([[1, 2, 3], [4, 5, 6]], dtype=np.int32)\n', (3331, 3371), True, 'import numpy as np\n'), ((3380, 3418), 'numpy.array', 'np.array', (['[-3, -2, -1]'], {'dtype': 'np.int32'}), '([-3, -2, -1], dtype=np.int32)\n', (3388, 3418), True, 'import numpy as np\n'), ((1771, 1795), 'numpy.array', 'np.array', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (1779, 1795), True, 'import numpy as np\n'), ((2184, 2198), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2196, 2198), False, 'import sys, pdb, traceback\n'), ((2205, 2226), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2224, 2226), False, 'import sys, pdb, traceback\n'), ((2233, 2252), 'pdb.post_mortem', 'pdb.post_mortem', (['tb'], {}), '(tb)\n', (2248, 2252), False, 'import sys, pdb, traceback\n'), ((2485, 2509), 'numpy.array', 'np.array', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (2493, 2509), True, 'import numpy as np\n'), ((2942, 2966), 'numpy.array', 'np.array', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (2950, 2966), True, 'import numpy as np\n'), ((1531, 1553), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {}), '(x)\n', (1550, 1553), False, 'from tensorflow.python.ops import math_ops\n'), ((2561, 2605), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['x_np'], {'shape': 'x_np.shape'}), '(x_np, shape=x_np.shape)\n', (2581, 2605), False, 'from tensorflow.python.framework import constant_op\n'), ((2623, 2648), 'tensorflow.python.ops.math_ops.mod', 'math_ops.mod', (['x_tf', 'denom'], {}), '(x_tf, denom)\n', (2635, 2648), False, 'from tensorflow.python.ops import math_ops\n'), ((2698, 2718), 'numpy.fmod', 'np.fmod', (['x_np', 'denom'], {}), '(x_np, denom)\n', (2705, 2718), True, 'import numpy as np\n'), ((3018, 3062), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['x_np'], {'shape': 'x_np.shape'}), '(x_np, shape=x_np.shape)\n', (3038, 3062), False, 'from tensorflow.python.framework import constant_op\n'), ((3080, 3105), 'tensorflow.python.ops.math_ops.mod', 'math_ops.mod', (['x_tf', 'denom'], {}), '(x_tf, denom)\n', (3092, 3105), False, 'from tensorflow.python.ops import math_ops\n'), ((3155, 3174), 'numpy.mod', 'np.mod', (['x_np', 'denom'], {}), '(x_np, denom)\n', (3161, 3174), True, 'import numpy as np\n'), ((3486, 3519), 'tensorflow.python.ops.math_ops.squared_difference', 'math_ops.squared_difference', (['x', 'y'], {}), '(x, y)\n', (3513, 3519), False, 'from tensorflow.python.ops import math_ops\n'), ((1904, 1948), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['x_np'], {'shape': 'x_np.shape'}), '(x_np, shape=x_np.shape)\n', (1924, 1948), False, 'from tensorflow.python.framework import constant_op\n'), ((1968, 1988), 'tensorflow.python.ops.math_ops.round', 'math_ops.round', (['x_tf'], {}), '(x_tf)\n', (1982, 1988), False, 'from tensorflow.python.ops import math_ops\n'), ((2042, 2056), 'numpy.round', 'np.round', 
(['x_np'], {}), '(x_np)\n', (2050, 2056), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy.random as rnd
from matplotlib.patches import Ellipse
NUM = 250
ells = [Ellipse(xy=rnd.rand(2)*10, width=rnd.rand(), height=rnd.rand(), angle=rnd.rand()*360)
for i in range(NUM)]
fig = plt.figure(0)
ax = fig.add_subplot(111, aspect='equal')
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(rnd.rand())
e.set_facecolor(rnd.rand(3))
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.show()
|
[
"matplotlib.pyplot.figure",
"numpy.random.rand",
"matplotlib.pyplot.show"
] |
[((240, 253), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (250, 253), True, 'import matplotlib.pyplot as plt\n'), ((461, 471), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (469, 471), True, 'import matplotlib.pyplot as plt\n'), ((376, 386), 'numpy.random.rand', 'rnd.rand', ([], {}), '()\n', (384, 386), True, 'import numpy.random as rnd\n'), ((408, 419), 'numpy.random.rand', 'rnd.rand', (['(3)'], {}), '(3)\n', (416, 419), True, 'import numpy.random as rnd\n'), ((151, 161), 'numpy.random.rand', 'rnd.rand', ([], {}), '()\n', (159, 161), True, 'import numpy.random as rnd\n'), ((170, 180), 'numpy.random.rand', 'rnd.rand', ([], {}), '()\n', (178, 180), True, 'import numpy.random as rnd\n'), ((129, 140), 'numpy.random.rand', 'rnd.rand', (['(2)'], {}), '(2)\n', (137, 140), True, 'import numpy.random as rnd\n'), ((188, 198), 'numpy.random.rand', 'rnd.rand', ([], {}), '()\n', (196, 198), True, 'import numpy.random as rnd\n')]
|
"""
Core implementation of :mod:`sklearndf.transformation.wrapper`
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.manifold import Isomap
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, PolynomialFeatures
from pytools.api import AllTracker
from ... import TransformerDF
from ...wrapper import TransformerWrapperDF
log = logging.getLogger(__name__)
__all__ = [
"BaseDimensionalityReductionWrapperDF",
"BaseMultipleInputsPerOutputTransformerWrapperDF",
"ColumnPreservingTransformerWrapperDF",
"ColumnSubsetTransformerWrapperDF",
"ComponentsDimensionalityReductionWrapperDF",
"FeatureSelectionWrapperDF",
"NComponentsDimensionalityReductionWrapperDF",
"NumpyTransformerWrapperDF",
"ColumnTransformerWrapperDF",
"IsomapWrapperDF",
"ImputerWrapperDF",
"MissingIndicatorWrapperDF",
"AdditiveChi2SamplerWrapperDF",
"KBinsDiscretizerWrapperDF",
"PolynomialFeaturesWrapperDF",
"OneHotEncoderWrapperDF",
]
#
# type variables
#
T_Transformer = TypeVar("T_Transformer", bound=TransformerMixin)
# T_Imputer is needed because sklearn's _BaseImputer only exists from v0.22 onwards.
# Once we drop support for sklearn 0.21, _BaseImputer can be used instead.
# The following TypeVar helps to annotate availability of "add_indicator" and
# "missing_values" attributes on an imputer instance for ImputerWrapperDF below
# noinspection PyProtectedMember
from sklearn.impute._iterative import IterativeImputer
T_Imputer = TypeVar("T_Imputer", SimpleImputer, IterativeImputer)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# wrapper classes for transformers
#
class NumpyTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that only accept numpy arrays.
Converts data frames to numpy arrays before handing off to the native transformer.
Implementations must define :meth:`_get_features_original`.
"""
# noinspection PyPep8Naming
def _adjust_X_type_for_delegate(
self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
) -> np.ndarray:
assert to_numpy is not False, "X must be converted to a numpy array"
return super()._adjust_X_type_for_delegate(X, to_numpy=True)
def _adjust_y_type_for_delegate(
self,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
to_numpy: Optional[bool] = None,
) -> Optional[np.ndarray]:
assert to_numpy is not False, "y must be converted to a numpy array"
return super()._adjust_y_type_for_delegate(y, to_numpy=True)
class ColumnSubsetTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that do not change column names,
but that may remove one or more columns.
Implementations must define :meth:`_get_features_out`.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# return column labels for arrays returned by the fitted transformer.
pass
def _get_features_original(self) -> pd.Series:
# return the series with output columns in index and output columns as values
features_out = self._get_features_out()
return pd.Series(index=features_out, data=features_out.values)
class ColumnPreservingTransformerWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
):
"""
DF wrapper for transformers whose output columns match the input columns.
The native transformer must not add, remove, reorder, or rename any of the input
columns.
"""
def _get_features_out(self) -> pd.Index:
return self.feature_names_in_
class BaseMultipleInputsPerOutputTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer]
):
"""
DF wrapper for transformers mapping multiple input columns to individual output
columns.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# make this method abstract to ensure subclasses override the default
# behaviour, which usually relies on method ``_get_features_original``
pass
def _get_features_original(self) -> pd.Series:
raise NotImplementedError(
f"{type(self.native_estimator).__name__} transformers map multiple "
"inputs to individual output columns; current sklearndf implementation "
"only supports many-to-1 mappings from output columns to input columns"
)
class BaseDimensionalityReductionWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers.
The native transformer is considered to map all input columns to each output column.
"""
@property
@abstractmethod
def _n_components_(self) -> int:
pass
def _get_features_out(self) -> pd.Index:
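        # the reduced components are anonymous, so synthesize output names x_0 ... x_{n-1}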
return pd.Index([f"x_{i}" for i in range(self._n_components_)])
class NComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
:attr:`n_components` attribute.
Subclasses must implement :meth:`_get_features_original`.
"""
_ATTR_N_COMPONENTS = "n_components"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_N_COMPONENTS)
@property
def _n_components_(self) -> int:
return getattr(self.native_estimator, self._ATTR_N_COMPONENTS)
class ComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
``components_`` attribute.
The native transformer must provide a ``components_`` attribute once fitted,
as an array of shape (n_components, n_features).
"""
_ATTR_COMPONENTS = "components_"
# noinspection PyPep8Naming
def _post_fit(
self, X: pd.DataFrame, y: Optional[pd.Series] = None, **fit_params
) -> None:
# noinspection PyProtectedMember
super()._post_fit(X, y, **fit_params)
self._validate_delegate_attribute(attribute_name=self._ATTR_COMPONENTS)
@property
def _n_components_(self) -> int:
return len(getattr(self.native_estimator, self._ATTR_COMPONENTS))
class FeatureSelectionWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
DF wrapper for feature selection transformers.
The native transformer must implement a ``get_support`` method, providing the
    indices of the selected input columns.
"""
_ATTR_GET_SUPPORT = "get_support"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_GET_SUPPORT)
def _get_features_out(self) -> pd.Index:
get_support = getattr(self.native_estimator, self._ATTR_GET_SUPPORT)
return self.feature_names_in_[get_support()]
class ColumnTransformerWrapperDF(
TransformerWrapperDF[ColumnTransformer], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.compose.ColumnTransformer`.
Requires all transformers passed as the ``transformers`` parameter to implement
:class:`.TransformerDF`.
"""
__DROP = "drop"
__PASSTHROUGH = "passthrough"
__SPECIAL_TRANSFORMERS = (__DROP, __PASSTHROUGH)
def _validate_delegate_estimator(self) -> None:
column_transformer: ColumnTransformer = self.native_estimator
if (
column_transformer.remainder
not in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
):
raise ValueError(
f"unsupported value for arg remainder: ({column_transformer.remainder})"
)
non_compliant_transformers: List[str] = [
type(transformer).__name__
for _, transformer, _ in column_transformer.transformers
if not (
isinstance(transformer, TransformerDF)
or transformer in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
)
]
if non_compliant_transformers:
from .. import ColumnTransformerDF
raise ValueError(
f"{ColumnTransformerDF.__name__} only accepts instances of "
f"{TransformerDF.__name__} or special values "
f'"{" and ".join(ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS)}" '
"as valid transformers, but "
f'also got: {", ".join(non_compliant_transformers)}'
)
def _get_features_original(self) -> pd.Series:
"""
Return the series mapping output column names to original columns names.
:return: the series with index the column names of the output dataframe and
values the corresponding input column names.
"""
def _features_original(df_transformer: TransformerDF, columns: List[Any]):
if df_transformer == ColumnTransformerWrapperDF.__PASSTHROUGH:
# we may get positional indices for columns selected by the
                # 'passthrough' transformer, and in that case we need to look up the
# associated column names
if all(isinstance(column, int) for column in columns):
column_names = self._get_features_in()[columns]
else:
column_names = columns
return pd.Series(index=column_names, data=column_names)
else:
return df_transformer.feature_names_original_
return pd.concat(
[
_features_original(df_transformer, columns)
for _, df_transformer, columns in self.native_estimator.transformers_
if (
len(columns) > 0
and df_transformer != ColumnTransformerWrapperDF.__DROP
)
]
)
class ImputerWrapperDF(TransformerWrapperDF[T_Imputer], metaclass=ABCMeta):
"""
DF wrapper for imputation transformers, e.g., :class:`sklearn.impute.SimpleImputer`.
"""
def _get_features_original(self) -> pd.Series:
# get the columns that were dropped during imputation
delegate_estimator = self.native_estimator
nan_mask = []
def _nan_mask_from_statistics(stats: np.array):
if issubclass(stats.dtype.type, float):
na_mask = np.isnan(stats)
else:
na_mask = [
x is None or (isinstance(x, float) and np.isnan(x)) for x in stats
]
return na_mask
        # implementation for e.g. SimpleImputer
if hasattr(delegate_estimator, "statistics_"):
nan_mask = _nan_mask_from_statistics(stats=delegate_estimator.statistics_)
# implementation for IterativeImputer
elif hasattr(delegate_estimator, "initial_imputer_"):
initial_imputer: SimpleImputer = delegate_estimator.initial_imputer_
nan_mask = _nan_mask_from_statistics(stats=initial_imputer.statistics_)
        # implementation for e.g. KNNImputer
elif hasattr(delegate_estimator, "_mask_fit_X"):
# noinspection PyProtectedMember
nan_mask = np.all(delegate_estimator._mask_fit_X, axis=0)
        # the imputed columns are all incoming columns, except the ones that were dropped
imputed_columns = self.feature_names_in_.delete(np.argwhere(nan_mask).tolist())
features_original = pd.Series(
index=imputed_columns, data=imputed_columns.values
)
# if the add_indicator flag is set, we will get additional "missing" columns
if delegate_estimator.add_indicator:
from .. import MissingIndicatorDF
missing_indicator = MissingIndicatorDF.from_fitted(
estimator=delegate_estimator.indicator_,
features_in=self.feature_names_in_,
n_outputs=self.n_outputs_,
)
return features_original.append(missing_indicator.feature_names_original_)
else:
return features_original
class MissingIndicatorWrapperDF(
TransformerWrapperDF[MissingIndicator], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.impute.MissingIndicator`.
"""
def _get_features_original(self) -> pd.Series:
features_original: np.ndarray = self.feature_names_in_[
self.native_estimator.features_
].values
features_out = pd.Index([f"{name}__missing" for name in features_original])
return pd.Series(index=features_out, data=features_original)
class IsomapWrapperDF(BaseDimensionalityReductionWrapperDF[Isomap], metaclass=ABCMeta):
"""
DF wrapper for :class:`sklearn.manifold.Isomap`.
"""
@property
def _n_components_(self) -> int:
return self.native_estimator.embedding_.shape[1]
class AdditiveChi2SamplerWrapperDF(
BaseDimensionalityReductionWrapperDF[AdditiveChi2Sampler], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.kernel_approximation.AdditiveChi2Sampler`.
"""
@property
def _n_components_(self) -> int:
return len(self._features_in) * (2 * self.native_estimator.sample_steps + 1)
class PolynomialFeaturesWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[PolynomialFeatures],
metaclass=ABCMeta,
):
"""
DF wrapper for :class:`sklearn.preprocessing.PolynomialFeatures`.
"""
def _get_features_out(self) -> pd.Index:
return pd.Index(
data=self.native_estimator.get_feature_names(
input_features=self.feature_names_in_.astype(str)
)
)
class OneHotEncoderWrapperDF(TransformerWrapperDF[OneHotEncoder], metaclass=ABCMeta):
"""
DF wrapper for :class:`sklearn.preprocessing.OneHotEncoder`.
"""
def _validate_delegate_estimator(self) -> None:
if self.native_estimator.sparse:
raise NotImplementedError("sparse matrices not supported; use sparse=False")
def _get_features_original(self) -> pd.Series:
# Return the series mapping output column names to original column names.
#
# Remove 1st category column if argument drop == 'first'
# Remove 1st category column only of binary features if arg drop == 'if_binary'
feature_names_out = pd.Index(
self.native_estimator.get_feature_names(self.feature_names_in_)
)
if self.drop == "first":
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in range(len(category) - 1)
]
elif self.drop == "if_binary":
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in (range(1) if len(category) == 2 else category)
]
else:
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in category
]
return pd.Series(index=feature_names_out, data=feature_names_in)
class KBinsDiscretizerWrapperDF(
TransformerWrapperDF[KBinsDiscretizer], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.preprocessing.KBinsDiscretizer`.
"""
def _validate_delegate_estimator(self) -> None:
if self.native_estimator.encode == "onehot":
raise NotImplementedError(
'property encode="onehot" is not supported due to sparse matrices;'
'consider using "onehot-dense" instead'
)
def _get_features_original(self) -> pd.Series:
"""
Return the series mapping output column names to original columns names.
:return: the series with index the column names of the output dataframe and
values the corresponding input column names.
"""
if self.native_estimator.encode == "onehot-dense":
n_bins_per_feature = self.native_estimator.n_bins_
features_in, features_out = zip(
*(
(feature_name, f"{feature_name}_bin_{bin_index}")
for feature_name, n_bins in zip(
self.feature_names_in_, n_bins_per_feature
)
for bin_index in range(n_bins)
)
)
return pd.Series(index=features_out, data=features_in)
elif self.native_estimator.encode == "ordinal":
return pd.Series(
index=self.feature_names_in_.astype(str) + "_bin",
data=self.feature_names_in_,
)
else:
raise ValueError(
f"unexpected value for property encode={self.native_estimator.encode}"
)
#
# validate __all__
#
__tracker.validate()
|
[
"logging.getLogger",
"pandas.Series",
"pandas.Index",
"numpy.argwhere",
"numpy.isnan",
"numpy.all",
"typing.TypeVar"
] |
[((679, 706), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (696, 706), False, 'import logging\n'), ((1360, 1408), 'typing.TypeVar', 'TypeVar', (['"""T_Transformer"""'], {'bound': 'TransformerMixin'}), "('T_Transformer', bound=TransformerMixin)\n", (1367, 1408), False, 'from typing import Any, Generic, List, Optional, TypeVar, Union\n'), ((1830, 1883), 'typing.TypeVar', 'TypeVar', (['"""T_Imputer"""', 'SimpleImputer', 'IterativeImputer'], {}), "('T_Imputer', SimpleImputer, IterativeImputer)\n", (1837, 1883), False, 'from typing import Any, Generic, List, Optional, TypeVar, Union\n'), ((3738, 3793), 'pandas.Series', 'pd.Series', ([], {'index': 'features_out', 'data': 'features_out.values'}), '(index=features_out, data=features_out.values)\n', (3747, 3793), True, 'import pandas as pd\n'), ((12399, 12460), 'pandas.Series', 'pd.Series', ([], {'index': 'imputed_columns', 'data': 'imputed_columns.values'}), '(index=imputed_columns, data=imputed_columns.values)\n', (12408, 12460), True, 'import pandas as pd\n'), ((13406, 13466), 'pandas.Index', 'pd.Index', (["[f'{name}__missing' for name in features_original]"], {}), "([f'{name}__missing' for name in features_original])\n", (13414, 13466), True, 'import pandas as pd\n'), ((13482, 13535), 'pandas.Series', 'pd.Series', ([], {'index': 'features_out', 'data': 'features_original'}), '(index=features_out, data=features_original)\n', (13491, 13535), True, 'import pandas as pd\n'), ((16318, 16375), 'pandas.Series', 'pd.Series', ([], {'index': 'feature_names_out', 'data': 'feature_names_in'}), '(index=feature_names_out, data=feature_names_in)\n', (16327, 16375), True, 'import pandas as pd\n'), ((17655, 17702), 'pandas.Series', 'pd.Series', ([], {'index': 'features_out', 'data': 'features_in'}), '(index=features_out, data=features_in)\n', (17664, 17702), True, 'import pandas as pd\n'), ((10317, 10365), 'pandas.Series', 'pd.Series', ([], {'index': 'column_names', 'data': 'column_names'}), '(index=column_names, data=column_names)\n', (10326, 10365), True, 'import pandas as pd\n'), ((11316, 11331), 'numpy.isnan', 'np.isnan', (['stats'], {}), '(stats)\n', (11324, 11331), True, 'import numpy as np\n'), ((12146, 12192), 'numpy.all', 'np.all', (['delegate_estimator._mask_fit_X'], {'axis': '(0)'}), '(delegate_estimator._mask_fit_X, axis=0)\n', (12152, 12192), True, 'import numpy as np\n'), ((12339, 12360), 'numpy.argwhere', 'np.argwhere', (['nan_mask'], {}), '(nan_mask)\n', (12350, 12360), True, 'import numpy as np\n'), ((11437, 11448), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (11445, 11448), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
num = np.array(['3.14','-2.7','30'], dtype=np.string_) # dtype=np.string_ written out to make the code easier to read
# num=num.astype(int)
# print(num)
# ValueError: invalid literal for int() with base 10: '3.14'
num=num.astype(float).astype(int)
print(num)
# [ 3 -2 30] : if a direct cast to int fails, cast to float first and then to int.
num=num.astype(float)
print(num)
# [ 3.14 -2.7 30. ]
arr=np.arange(32).reshape((8,4))
print(arr)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]
# [12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]
# [24 25 26 27]
# [28 29 30 31]]
print(arr[[1,5,7,2],[0,3,1,2]]) # fancy indexing [[row indices],[column indices]] ==> elements picked as (row, column) pairs
# [ 4 23 29 10]
print(arr[[1,5,7,2]][:,[0,3,1,2]]) #[[rows]][:,[cols]] : chained indexing ==> first select rows 1,5,7,2, then reorder the columns
# [[ 4 7 5 6]
# [20 23 21 22]
# [28 31 29 30]
# [ 8 11 9 10]]
print(arr[[1,5,7,2]][:,[3,1]]) #[[rows]][:,[cols]] : chained indexing ==> columns at indices 3,1 of the selected rows
# [[ 7 5]
# [23 21]
# [31 29]
# [11 9]]
import random
walk = []
position =0
steps=1000
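# simple 1-D random walk: each step is +1 or -1 with equal probability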
for i in range(steps):
step = 1 if random.randint(0,1) else -1 #randint,randn,rannormal
position+=step
walk.append(position)
print("position : ",position)
# position : 18
print("walk : ",walk)
# walk : [-1, 0, 1, 0, -1, -2, -1, -....]
print(min(walk))
# -7
print(max(walk))
# 28
# print(abs(walk)) #abs : absolute-value conversion
obj = Series([1,2,-3,4])
print(obj)
# 0 1
# 1 2
# 2 -3
# 3 4
# dtype: int64
print(obj.values) #values : extracts only the values (attribute/method)
# [ 1 2 -3 4]
print(obj.index) #index : extracts the index
# RangeIndex(start=0, stop=4, step=1)
# specifying the index
obj = Series([1,2,-3,4],index=['x','y','z','k']) # assign index labels explicitly
print(obj)
# printed with the specified index
# x 1
# y 2
# z -3
# k 4
# dtype: int64
print(obj['y'])
# 2
obj['x']=10
print(obj)
# x 10
# y 2
# z -3
# k 4
# dtype: int64
# how to reference several entries at once
# print(obj['x','y'])
# # KeyError: ('x', 'y')
print(obj[['x','y','z']]) # use [] for a single index, [[]] for two or more
# x 10
# y 2
# z -3
# dtype: int64
print('='*50)
print(obj>0) # boolean conditions can be used
# x True
# y True
# z False
# k True
# dtype: bool
print(obj[obj>0]) # filtering with a boolean condition
# x 10
# y 2
# k 4
# dtype: int64
print(obj*2) # arithmetic operations are supported
# x 20
# y 4
# z -6
# k 8
# dtype: int64
print(np.exp(obj)) # exponential (e**x)
# x 22026.465795
# y 7.389056
# z 0.049787
# k 54.598150
# dtype: float64
# null (uninitialized state), na (missing value)
print(obj)
print('a' in obj) #in : check whether a given label exists
print('x' in obj) # columns: features, rows: observations
print('='*50)
#key & value -> Series -> index & value conversion (key=>index, value=>value)
sdata = {'Ohio': 35000, 'Texas': 71000, "Oregon":16000, "Utah":5000}
obj3=Series(sdata) # a dictionary can also be converted to a Series
print(obj3)
# Ohio 35000
# Texas 71000
# Oregon 16000
# Utah 5000
# dtype: int64
print(type(obj3))
# <class 'pandas.core.series.Series'>
states = ['California','Ohio','Oregon','Texas']
obj99 = Series(states) # convert a list to a Series
# print(obj99)
# # 0 California
# # 1 Ohio
# # 2 Oregon
# # 3 Texas
# # dtype: object
obj4 = Series(sdata, index=states) # build a Series from sdata, using states as the index
print(obj4)
# California NaN
# Ohio 35000.0
# Oregon 16000.0
# Texas 71000.0
# dtype: float64
print(pd.isnull(obj4))
# California True
# Ohio False
# Oregon False
# Texas False
# dtype: bool
# general terminology - nan : not a number, e.g. something character-like
# na : a missing value, null : a value that was never initialized
# in pandas these terms are used interchangeably
# the isnull function : checks whether each value is na (null, nan)
print(obj4+obj3) # only values whose index appears in both Series are summed
obj4.name = 'population'
obj.index.name = 'state'
print(obj4)
# California NaN
# Ohio 35000.0
# Oregon 16000.0
# Texas 71000.0
# Name: population, dtype: float64
obj4.index=['w','x','y','z'] # replace the index directly
print(obj4)
# w NaN
# x 35000.0
# y 16000.0
# z 71000.0
# Name: population, dtype: float64
data = {
'state' : ['Ohio','Ohio','Ohio','Nevada','Nevada'],
'year': [2000,2001,2002,2001,2002],
'pop': [1.5,1.7,3.6,2.4,2.9]}
frame = DataFrame(data) # effectively a bundle of Series
print(frame)
# state year pop
# 0 Ohio 2000 1.5
# 1 Ohio 2001 1.7
# 2 Ohio 2002 3.6
# 3 Nevada 2001 2.4
# 4 Nevada 2002 2.9
print(DataFrame(data, columns=['year','state','pop'])) # reorder the columns (temporary)
# year state pop
# 0 2000 Ohio 1.5
# 1 2001 Ohio 1.7
# 2 2002 Ohio 3.6
# 3 2001 Nevada 2.4
# 4 2002 Nevada 2.9
frame = DataFrame(data, columns=['year','state','pop']) # reassign frame to make the column order permanent
frame2= DataFrame(data, columns=['year','state','pop','debt'], index=['one','two','three','four','five'])
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 NaN
# two 2001 Ohio 1.7 NaN
# three 2002 Ohio 3.6 NaN
# four 2001 Nevada 2.4 NaN
# five 2002 Nevada 2.9 NaN
print(frame2['state']) # print only the desired column
# one Ohio
# two Ohio
# three Ohio
# four Nevada
# five Nevada
# Name: state, dtype: object
print(frame2['year'])
# one 2000
# two 2001
# three 2002
# four 2001
# five 2002
# Name: year, dtype: int64
print(frame2.ix['three']) #ix : reference a specific index (row)
# to extract two or more columns or rows => use [[]]
# print(frame2[['year','state']])
#
# print(frame2.ix[['three','five']])
print(frame2)
frame2['debt']=16.5
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 16.5
# two 2001 Ohio 1.7 16.5
# three 2002 Ohio 3.6 16.5
# four 2001 Nevada 2.4 16.5
# five 2002 Nevada 2.9 16.5
# frame2['debt']=np.arange(3)
# print(frame2)
# # ValueError: Length of values does not match length of index
frame2['debt']=np.arange(5)
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 0
# two 2001 Ohio 1.7 1
# three 2002 Ohio 3.6 2
# four 2001 Nevada 2.4 3
# five 2002 Nevada 2.9 4
print('='*50)
val = Series([-1.2,-1.5,-1.7],index=['two','three','five'])
print(val)
# two -1.2
# three -1.5
# five -1.7
# dtype: float64
# to add a column of a different length -> create a Series and assign it
frame2['debt']=val # values are matched by index (lengths need not be equal because the index is specified)
print(frame2)
# add a new column: Ohio (an eastern state) is True, the rest are False (condition-based)
frame2['eastern']=frame2.state=='Ohio'
print(frame2)
# year state pop debt eastern
# one 2000 Ohio 1.5 NaN True
# two 2001 Ohio 1.7 -1.2 True
# three 2002 Ohio 3.6 -1.5 True
# four 2001 Nevada 2.4 NaN False
# five 2002 Nevada 2.9 -1.7 False
# removing a column
del frame2['eastern']
print(frame2)
# year state pop debt
# one 2000 Ohio 1.5 NaN
# two 2001 Ohio 1.7 -1.2
# three 2002 Ohio 3.6 -1.5
# four 2001 Nevada 2.4 NaN
# five 2002 Nevada 2.9 -1.7
print(frame2.columns)
# Index(['year', 'state', 'pop', 'debt'], dtype='object')
print(frame2.index)
# Index(['one', 'two', 'three', 'four', 'five'], dtype='object')
pop = {'Nevada' : {2001 : 2.4,2002:2.9},'Ohio' : {2000 : 1.5,2001:1.7,2002:3.6}}
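# nested dict -> DataFrame: outer keys become the columns, inner keys become the row index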
frame3 = DataFrame(pop)
print(frame3)
# Nevada Ohio
# 2000 NaN 1.5
# 2001 2.4 1.7
# 2002 2.9 3.6
# swap rows and columns (transpose)
print(frame3.T)
# 2000 2001 2002
# Nevada NaN 2.4 2.9
# Ohio 1.5 1.7 3.6
# frame4 = DataFrame(pop,index=[2001,2002,2003]) # to specify an index, use a DataFrame (a plain dict has no index)
# print(frame4)
# # AttributeError: 'list' object has no attribute 'astype'
frame4 = DataFrame(frame3,index=[2001,2002,2003])
print(frame4)
# Nevada Ohio
# 2001 2.4 1.7
# 2002 2.9 3.6
# 2003 NaN NaN
print(frame3)
# Nevada Ohio
# 2000 NaN 1.5
# 2001 2.4 1.7
# 2002 2.9 3.6
pdata = {'Ohio':frame3['Ohio'][:-1],'Nevada':frame3['Nevada'][:2]} #[:-1] : excludes the last row, [:2] : rows 0 and 1 only
frame5=DataFrame(pdata)
print(frame5)
# Ohio Nevada
# 2000 1.5 NaN
# 2001 1.7 2.4
pdata = {'Ohio':frame3['Ohio'][:-1],'Nevada':frame3['Nevada']}
#'Nevada' keeps every row, so with [:-1] applied to 'Ohio' the 2002 entry has no 'Ohio' value and becomes NaN.
frame5=DataFrame(pdata)
print(frame5)
# Ohio Nevada
# 2000 1.5 NaN
# 2001 1.7 2.4
# 2002 NaN 2.9
|
[
"pandas.Series",
"pandas.isnull",
"numpy.exp",
"numpy.array",
"pandas.DataFrame",
"random.randint",
"numpy.arange"
] |
[((115, 165), 'numpy.array', 'np.array', (["['3.14', '-2.7', '30']"], {'dtype': 'np.string_'}), "(['3.14', '-2.7', '30'], dtype=np.string_)\n", (123, 165), True, 'import numpy as np\n'), ((1457, 1478), 'pandas.Series', 'Series', (['[1, 2, -3, 4]'], {}), '([1, 2, -3, 4])\n', (1463, 1478), False, 'from pandas import DataFrame, Series\n'), ((1738, 1787), 'pandas.Series', 'Series', (['[1, 2, -3, 4]'], {'index': "['x', 'y', 'z', 'k']"}), "([1, 2, -3, 4], index=['x', 'y', 'z', 'k'])\n", (1744, 1787), False, 'from pandas import DataFrame, Series\n'), ((2932, 2945), 'pandas.Series', 'Series', (['sdata'], {}), '(sdata)\n', (2938, 2945), False, 'from pandas import DataFrame, Series\n'), ((3238, 3252), 'pandas.Series', 'Series', (['states'], {}), '(states)\n', (3244, 3252), False, 'from pandas import DataFrame, Series\n'), ((3421, 3448), 'pandas.Series', 'Series', (['sdata'], {'index': 'states'}), '(sdata, index=states)\n', (3427, 3448), False, 'from pandas import DataFrame, Series\n'), ((4462, 4477), 'pandas.DataFrame', 'DataFrame', (['data'], {}), '(data)\n', (4471, 4477), False, 'from pandas import DataFrame, Series\n'), ((4920, 4969), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': "['year', 'state', 'pop']"}), "(data, columns=['year', 'state', 'pop'])\n", (4929, 4969), False, 'from pandas import DataFrame, Series\n'), ((5002, 5110), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': "['year', 'state', 'pop', 'debt']", 'index': "['one', 'two', 'three', 'four', 'five']"}), "(data, columns=['year', 'state', 'pop', 'debt'], index=['one',\n 'two', 'three', 'four', 'five'])\n", (5011, 5110), False, 'from pandas import DataFrame, Series\n'), ((6169, 6181), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6178, 6181), True, 'import numpy as np\n'), ((6416, 6474), 'pandas.Series', 'Series', (['[-1.2, -1.5, -1.7]'], {'index': "['two', 'three', 'five']"}), "([-1.2, -1.5, -1.7], index=['two', 'three', 'five'])\n", (6422, 6474), False, 'from pandas import DataFrame, Series\n'), ((7552, 7566), 'pandas.DataFrame', 'DataFrame', (['pop'], {}), '(pop)\n', (7561, 7566), False, 'from pandas import DataFrame, Series\n'), ((7984, 8027), 'pandas.DataFrame', 'DataFrame', (['frame3'], {'index': '[2001, 2002, 2003]'}), '(frame3, index=[2001, 2002, 2003])\n', (7993, 8027), False, 'from pandas import DataFrame, Series\n'), ((8344, 8360), 'pandas.DataFrame', 'DataFrame', (['pdata'], {}), '(pdata)\n', (8353, 8360), False, 'from pandas import DataFrame, Series\n'), ((8571, 8587), 'pandas.DataFrame', 'DataFrame', (['pdata'], {}), '(pdata)\n', (8580, 8587), False, 'from pandas import DataFrame, Series\n'), ((2485, 2496), 'numpy.exp', 'np.exp', (['obj'], {}), '(obj)\n', (2491, 2496), True, 'import numpy as np\n'), ((3642, 3657), 'pandas.isnull', 'pd.isnull', (['obj4'], {}), '(obj4)\n', (3651, 3657), True, 'import pandas as pd\n'), ((4694, 4743), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': "['year', 'state', 'pop']"}), "(data, columns=['year', 'state', 'pop'])\n", (4703, 4743), False, 'from pandas import DataFrame, Series\n'), ((458, 471), 'numpy.arange', 'np.arange', (['(32)'], {}), '(32)\n', (467, 471), True, 'import numpy as np\n'), ((1124, 1144), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1138, 1144), False, 'import random\n')]
|
#! -*- coding: utf-8 -*-
# SimBERT_v2 pre-training code, stage 2: distill SimBERT's similarity scores into roformer-sim
# Official project: https://github.com/ZhuiyiTechnology/roformer-sim
import json
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences
from bert4torch.tokenizers import Tokenizer
import jieba
jieba.initialize()
# Basic settings
maxlen = 64
batch_size = 12
# BERT config: the stage-1 checkpoint should be loaded here; for this example the official final weights are loaded directly
config_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--roformer_chinese_sim_char_base/config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--roformer_chinese_sim_char_base/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--roformer_chinese_sim_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# The corpus here is the same as in stage 1
class MyDataset(ListDataset):
@staticmethod
def load_data(filename):
"""读取语料,每行一个json
示例:{"text": "懂英语的来!", "synonyms": ["懂英语的来!!!", "懂英语的来", "一句英语翻译 懂英语的来"]}
"""
D = []
with open(filename, encoding='utf-8') as f:
for l in f:
D.append(json.loads(l))
return D
def truncate(text):
"""截断句子
"""
seps, strips = u'\n。!?!?;;,, ', u';;,, '
return text_segmentate(text, maxlen - 2, seps, strips)[0]
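# Masking scheme implemented below (a summary of the thresholds used in the loop): roughly 15% of
# the whole words are selected; of those, ~80% are replaced by [MASK], ~10% are kept unchanged and
# ~10% are replaced by a random vocabulary id, i.e. the standard BERT masking recipe.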
def masked_encode(text):
"""wwm随机mask
"""
words = jieba.lcut(text)
rands = np.random.random(len(words))
source, target = [tokenizer._token_start_id], [0]
for r, w in zip(rands, words):
ids = tokenizer.encode(w)[0][1:-1]
if r < 0.15 * 0.8:
source.extend([tokenizer._token_mask_id] * len(ids))
target.extend(ids)
elif r < 0.15 * 0.9:
source.extend(ids)
target.extend(ids)
elif r < 0.15:
source.extend(
np.random.choice(tokenizer._vocab_size - 1, size=len(ids)) + 1
)
target.extend(ids)
else:
source.extend(ids)
target.extend([0] * len(ids))
source = source[:maxlen - 1] + [tokenizer._token_end_id]
target = target[:maxlen - 1] + [0]
return source, target
# ========== for distillation: start ==========
# SimBERT (teacher) config
sim_config_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--simbert_chinese_base/config.json'
sim_checkpoint_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--simbert_chinese_base/pytorch_model.bin'
sim_dict_path = 'F:/Projects/pretrain_ckpt/simbert/[sushen_torch_base]--simbert_chinese_base/vocab.txt'
# Build the tokenizer for the teacher model
sim_tokenizer = Tokenizer(sim_dict_path, do_lower_case=True)
# Build and load the teacher model
simbert = build_transformer_model(sim_config_path, sim_checkpoint_path, with_pool='linear', application='unilm').to(device)
# ========== for distillation: end ==========
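# The frozen SimBERT model above is used as the teacher: in collate_fn each text is also encoded
# with sim_tokenizer, the teacher's pooled sentence vectors are L2-normalized, and their pairwise
# dot products form the target similarity matrix `sims` that the student is distilled towards.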
def collate_fn(batch):
batch_token_ids, batch_segment_ids = [], []
batch_sim_token_ids, batch_sim_segment_ids = [], []
for d in batch:
text, synonyms = d['text'], d['synonyms']
synonyms = [text] + synonyms
np.random.shuffle(synonyms)
        text, synonym = synonyms[:2]
        for _ in range(2):
if np.random.random() < 0.5:
text_ids = masked_encode(text)[0]
else:
text_ids = tokenizer.encode(text)[0]
synonym_ids = tokenizer.encode(synonym)[0][1:]
truncate_sequences(maxlen * 2, -2, text_ids, synonym_ids)
token_ids = text_ids + synonym_ids
segment_ids = [0] * len(text_ids) + [1] * len(synonym_ids)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
            # ==== for distillation: start ====
token_ids, segment_ids = sim_tokenizer.encode(text, maxlen=maxlen)
batch_sim_token_ids.append(token_ids)
batch_sim_segment_ids.append(segment_ids)
            # ==== for distillation: end ====
text, synonym = synonym, text
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    # ==== for distillation: start ====
batch_sim_token_ids = torch.tensor(sequence_padding(batch_sim_token_ids), dtype=torch.long, device=device)
batch_sim_segment_ids = torch.tensor(sequence_padding(batch_sim_segment_ids), dtype=torch.long, device=device)
sim_vecs = simbert.predict([batch_sim_token_ids, batch_sim_segment_ids])[1]
sim_vecs /= (sim_vecs**2).sum(dim=-1, keepdims=True)**0.5
sims = torch.matmul(sim_vecs, sim_vecs.T)
    # ==== for distillation: end ====
return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids, sims]
train_dataloader = DataLoader(MyDataset('../datasets/data_similarity.json'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
# Build and load the student model
class Model(BaseModel):
def __init__(self, pool_method='cls'):
super().__init__()
self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, model='roformer',
with_pool='linear', with_mlm='linear', dropout_rate=0.2, application='unilm')
self.pool_method = pool_method
def get_pool_emb(self, hidden_state, pool_cls, attention_mask):
if self.pool_method == 'cls':
return pool_cls
elif self.pool_method == 'mean':
hidden_state = torch.sum(hidden_state * attention_mask[:, :, None], dim=1)
attention_mask = torch.sum(attention_mask, dim=1)[:, None]
return hidden_state / attention_mask
elif self.pool_method == 'max':
seq_state = hidden_state * attention_mask[:, :, None]
return torch.max(seq_state, dim=1)
else:
raise ValueError('pool_method illegal')
def forward(self, token_ids, segment_ids):
hidden_state, pool_cls, seq_logit = self.bert([token_ids, segment_ids])
sen_emb = self.get_pool_emb(hidden_state, pool_cls, attention_mask=token_ids.gt(0).long())
return seq_logit, sen_emb
model = Model(pool_method='cls').to(device)
class TotalLoss(nn.Module):
"""loss分两部分,一是seq2seq的交叉熵,二是相似度的交叉熵。
"""
def forward(self, outputs, target):
seq_logit, sen_emb = outputs
seq_label, seq_mask, sims = target
seq2seq_loss = self.compute_loss_of_seq2seq(seq_logit, seq_label, seq_mask)
similarity_loss = self.compute_loss_of_similarity(sen_emb, sims)
return {'loss': seq2seq_loss + similarity_loss, 'seq2seq_loss': seq2seq_loss, 'similarity_loss': similarity_loss}
def compute_loss_of_seq2seq(self, y_pred, y_true, y_mask):
'''
y_pred: [btz, seq_len, hdsz]
y_true: [btz, seq_len]
y_mask: [btz, seq_len]
'''
        y_true = y_true[:, 1:] # target token_ids
        y_mask = y_mask[:, 1:] # marks the positions to be predicted
        y_pred = y_pred[:, :-1, :] # predicted sequence, shifted left by one position
y_pred = y_pred.reshape(-1, y_pred.shape[-1])
y_true = (y_true*y_mask).flatten()
return F.cross_entropy(y_pred, y_true, ignore_index=0)
def compute_loss_of_similarity(self, y_pred, y_true):
        y_pred = F.normalize(y_pred, p=2, dim=-1) # L2-normalize the sentence vectors
        similarities = torch.matmul(y_pred, y_pred.T) # similarity matrix
loss = 100 * torch.mean((similarities - y_true) ** 2)
return loss
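# Note on TotalLoss: for B sentence embeddings ([B, hidden]), the normalized student similarity
# matrix is [B, B] and is regressed onto the teacher matrix `sims` from collate_fn with a scaled
# (x100) mean-squared error; the seq2seq part is a shifted cross-entropy that ignores label 0.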
model.compile(loss=TotalLoss(), optimizer=optim.Adam(model.parameters(), 1e-5), metrics=['seq2seq_loss', 'similarity_loss'])
class SynonymsGenerator(AutoRegressiveDecoder):
"""seq2seq解码器
"""
@AutoRegressiveDecoder.wraps('logits')
def predict(self, inputs, output_ids, states):
token_ids, segment_ids = inputs
token_ids = torch.cat([token_ids, output_ids], 1)
segment_ids = torch.cat([segment_ids, torch.ones_like(output_ids, device=device)], 1)
seq_logit, _ = model.predict([token_ids, segment_ids])
return seq_logit[:, -1, :]
def generate(self, text, n=1, topk=5):
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        output_ids = self.random_sample([token_ids, segment_ids], n, topk) # random-sampling decoding
return [tokenizer.decode(ids.cpu().numpy()) for ids in output_ids]
synonyms_generator = SynonymsGenerator(start_id=None, end_id=tokenizer._token_end_id, maxlen=maxlen, device=device)
def cal_sen_emb(text_list):
    '''Compute sentence embeddings for a list of texts.
'''
X, S = [], []
for t in text_list:
x, s = tokenizer.encode(t)
X.append(x)
S.append(s)
X = torch.tensor(sequence_padding(X), dtype=torch.long, device=device)
S = torch.tensor(sequence_padding(S), dtype=torch.long, device=device)
_, Z = model.predict([X, S])
return Z
def gen_synonyms(text, n=100, k=20):
""""含义: 产生sent的n个相似句,然后返回最相似的k个。
做法:用seq2seq生成,并用encoder算相似度并排序。
效果:
>>> gen_synonyms(u'微信和支付宝哪个好?')
[
u'微信和支付宝,哪个好?',
u'微信和支付宝哪个好',
u'支付宝和微信哪个好',
u'支付宝和微信哪个好啊',
u'微信和支付宝那个好用?',
u'微信和支付宝哪个好用',
u'支付宝和微信那个更好',
u'支付宝和微信哪个好用',
u'微信和支付宝用起来哪个好?',
u'微信和支付宝选哪个好',
]
"""
r = synonyms_generator.generate(text, n)
    r = [i for i in set(r) if i != text] # drop candidates identical to the original text
r = [text] + r
Z = cal_sen_emb(r)
Z /= (Z**2).sum(dim=1, keepdims=True)**0.5
argsort = torch.matmul(Z[1:], -Z[0]).argsort()
return [r[i + 1] for i in argsort[:k]]
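# Ranking note: Z is L2-normalized, so matmul(Z[1:], -Z[0]) holds negative cosine similarities and
# an ascending argsort puts the most similar candidates first.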
def just_show(some_samples):
"""随机观察一些样本的效果
"""
S = [np.random.choice(some_samples) for _ in range(3)]
for s in S:
try:
print(u'原句子:%s' % s)
print(u'同义句子:', gen_synonyms(s, 10, 10))
print()
except:
pass
class Evaluator(Callback):
"""评估模型
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, global_step, epoch, logs=None):
        # save the best model
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
        # show some demo generations
just_show(['微信和支付宝拿个好用?',
'微信和支付宝,哪个好?',
'微信和支付宝哪个好',
'支付宝和微信哪个好',
'支付宝和微信哪个好啊',
'微信和支付宝那个好用?',
'微信和支付宝哪个好用',
'支付宝和微信那个更好',
'支付宝和微信哪个好用',
'微信和支付宝用起来哪个好?',
'微信和支付宝选哪个好'
])
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=50, steps_per_epoch=200, callbacks=[evaluator])
else:
model.load_weights('./best_model.pt')
|
[
"jieba.initialize",
"torch.max",
"bert4torch.tokenizers.Tokenizer",
"torch.cuda.is_available",
"torch.sum",
"bert4torch.snippets.AutoRegressiveDecoder.wraps",
"jieba.lcut",
"torch.mean",
"numpy.random.random",
"torch.matmul",
"torch.ones_like",
"json.loads",
"numpy.random.choice",
"bert4torch.snippets.truncate_sequences",
"torch.nn.functional.normalize",
"bert4torch.models.build_transformer_model",
"torch.cat",
"bert4torch.snippets.sequence_padding",
"torch.nn.functional.cross_entropy",
"bert4torch.snippets.text_segmentate",
"numpy.random.shuffle"
] |
[((533, 551), 'jieba.initialize', 'jieba.initialize', ([], {}), '()\n', (549, 551), False, 'import jieba\n'), ((1057, 1097), 'bert4torch.tokenizers.Tokenizer', 'Tokenizer', (['dict_path'], {'do_lower_case': '(True)'}), '(dict_path, do_lower_case=True)\n', (1066, 1097), False, 'from bert4torch.tokenizers import Tokenizer\n'), ((2862, 2906), 'bert4torch.tokenizers.Tokenizer', 'Tokenizer', (['sim_dict_path'], {'do_lower_case': '(True)'}), '(sim_dict_path, do_lower_case=True)\n', (2871, 2906), False, 'from bert4torch.tokenizers import Tokenizer\n'), ((999, 1024), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1022, 1024), False, 'import torch\n'), ((1672, 1688), 'jieba.lcut', 'jieba.lcut', (['text'], {}), '(text)\n', (1682, 1688), False, 'import jieba\n'), ((4837, 4871), 'torch.matmul', 'torch.matmul', (['sim_vecs', 'sim_vecs.T'], {}), '(sim_vecs, sim_vecs.T)\n', (4849, 4871), False, 'import torch\n'), ((7865, 7902), 'bert4torch.snippets.AutoRegressiveDecoder.wraps', 'AutoRegressiveDecoder.wraps', (['"""logits"""'], {}), "('logits')\n", (7892, 7902), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((1558, 1605), 'bert4torch.snippets.text_segmentate', 'text_segmentate', (['text', '(maxlen - 2)', 'seps', 'strips'], {}), '(text, maxlen - 2, seps, strips)\n', (1573, 1605), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((2936, 3043), 'bert4torch.models.build_transformer_model', 'build_transformer_model', (['sim_config_path', 'sim_checkpoint_path'], {'with_pool': '"""linear"""', 'application': '"""unilm"""'}), "(sim_config_path, sim_checkpoint_path, with_pool=\n 'linear', application='unilm')\n", (2959, 3043), False, 'from bert4torch.models import build_transformer_model, BaseModel\n'), ((3325, 3352), 'numpy.random.shuffle', 'np.random.shuffle', (['synonyms'], {}), '(synonyms)\n', (3342, 3352), True, 'import numpy as np\n'), ((4255, 4288), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['batch_token_ids'], {}), '(batch_token_ids)\n', (4271, 4288), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((4360, 4395), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['batch_segment_ids'], {}), '(batch_segment_ids)\n', (4376, 4395), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((4497, 4534), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['batch_sim_token_ids'], {}), '(batch_sim_token_ids)\n', (4513, 4534), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((4610, 4649), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['batch_sim_segment_ids'], {}), '(batch_sim_segment_ids)\n', (4626, 4649), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((5251, 5438), 'bert4torch.models.build_transformer_model', 'build_transformer_model', ([], {'config_path': 'config_path', 'checkpoint_path': 'checkpoint_path', 'model': '"""roformer"""', 'with_pool': '"""linear"""', 'with_mlm': '"""linear"""', 'dropout_rate': '(0.2)', 'application': '"""unilm"""'}), 
"(config_path=config_path, checkpoint_path=\n checkpoint_path, model='roformer', with_pool='linear', with_mlm=\n 'linear', dropout_rate=0.2, application='unilm')\n", (5274, 5438), False, 'from bert4torch.models import build_transformer_model, BaseModel\n'), ((7345, 7392), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['y_pred', 'y_true'], {'ignore_index': '(0)'}), '(y_pred, y_true, ignore_index=0)\n', (7360, 7392), True, 'import torch.nn.functional as F\n'), ((7469, 7501), 'torch.nn.functional.normalize', 'F.normalize', (['y_pred'], {'p': '(2)', 'dim': '(-1)'}), '(y_pred, p=2, dim=-1)\n', (7480, 7501), True, 'import torch.nn.functional as F\n'), ((7535, 7565), 'torch.matmul', 'torch.matmul', (['y_pred', 'y_pred.T'], {}), '(y_pred, y_pred.T)\n', (7547, 7565), False, 'import torch\n'), ((8014, 8051), 'torch.cat', 'torch.cat', (['[token_ids, output_ids]', '(1)'], {}), '([token_ids, output_ids], 1)\n', (8023, 8051), False, 'import torch\n'), ((8853, 8872), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['X'], {}), '(X)\n', (8869, 8872), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((8928, 8947), 'bert4torch.snippets.sequence_padding', 'sequence_padding', (['S'], {}), '(S)\n', (8944, 8947), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((9839, 9869), 'numpy.random.choice', 'np.random.choice', (['some_samples'], {}), '(some_samples)\n', (9855, 9869), True, 'import numpy as np\n'), ((3654, 3711), 'bert4torch.snippets.truncate_sequences', 'truncate_sequences', (['(maxlen * 2)', '(-2)', 'text_ids', 'synonym_ids'], {}), '(maxlen * 2, -2, text_ids, synonym_ids)\n', (3672, 3711), False, 'from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, AutoRegressiveDecoder, Callback, truncate_sequences\n'), ((7596, 7636), 'torch.mean', 'torch.mean', (['((similarities - y_true) ** 2)'], {}), '((similarities - y_true) ** 2)\n', (7606, 7636), False, 'import torch\n'), ((9692, 9718), 'torch.matmul', 'torch.matmul', (['Z[1:]', '(-Z[0])'], {}), '(Z[1:], -Z[0])\n', (9704, 9718), False, 'import torch\n'), ((3436, 3454), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3452, 3454), True, 'import numpy as np\n'), ((5716, 5775), 'torch.sum', 'torch.sum', (['(hidden_state * attention_mask[:, :, None])'], {'dim': '(1)'}), '(hidden_state * attention_mask[:, :, None], dim=1)\n', (5725, 5775), False, 'import torch\n'), ((8098, 8140), 'torch.ones_like', 'torch.ones_like', (['output_ids'], {'device': 'device'}), '(output_ids, device=device)\n', (8113, 8140), False, 'import torch\n'), ((1429, 1442), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (1439, 1442), False, 'import json\n'), ((5805, 5837), 'torch.sum', 'torch.sum', (['attention_mask'], {'dim': '(1)'}), '(attention_mask, dim=1)\n', (5814, 5837), False, 'import torch\n'), ((6021, 6048), 'torch.max', 'torch.max', (['seq_state'], {'dim': '(1)'}), '(seq_state, dim=1)\n', (6030, 6048), False, 'import torch\n')]
|
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
is_visual = True
is_to_csv = True #False
def visulize_distribution(df):
if 1:
print(df.target.value_counts())
#df.target.value_counts()
else:
import matplotlib.pyplot as plt
print('++')
df['target'].plot.hist(width=0.1, )
#plt.hist(column='target')
#plt.hist(out['target'])
print('--')
plt.show()
def read_20newsgroups(data_file=None, test_file=None, dataset=None, test_size=0.2):
if test_file is not None:
testset = pd.read_csv(test_file)
testset = testset.dropna()
if is_visual:
visulize_distribution(testset)
valid_texts = list(testset['text'])
valid_labels = np.array(testset['target'])
classifier_types = list(testset['title'].unique())
dataset = pd.read_csv(data_file)
dataset = dataset.dropna()
train_texts = list(dataset['text'])
train_labels = np.array(dataset['target'])
classifier_types = list(dataset['title'].unique())
if is_visual:
visulize_distribution(dataset)
return (train_texts, valid_texts, train_labels, valid_labels), classifier_types
else:
if data_file is not None:
print(data_file)
dataset = pd.read_csv(data_file)
#https://stackoverflow.com/questions/63517293/valueerror-textencodeinput-must-be-uniontextinputsequence-tupleinputsequence
dataset = dataset.dropna()
#print(dataset.shape)
if dataset is not None:
#print(dataset.shape)
#print(dataset.columns)
documents = list(dataset['text'])
labels = np.array(dataset['target'])
classifier_types = list(dataset['title'].unique())
#print(type(documents), len(documents), documents[0])
#print(type(labels), len(labels), labels[0])
#print(classifier_types, len(classifier_types))
else:
# download & load 20newsgroups dataset from sklearn's repos
dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
print(type(dataset))
documents = dataset.data
labels = dataset.target
classifier_types = dataset.target_names
#print(type(labels), len(labels), labels[0])
#print(type(dataset.target_names), dataset.target_names, len(dataset.target_names))
# split into training & testing a return data as well as label names
print(type(documents), len(documents))
print('>>', documents[0])
print('>>', documents[1])
return train_test_split(documents, labels, test_size=test_size), classifier_types
def twenty_newsgroup_to_csv(subset=None):
#newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
#newsgroups = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
#newsgroups = fetch_20newsgroups(subset="all", remove=("headers", "footers", "quotes"))
#newsgroups = fetch_20newsgroups(subset="train", remove=("headers", "footers", "quotes"))
#newsgroups = fetch_20newsgroups(subset="test", remove=("headers", "footers", "quotes"))
if subset is not None:
newsgroups = fetch_20newsgroups(subset=subset, remove=("headers", "footers", "quotes"))
df = pd.DataFrame([newsgroups.data, newsgroups.target.tolist()]).T
df.columns = ['text', 'target']
targets = pd.DataFrame( newsgroups.target_names)
targets.columns=['title']
out = pd.merge(df, targets, left_on='target', right_index=True)
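    # `out` now has three columns: 'text', 'target' (the integer class id) and 'title'
    # (the human-readable newsgroup name merged in from target_names).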
print(out.shape, out.columns)
#out.describe(include=['target'])
#out.to_csv('20_newsgroup.csv')
#out.groupby('target').count().plot.bar()
if is_visual:
visulize_distribution(out)
return out
def test_20newsgroups(dataset):
if is_to_csv:
dataset.to_csv('test_20newsgroups.csv', index=False)
def iid_20newsgroups(dataset, num_users):
"""
Sample I.I.D. client data from 20newsgroups dataset
:param dataset:
:param num_users:
:return: dict of users' dataset
"""
num_items = int(len(dataset)/num_users)
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
print(dict_users, num_items)
for i in range(num_users):
chosen_idxs = np.random.choice(all_idxs, num_items, replace=False)
dict_users[i] = dataset.iloc[chosen_idxs]
all_idxs = list(set(all_idxs) - set(chosen_idxs))
#print({x for i, x in enumerate(dict_users[i]) if i < 5})
if is_visual:
print(dict_users[i].head(), dict_users[i].shape)
visulize_distribution(dict_users[i])
if is_to_csv:
dict_users[i].to_csv('iid_20newsgroups_'+str(i)+'.csv', index=False)
#print(dict_users.keys())
return dict_users
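# The label-based non-IID split below (written for num_users == 2) assigns each document to a user
# by `label % 2`, and additionally appends a small set of "safe" seed indices (up to three
# documents per label) to every user so that both partitions still contain all 20 classes.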
def noniid_label_20newsgroups(dataset, num_users, alpha=None):
"""
Sample non-I.I.D client data from 20newsgroups dataset: label imbalance, quantity uniform
:param dataset:
:param num_users:
:alpha: label ratio, total number = 20lables
:return:
"""
if is_visual:
visulize_distribution(dataset)
#dict_users, all_idxs = {}, [i for i in range(len(dataset))]
dict_users = {i: np.array([]) for i in range(num_users)}
labels = np.array(dataset['target'])
num_samples = len(dataset)
num_labels = 20
num_shards = int(len(dataset)/num_labels)
idxs = np.arange(num_samples)
print(dict_users)
print(labels, len(labels))
print(idxs, len(idxs))
# sort labels
idxs_labels = np.vstack((idxs, labels))
#print(idxs_labels, len(idxs_labels))
#idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
#print(idxs_labels)
#idxs = idxs_labels[0, :]
#print(idxs, len(idxs))
safe_idxs = []
seed_idxs = {}
for i in range(len(dataset)): #only two users
key = idxs_labels[1][i]
if key in seed_idxs:
if seed_idxs[key] < 3:
safe_idxs.append(idxs_labels[0][i])
seed_idxs[key] += 1
else:
safe_idxs.append(idxs_labels[0][i])
seed_idxs[key] = 1
#seed_idxs[idxs_labels[1][i]] = idxs_labels[0][i]
print('seed_idxs', seed_idxs)
chosen_idxs = {i:[] for i in range(num_users)}
#for i in range(18000,len(idxs)):
#for i in range(100):
for i in range(len(dataset)): #only two users
user_id = idxs_labels[1][i] % 2
if user_id == 0:
#print(i, idxs_labels[0][i], idxs_labels[1][i])
chosen_idxs[user_id].append(idxs_labels[0][i])
else:
chosen_idxs[user_id].append(idxs_labels[0][i])
for i in range(num_users):
dict_users[i] = dataset.iloc[chosen_idxs[i] + safe_idxs]
#all_idxs = list(set(all_idxs) - set(chosen_idxs))
#print({x for i, x in enumerate(dict_users[i]) if i < 5})
if is_visual:
print(dict_users[i].head(), dict_users[i].shape)
visulize_distribution(dict_users[i])
if is_to_csv:
dict_users[i].to_csv('noniid_label_20newsgroups_alpha'+ str(alpha)+ '_'+str(i)+'.csv', index=False)
return dict_users
def noniid_quantity_20newsgroups(dataset, num_users=2, beta=None):
"""
Sample non-I.I.D client data from 20newsgroups dataset: quantity imbalance, label uniform
:param dataset:
:param num_users:
:return:
"""
if is_visual:
visulize_distribution(dataset)
#dict_users, all_idxs = {}, [i for i in range(len(dataset))]
num_items = {} #int(len(dataset)/num_users)
for i in range(len(beta)):
num_items[i] = int(len(dataset) * beta[i])
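    # beta is a list of per-user fractions (e.g. [0.1, 0.9], as in the commented-out call below);
    # user i then receives int(len(dataset) * beta[i]) randomly drawn rows, giving a
    # quantity-imbalanced but label-uniform split.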
dict_users, all_idxs = {}, [i for i in range(len(dataset))]
print(dict_users, num_items)
for i in range(num_users):
chosen_idxs = np.random.choice(all_idxs, num_items[i], replace=False)
dict_users[i] = dataset.iloc[chosen_idxs]
all_idxs = list(set(all_idxs) - set(chosen_idxs))
#print({x for i, x in enumerate(dict_users[i]) if i < 5})
if is_visual:
print(dict_users[i].head(), dict_users[i].shape)
visulize_distribution(dict_users[i])
if is_to_csv:
dict_users[i].to_csv('noniid_quantity_20newsgroups_beta'+ str(beta[i])+ '_'+str(i)+'.csv', index=False)
#print(dict_users.keys())
return dict_users
if __name__ == '__main__':
if 0:
(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups()
print(type(train_texts), len(train_texts))
print(type(train_labels), len(train_labels))
if 0:
start=0
valid_sample_n = 2
sample_n = valid_sample_n*5
train_texts = train_texts[start:sample_n]
train_labels = train_labels[start:sample_n]
valid_texts = valid_texts[start:valid_sample_n]
valid_labels = valid_labels[start:valid_sample_n]
print(len(train_texts), len(train_labels))
print(len(valid_texts), len(valid_labels))
#print(valid_texts, valid_labels)
print(target_names)
if 0: #generate iid-dataset
dataset = twenty_newsgroup_to_csv()
#print(dataset.head(10))
#dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
dict_user = iid_20newsgroups(dataset, 2)
read_20newsgroups(dict_user[0])
read_20newsgroups()
if 0: #load dataset via read_20newsgroups
#(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file=None)
#(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file='iid_20newsgroups_1.csv')
(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups(data_file='noniid_label_20newsgroups_alpha0.5_0.csv', test_file='test_20newsgroups.csv')
print(type(train_texts), len(train_texts))
print(type(train_labels), len(train_labels))
print(train_labels[:2])
if 1:
dataset = twenty_newsgroup_to_csv(subset='train')
#print(dataset.head(10))
#dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
#dict_user = noniid_20newsgroups(dataset, 2)
noniid_label_20newsgroups(dataset, 2, alpha=0.5)
num_users = 2
#noniid_quantity_20newsgroups(dataset, beta=[0.1, 0.9])
if 0:
dataset = twenty_newsgroup_to_csv(subset='test')
test_20newsgroups(dataset)
|
[
"pandas.read_csv",
"numpy.random.choice",
"sklearn.model_selection.train_test_split",
"pandas.merge",
"sklearn.datasets.fetch_20newsgroups",
"numpy.array",
"numpy.vstack",
"pandas.DataFrame",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((4240, 4277), 'pandas.DataFrame', 'pd.DataFrame', (['newsgroups.target_names'], {}), '(newsgroups.target_names)\n', (4252, 4277), True, 'import pandas as pd\n'), ((4320, 4377), 'pandas.merge', 'pd.merge', (['df', 'targets'], {'left_on': '"""target"""', 'right_index': '(True)'}), "(df, targets, left_on='target', right_index=True)\n", (4328, 4377), True, 'import pandas as pd\n'), ((6094, 6121), 'numpy.array', 'np.array', (["dataset['target']"], {}), "(dataset['target'])\n", (6102, 6121), True, 'import numpy as np\n'), ((6230, 6252), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (6239, 6252), True, 'import numpy as np\n'), ((6370, 6395), 'numpy.vstack', 'np.vstack', (['(idxs, labels)'], {}), '((idxs, labels))\n', (6379, 6395), True, 'import numpy as np\n'), ((1087, 1097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1095, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1253), 'pandas.read_csv', 'pd.read_csv', (['test_file'], {}), '(test_file)\n', (1242, 1253), True, 'import pandas as pd\n'), ((1421, 1448), 'numpy.array', 'np.array', (["testset['target']"], {}), "(testset['target'])\n", (1429, 1448), True, 'import numpy as np\n'), ((1527, 1549), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {}), '(data_file)\n', (1538, 1549), True, 'import pandas as pd\n'), ((1652, 1679), 'numpy.array', 'np.array', (["dataset['target']"], {}), "(dataset['target'])\n", (1660, 1679), True, 'import numpy as np\n'), ((4042, 4116), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': 'subset', 'remove': "('headers', 'footers', 'quotes')"}), "(subset=subset, remove=('headers', 'footers', 'quotes'))\n", (4060, 4116), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((5105, 5157), 'numpy.random.choice', 'np.random.choice', (['all_idxs', 'num_items'], {'replace': '(False)'}), '(all_idxs, num_items, replace=False)\n', (5121, 5157), True, 'import numpy as np\n'), ((6041, 6053), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6049, 6053), True, 'import numpy as np\n'), ((8616, 8671), 'numpy.random.choice', 'np.random.choice', (['all_idxs', 'num_items[i]'], {'replace': '(False)'}), '(all_idxs, num_items[i], replace=False)\n', (8632, 8671), True, 'import numpy as np\n'), ((1989, 2011), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {}), '(data_file)\n', (2000, 2011), True, 'import pandas as pd\n'), ((2399, 2426), 'numpy.array', 'np.array', (["dataset['target']"], {}), "(dataset['target'])\n", (2407, 2426), True, 'import numpy as np\n'), ((2781, 2872), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': '"""all"""', 'shuffle': '(True)', 'remove': "('headers', 'footers', 'quotes')"}), "(subset='all', shuffle=True, remove=('headers', 'footers',\n 'quotes'))\n", (2799, 2872), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((3391, 3447), 'sklearn.model_selection.train_test_split', 'train_test_split', (['documents', 'labels'], {'test_size': 'test_size'}), '(documents, labels, test_size=test_size)\n', (3407, 3447), False, 'from sklearn.model_selection import train_test_split\n')]
|
from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution
from django.shortcuts import render, HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.views.generic import FormView, CreateView, ListView, DetailView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import AnalyticsServiceForm
from django.core import serializers
from django.utils.encoding import uri_to_iri
from apps.calc.measurement import measurement_obj
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
import json
from apps.analysis.json import NumPyArangeEncoder
from apps.projects.models import Experiment, Project, Datarow, Value
from apps.projects.serializer import project_serialize
from django.conf import settings
import numpy as np
import random
from apps.quiver import service_executor
# Create your views here.
class NewAnalyticsService(LoginRequiredMixin, CreateView):
form_class = AnalyticsServiceForm
template_name = 'quiver/analyticsservice_create.html'
def get_context_data(self, **kwargs):
data = super(NewAnalyticsService, self).get_context_data(**kwargs)
return data
def form_valid(self, form):
user = self.request.user
form.instance.user = user
context = self.get_context_data()
self.object = form.save()
return super(NewAnalyticsService, self).form_valid(form)
class UpdateAnalyticsService(LoginRequiredMixin, UpdateView):
model = AnalyticsService
form_class = AnalyticsServiceForm
pk_url_kwarg = 'id'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.user == self.request.user and not self.object.visibility:
raise PermissionDenied()
return super(UpdateAnalyticsService, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
data = super(UpdateAnalyticsService, self).get_context_data(**kwargs)
return data
def form_valid(self, form):
context = self.get_context_data()
return super(UpdateAnalyticsService, self).form_valid(form)
class MyAnalyticsService(LoginRequiredMixin, ListView):
model = AnalyticsService
allow_empty = True
paginate_by = 10
def get_queryset(self):
user = self.request.user
return AnalyticsService.objects.filter(user=user).order_by('updated')
class AnalyticsServiceDetail(DetailView):
model = AnalyticsService
pk_url_kwarg = 'id'
def get_context_data(self, **kwargs):
user = self.request.user
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the projects
context['project_list'] = Project.objects.filter(user=user).order_by('updated')
return context
#def get(self, request, *args, **kwargs):
# self.object = self.get_object()
# if self.object.user != self.request.user and not self.object.visibility:
# raise PermissionDenied()
# return super(AnalyticsServiceDetail, self).get(request, *args, **kwargs)
def delete_analytics_service(request, analytics_service_id):
AnalyticsService.objects.get(id=analytics_service_id).delete()
return HttpResponseRedirect('/quiver/')
@login_required
def analytics_service_detail(request, experimentId):
if request.method != 'POST':
return HttpResponseRedirect('/dashboard/')
# current user
curruser_id = request.user.id
projectId = Experiment.objects.get(id=experimentId).project_id
# owner of experiment
expowner_id = Project.objects.get(id=projectId).user_id
# read graph visibility from post
graph_visibility = request.POST.get("graphVisibilities", "").split(',')
# Read Data from DB
header_list = np.asarray(Datarow.objects.filter(experiment_id=experimentId).values_list('name', flat=True))
einheiten_list = np.asarray(Datarow.objects.filter(experiment_id=experimentId).values_list('unit', flat=True))
mInstruments_list = np.asarray(
Datarow.objects.filter(experiment_id=experimentId).values_list('measuring_instrument', flat=True))
experimentName = Experiment.objects.get(id=experimentId).name
dateCreated = Experiment.objects.get(id=experimentId).created
timerow = Experiment.objects.get(id=experimentId).timerow
datarow_id = Datarow.objects.filter(experiment_id=experimentId).values_list('id', flat=True)
value_amount = len(Value.objects.filter(datarow_id=datarow_id[0]))
datarow_amount = len(datarow_id)
# values in the right order will be put in here, but for now initialize with 0
values_wo = [0] * datarow_amount
#fill values_wo with only datarow_amount-times of database fetches
i = 0
while i < datarow_amount:
values_wo[i] = Value.objects.filter(datarow_id=datarow_id[i]).values_list('value', flat=True)
i += 1
# order the values in values_wo, so that they can be used without database fetching
data = np.transpose(values_wo).astype(float)
# Create/Initialize the measurement object
measurement = measurement_obj.Measurement(json.dumps(data, cls=NumPyArangeEncoder),json.dumps(header_list, cls=NumPyArangeEncoder),
json.dumps(einheiten_list, cls=NumPyArangeEncoder),timerow)
# Prepare the Data for Rendering
dataForRender = {
'jsonData': json.dumps(measurement.data, cls=NumPyArangeEncoder),
'jsonHeader': json.dumps(measurement.colNames, cls=NumPyArangeEncoder),
'jsonEinheiten': json.dumps(measurement.colUnits, cls=NumPyArangeEncoder),
'jsonZeitreihenSpalte': json.dumps(measurement.timeIndex, cls=NumPyArangeEncoder),
'jsonMeasurementInstruments': json.dumps(mInstruments_list, cls=NumPyArangeEncoder),
'experimentId': experimentId,
'experimentName': experimentName,
'projectId': projectId,
'dateCreated': dateCreated,
'current_user_id': curruser_id,
'experiment_owner_id': expowner_id,
'graphVisibility': json.dumps(graph_visibility, cls=NumPyArangeEncoder),
}
# save experimentId to get it in ajax call when refreshing graph
request.session['experimentId'] = experimentId
return render(request, "quiver/index.html", dataForRender)
#def analyticsService(request):
#
# if request.method == 'POST':
# form = AnalyticsServiceForm(request.POST)
# if form.is_valid():
# print('hi')
#
# form = AnalyticsServiceForm()
#
# return render(request, 'analytics_service_detail.html', {'form': form})
def execute_service(request, analytics_service_id):
#data = request.body
#data = json.loads(data)
#read data and get project id:
if request.method == 'POST':
project_id = request.POST.get("project_id", )
rowcounter = int(request.POST.get("rowcounter", ))
        #read out of the ajax request and adjust the format for the following service execution
#read and prepare parameter data to send it to the service
        input = []
        parameter = []
        i = 0
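        # The form is expected to post indexed fields (e.g. parameter_name_0, parameter_value_0,
        # type_select_0 for the first row), with rowcounter giving the number of rows; the loop
        # below simply collects them into a list of parameter dicts for the service call.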
while i < rowcounter:
param_attributes = {
'name': request.POST.get('parameter_name_' + str(i), ),
'value': request.POST.get('parameter_value_' + str(i), ),
'type': request.POST.get('type_select_' + str(i), )
}
parameter.append(param_attributes)
            i = i + 1
# work that input
#serialize project as preparation to send it to the service
input = project_serialize(project_id)
#generate a random number between 0 and 9999 as task_id
task_id = random.randrange(0, 10000, 1)
service = AnalyticsService.objects.get(id=analytics_service_id)
status = service_executor.get_status_for_service(service)
if status == service_executor.ServiceState.READY:
user = request.user
service_execution = AnalyticsServiceExecution(service=service, last_state=1, user=user)
service_execution.save()
#while service_execution.last_state != service_executor.ServiceState.DONE:
if service_execution.last_state == service_executor.ServiceState.READY:
task_url = service_executor.execute_next_state(service_execution, None, input, parameter)
if service_execution.last_state == service_executor.ServiceState.RUNNING:
result = service_executor.execute_next_state(service_execution, task_url, None, None).decode('ascii')
return JsonResponse(result, safe=False)
else: raise ValueError('Service does not exist right now.')
return
|
[
"django.shortcuts.render",
"apps.quiver.models.AnalyticsService.objects.get",
"django.core.exceptions.PermissionDenied",
"apps.projects.models.Datarow.objects.filter",
"numpy.transpose",
"apps.quiver.models.AnalyticsService.objects.filter",
"random.randrange",
"django.http.JsonResponse",
"apps.projects.models.Project.objects.get",
"json.dumps",
"apps.quiver.service_executor.execute_next_state",
"apps.quiver.service_executor.get_status_for_service",
"django.shortcuts.HttpResponseRedirect",
"apps.projects.models.Value.objects.filter",
"apps.projects.serializer.project_serialize",
"apps.quiver.models.AnalyticsServiceExecution",
"apps.projects.models.Project.objects.filter",
"apps.projects.models.Experiment.objects.get"
] |
[((3445, 3477), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/quiver/"""'], {}), "('/quiver/')\n", (3465, 3477), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((6457, 6508), 'django.shortcuts.render', 'render', (['request', '"""quiver/index.html"""', 'dataForRender'], {}), "(request, 'quiver/index.html', dataForRender)\n", (6463, 6508), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((3597, 3632), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/dashboard/"""'], {}), "('/dashboard/')\n", (3617, 3632), False, 'from django.shortcuts import render, HttpResponseRedirect\n'), ((3702, 3741), 'apps.projects.models.Experiment.objects.get', 'Experiment.objects.get', ([], {'id': 'experimentId'}), '(id=experimentId)\n', (3724, 3741), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((3797, 3830), 'apps.projects.models.Project.objects.get', 'Project.objects.get', ([], {'id': 'projectId'}), '(id=projectId)\n', (3816, 3830), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4370, 4409), 'apps.projects.models.Experiment.objects.get', 'Experiment.objects.get', ([], {'id': 'experimentId'}), '(id=experimentId)\n', (4392, 4409), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4433, 4472), 'apps.projects.models.Experiment.objects.get', 'Experiment.objects.get', ([], {'id': 'experimentId'}), '(id=experimentId)\n', (4455, 4472), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4495, 4534), 'apps.projects.models.Experiment.objects.get', 'Experiment.objects.get', ([], {'id': 'experimentId'}), '(id=experimentId)\n', (4517, 4534), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4663, 4709), 'apps.projects.models.Value.objects.filter', 'Value.objects.filter', ([], {'datarow_id': 'datarow_id[0]'}), '(datarow_id=datarow_id[0])\n', (4683, 4709), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((5327, 5367), 'json.dumps', 'json.dumps', (['data'], {'cls': 'NumPyArangeEncoder'}), '(data, cls=NumPyArangeEncoder)\n', (5337, 5367), False, 'import json\n'), ((5368, 5415), 'json.dumps', 'json.dumps', (['header_list'], {'cls': 'NumPyArangeEncoder'}), '(header_list, cls=NumPyArangeEncoder)\n', (5378, 5415), False, 'import json\n'), ((5463, 5513), 'json.dumps', 'json.dumps', (['einheiten_list'], {'cls': 'NumPyArangeEncoder'}), '(einheiten_list, cls=NumPyArangeEncoder)\n', (5473, 5513), False, 'import json\n'), ((5604, 5656), 'json.dumps', 'json.dumps', (['measurement.data'], {'cls': 'NumPyArangeEncoder'}), '(measurement.data, cls=NumPyArangeEncoder)\n', (5614, 5656), False, 'import json\n'), ((5680, 5736), 'json.dumps', 'json.dumps', (['measurement.colNames'], {'cls': 'NumPyArangeEncoder'}), '(measurement.colNames, cls=NumPyArangeEncoder)\n', (5690, 5736), False, 'import json\n'), ((5763, 5819), 'json.dumps', 'json.dumps', (['measurement.colUnits'], {'cls': 'NumPyArangeEncoder'}), '(measurement.colUnits, cls=NumPyArangeEncoder)\n', (5773, 5819), False, 'import json\n'), ((5853, 5910), 'json.dumps', 'json.dumps', (['measurement.timeIndex'], {'cls': 'NumPyArangeEncoder'}), '(measurement.timeIndex, cls=NumPyArangeEncoder)\n', (5863, 5910), False, 'import json\n'), ((5950, 6003), 'json.dumps', 'json.dumps', (['mInstruments_list'], {'cls': 'NumPyArangeEncoder'}), '(mInstruments_list, cls=NumPyArangeEncoder)\n', (5960, 6003), False, 
'import json\n'), ((6264, 6316), 'json.dumps', 'json.dumps', (['graph_visibility'], {'cls': 'NumPyArangeEncoder'}), '(graph_visibility, cls=NumPyArangeEncoder)\n', (6274, 6316), False, 'import json\n'), ((7833, 7862), 'apps.projects.serializer.project_serialize', 'project_serialize', (['project_id'], {}), '(project_id)\n', (7850, 7862), False, 'from apps.projects.serializer import project_serialize\n'), ((7946, 7975), 'random.randrange', 'random.randrange', (['(0)', '(10000)', '(1)'], {}), '(0, 10000, 1)\n', (7962, 7975), False, 'import random\n'), ((7995, 8048), 'apps.quiver.models.AnalyticsService.objects.get', 'AnalyticsService.objects.get', ([], {'id': 'analytics_service_id'}), '(id=analytics_service_id)\n', (8023, 8048), False, 'from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution\n'), ((8066, 8114), 'apps.quiver.service_executor.get_status_for_service', 'service_executor.get_status_for_service', (['service'], {}), '(service)\n', (8105, 8114), False, 'from apps.quiver import service_executor\n'), ((1912, 1930), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (1928, 1930), False, 'from django.core.exceptions import PermissionDenied\n'), ((3370, 3423), 'apps.quiver.models.AnalyticsService.objects.get', 'AnalyticsService.objects.get', ([], {'id': 'analytics_service_id'}), '(id=analytics_service_id)\n', (3398, 3423), False, 'from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution\n'), ((4560, 4610), 'apps.projects.models.Datarow.objects.filter', 'Datarow.objects.filter', ([], {'experiment_id': 'experimentId'}), '(experiment_id=experimentId)\n', (4582, 4610), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((5195, 5218), 'numpy.transpose', 'np.transpose', (['values_wo'], {}), '(values_wo)\n', (5207, 5218), True, 'import numpy as np\n'), ((8237, 8304), 'apps.quiver.models.AnalyticsServiceExecution', 'AnalyticsServiceExecution', ([], {'service': 'service', 'last_state': '(1)', 'user': 'user'}), '(service=service, last_state=1, user=user)\n', (8262, 8304), False, 'from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution\n'), ((2504, 2546), 'apps.quiver.models.AnalyticsService.objects.filter', 'AnalyticsService.objects.filter', ([], {'user': 'user'}), '(user=user)\n', (2535, 2546), False, 'from apps.quiver.models import AnalyticsService, AnalyticsServiceExecution\n'), ((2936, 2969), 'apps.projects.models.Project.objects.filter', 'Project.objects.filter', ([], {'user': 'user'}), '(user=user)\n', (2958, 2969), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4008, 4058), 'apps.projects.models.Datarow.objects.filter', 'Datarow.objects.filter', ([], {'experiment_id': 'experimentId'}), '(experiment_id=experimentId)\n', (4030, 4058), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4123, 4173), 'apps.projects.models.Datarow.objects.filter', 'Datarow.objects.filter', ([], {'experiment_id': 'experimentId'}), '(experiment_id=experimentId)\n', (4145, 4173), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((4250, 4300), 'apps.projects.models.Datarow.objects.filter', 'Datarow.objects.filter', ([], {'experiment_id': 'experimentId'}), '(experiment_id=experimentId)\n', (4272, 4300), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((5002, 5048), 'apps.projects.models.Value.objects.filter', 'Value.objects.filter', ([], {'datarow_id': 
'datarow_id[i]'}), '(datarow_id=datarow_id[i])\n', (5022, 5048), False, 'from apps.projects.models import Experiment, Project, Datarow, Value\n'), ((8540, 8618), 'apps.quiver.service_executor.execute_next_state', 'service_executor.execute_next_state', (['service_execution', 'None', 'input', 'parameter'], {}), '(service_execution, None, input, parameter)\n', (8575, 8618), False, 'from apps.quiver import service_executor\n'), ((8846, 8878), 'django.http.JsonResponse', 'JsonResponse', (['result'], {'safe': '(False)'}), '(result, safe=False)\n', (8858, 8878), False, 'from django.http import JsonResponse\n'), ((8730, 8806), 'apps.quiver.service_executor.execute_next_state', 'service_executor.execute_next_state', (['service_execution', 'task_url', 'None', 'None'], {}), '(service_execution, task_url, None, None)\n', (8765, 8806), False, 'from apps.quiver import service_executor\n')]
|
import cv2
import numpy as np
from .augmentor import DataAugment
import math
class Rotate(DataAugment):
"""
    Continuous rotation.
The sample size for x- and y-axes should be at least sqrt(2) times larger
than the input size to make sure there is no non-valid region after center-crop.
Args:
p (float): probability of applying the augmentation
"""
def __init__(self, p=0.5):
super(Rotate, self).__init__(p=p)
self.image_interpolation = cv2.INTER_LINEAR
self.label_interpolation = cv2.INTER_NEAREST
self.border_mode = cv2.BORDER_CONSTANT
self.set_params()
def set_params(self):
self.sample_params['ratio'] = [1.0, 1.42, 1.42]
def rotate(self, imgs, M, interpolation):
height, width = imgs.shape[-2:]
if imgs.ndim == 4:
channels = imgs.shape[-4]
slices = imgs.shape[-3]
if imgs.ndim == 3:
channels = 1
slices = imgs.shape[-3]
transformedimgs = np.copy(imgs)
for z in range(slices):
if channels == 1:
img = transformedimgs[z, :, :]
                dst = cv2.warpAffine(img, M, (height, width), flags=interpolation, borderMode=self.border_mode)
transformedimgs[z, :, :] = dst
elif channels == 3:
img = transformedimgs[:, z, :, :]
img = np.moveaxis(img, 0, -1)
                dst = cv2.warpAffine(img, M, (height, width), flags=interpolation, borderMode=self.border_mode)
transformedimgs[:, z, :, :] = np.moveaxis(dst, -1, 0)
else:
raise Exception('Unknown number of channels in 2d slice')
return transformedimgs
def rotation_matrix(self, axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta degrees.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
theta = float(theta) * np.pi / 180.0
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
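    # Sanity-check example: rotation_matrix((1, 0, 0), 90) is approximately
    # [[1, 0, 0], [0, 0, -1], [0, 1, 0]], which maps the y-axis onto the z-axis.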
def __call__(self, data, random_state=None):
if random_state is None:
random_state = np.random.RandomState()
image = data['image']
height, width = image.shape[-2:]
angle = random_state.rand()*360.0
M = cv2.getRotationMatrix2D((height/2, width/2), angle, 1)
output = {}
for key, val in data.items():
if key in ['label', 'skeleton', 'weight', 'context', 'skeleton_probability']:
output[key] = self.rotate(val, M, self.label_interpolation)
elif key == 'flux':
r_img = self.rotate(val, M, self.image_interpolation)
r_mat = self.rotation_matrix((1, 0, 0), angle)
r_field = np.matmul(r_mat, r_img.reshape((3, -1)))
output[key] = r_field.reshape(val.shape)
elif key == 'image':
output[key] = self.rotate(val, M, self.image_interpolation)
else:
raise TypeError('Input data key not identified, Key was: ' + key)
return output
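# Minimal usage sketch (an illustration, not part of the original file; it assumes the base
# DataAugment class leaves __call__ untouched and that the volumes are (z, y, x) arrays):
#   aug = Rotate(p=1.0)
#   out = aug({'image': image_volume, 'label': label_volume})
# Only the keys handled in __call__ above are accepted; any other key raises a TypeError.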
|
[
"numpy.copy",
"cv2.warpAffine",
"numpy.asarray",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.moveaxis",
"cv2.getRotationMatrix2D",
"math.sin",
"numpy.random.RandomState"
] |
[((1025, 1038), 'numpy.copy', 'np.copy', (['imgs'], {}), '(imgs)\n', (1032, 1038), True, 'import numpy as np\n'), ((1960, 1976), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (1970, 1976), True, 'import numpy as np\n'), ((2086, 2107), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (2094, 2107), False, 'import math\n'), ((2297, 2468), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (2305, 2468), True, 'import numpy as np\n'), ((2771, 2829), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(height / 2, width / 2)', 'angle', '(1)'], {}), '((height / 2, width / 2), angle, 1)\n', (2794, 2829), False, 'import cv2\n'), ((2134, 2155), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (2142, 2155), False, 'import math\n'), ((2621, 2644), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2642, 2644), True, 'import numpy as np\n'), ((1171, 1269), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(height, width)', '(1.0)'], {'flags': 'interpolation', 'borderMode': 'self.border_mode'}), '(img, M, (height, width), 1.0, flags=interpolation,\n borderMode=self.border_mode)\n', (1185, 1269), False, 'import cv2\n'), ((2009, 2027), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (2015, 2027), True, 'import numpy as np\n'), ((1417, 1440), 'numpy.moveaxis', 'np.moveaxis', (['img', '(0)', '(-1)'], {}), '(img, 0, -1)\n', (1428, 1440), True, 'import numpy as np\n'), ((1463, 1561), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(height, width)', '(1.0)'], {'flags': 'interpolation', 'borderMode': 'self.border_mode'}), '(img, M, (height, width), 1.0, flags=interpolation,\n borderMode=self.border_mode)\n', (1477, 1561), False, 'import cv2\n'), ((1604, 1627), 'numpy.moveaxis', 'np.moveaxis', (['dst', '(-1)', '(0)'], {}), '(dst, -1, 0)\n', (1615, 1627), True, 'import numpy as np\n')]
|
import numpy as np
import random
import pandas as pd
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import select
from sqlalchemy import and_
from sqlalchemy import between
from sqlalchemy.sql import exists
from sqlalchemy import desc
from datetime import datetime, timezone, timedelta
from damadicsDBMapping import *
from sequenced_data_handler import SequenceDataHandler
# IP Address: 172.16.17.32
# User: dbAdmin
# Password: <PASSWORD>
# Database: damadics
class ValveDataHandler(SequenceDataHandler):
'''
TODO: column information here
'''
#Method definition
def __init__(self, start_time, end_time, selected_features, sequence_length = 1, sequence_stride = 1, data_scaler = None):
#Public properties
self._start_time = start_time
self._end_time = end_time
self._selected_features = selected_features
self._rectify_labels = False
self._data_scaler = data_scaler
# Database connection
# self._db_connection = mysql.connector.connect(user = 'root', password = '<PASSWORD>#', database = 'damadics')
self._load_from_db = True
self._column_names = {0: 'timestamp', 1: 'externalControllerOutput', 2: 'undisturbedMediumFlow', 3: 'pressureValveInlet', 4:'pressureValveOutlet',
5: 'mediumTemperature', 6: 'rodDisplacement', 7: 'disturbedMediumFlow', 8: 'selectedFault', 9: 'faultType', 10: 'faultIntensity'}
# Entire Dataset
self._df = None
self._X = None
self._y = None
# Splitting. This is what is used to train
self._df_train = None
self._df_test = None
#create one time session
self._sqlsession = None
print("init")
#super init
super().__init__(sequence_length, sequence_stride, len(selected_features), data_scaler)
def connect_to_db(self,username,pasw,host,dbname):
# self.username = username
# self.pasw = pasw
# self.host = host
self.dbname = dbname
databaseString = "mysql+mysqldb://"+username+":"+pasw+"@"+host+"/"+dbname
self._sqlsession = None
try:
sqlengine = sqlalchemy.create_engine(databaseString)
SQLSession = sessionmaker(bind=sqlengine)
self._sqlsession = SQLSession()
print("Connection to " + databaseString + " successfull")
except Exception as e:
print("e:", e)
print("Error in connection to the database")
def extract_data_from_db(self):
startTime = datetime.now()
self._df = self._sqlsession.query(ValveReading).filter(ValveReading.timestamp.between (self._start_time,self._end_time) )
self._df = pd.read_sql(self._df.statement, self._df.session.bind)
#dataPoints = self._sqlsession.query(exists().where(ValveReading.timestamp == '2018-07-27 15:56:22')).scalar()
#dataPoints = self._sqlsession.query(ValveReading).order_by(ValveReading.timestamp)
# TODO: need to check whether dataPoints is of type DataFrame. Needs to be in type DataFrame
# TODO: check whether column names are extracted out
# All the data with selected features is saved in this variable
# TODO: check if self._selected_features is an array of indexes or strings
# self._df = df.iloc[:, self._selected_features].values
# Assumption that the output is only one column and is located at the last column out of all the selected features
# Below if self._selected_features is an array of indexes
column_names = ['externalControllerOutput', 'pressureValveInlet',
'pressureValveOutlet', 'mediumTemperature','rodDisplacement', 'disturbedMediumFlow', 'selectedFault']
self._X = self._df.loc[:, column_names[:-1]].values
self._y = self._df.loc[:, column_names[len(column_names) - 1]].values
# Below if self._selected_features is an array of strings
# inputs = df.loc[:, column_names[:-1]].values
# outputs = df.loc[:, column_names[len(column_names) - 1]].values
# for data in self._df:
# print(self._df)
print("Extracting data from database runtime:", datetime.now() - startTime)
def one_hot_encode(self, num_readings):
startTime = datetime.now()
fault_column = list()
one_hot_matrix = np.zeros((num_readings, 20))
fault_column = self._y
for i in range(num_readings):
one_hot_matrix[i, int(fault_column[i] - 1)] = 1
print("One-hot-encoding:", datetime.now() - startTime)
return one_hot_matrix
# Private
def find_samples(self, data_samples):
'''
		Assumptions made when using this function:
		1.) The valve always starts off as NOT BROKEN. The first faultType value is 20.
		2.) The function is applied to the entire dataset, not in chunks.
'''
# TODO: handle cases when the first readings start of as a broken value
# TODO: ask David if he wants a minimum amount of samples in the dataset
startTime = datetime.now()
small_list, big_list = list(), list()
normal_status = 20.0
isBroken = False
counter = 0
for i in range(len(self._y)):
# If True, then the current status of the valve is that it is broken
if (isBroken):
# The valve has been fixed and is back to its normal status
if (self._y[i] == normal_status):
isBroken = False
counter += 1
# Save everything from the small_list into the big_list
small_list = np.vstack(small_list)
big_list.append(small_list)
small_list = list()
small_list.append(data_samples[i, :])
# The current status of the valve is that it is not broken
else:
if (self._y[i] != normal_status):
isBroken = True
# small_list = np.append(data_samples[i, :], small_list)
small_list.append(data_samples[i, :])
print("Splitting into samples:",datetime.now() - startTime)
print("counter:", counter)
return big_list, counter
#
#
#
#
#
#
#
# # Private
# def find_samples(self, data_samples):
#
# '''
# Assumptions made when using this function
# 1.) The valve always starts of as NOT BROKEN. First faultType value is 20.
# 2.) Function is used to entire dataset and not in chunks
# '''
#
# # TODO: handle cases when the first readings starts of as a broken valve
# # TODO: ask David if he wants a minimum amount of samples in the dataset
#
	# 	small_list, big_list = list(), list()
# normal_status = 20.0
# isBroken = False
# # Counter for the number of samples there are in the dataset
# counter = 0
#
# for i in range(len(self._y)):
# # If True, then the current status of the valve is that it is broken
# if (isBroken):
# # The valve has been fixed and is back to its normal status
# if (self._y[i] == normal_status):
# isBroken = False
# counter += 1
# # Save everything from the small_list into the big_list
# small_list = np.vstack(small_list)
# big_list.append(small_list)
# # Clear the small_list (reinitialize)
# small_list = list()
# small_list.append(data_samples[i, :])
# # The current status of the valve is that it is not broken
# else:
# # Broken valve discovered
# if (self._y[i] != normal_status):
# isBroken = True
# small_list.append(data_samples[i, :])
#
# # SPECIAL CASE: the simulation does not end with a fixed valve. Therefore we shall whatever is inside the small_list and say that it is an entire sample
# if (self._y[i] != 20):
# counter += 1
# small_list = np.vstack(small_list)
# big_list.append(small_list)
#
# return big_list, counter
# Public
def load_data(self, verbose = 0, cross_validation_ratio = 0, test_ratio = 0, unroll = True):
"""Load the data using the specified parameters"""
'''
TODO: extracting data from MySQL database using SQLALCHEMY
Functions called here: generate_df_with_rul(self, df), generate_train_arrays(self, cross_validation_ratio = 0), generate_test_arrays(self),
create_sequenced_train_data(self), create_sequenced_test_data(self)
X: df[timestamp, ..., selectedFault]
y: df['faultType']
'''
# dataPoints = self._sqlsession.query(ValveReading)
if verbose == 1:
print("Loading data for dataset {} with window_size of {}, stride of {}. Cros-Validation ratio {}".format(self._dataset_number,
self._sequence_length, self._sequence_stride, cross_validation_ratio))
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio + test_ratio > 1:
print("Sum of cross validation and test ratios is greater than 1. Need to pick smaller ratios.")
return
if self._load_from_db == True:
print("Loading data from database")
            # These variables hold the entire dataset
self.extract_data_from_db()
# One hot encoding
output_one_hot_matrix = self.one_hot_encode(self._df.shape[0])
# Finds samples within the inputs
self._X, num_samples = self.find_samples(self._X)
self._y, _ = self.find_samples(output_one_hot_matrix)
# self._df_train = self.load_db_into_df(self._file_train_data)
# self._df_test = self.load_db_into_df(self._file_test_data)
# self._df_train, num_units, trimmed_rul_train = self.generate_df_with_rul(self._df_train)
else:
print("Loading data from memory")
#Reset arrays
"""
self._X_train_list = list()
self._X_crossVal_list = list()
self._X_test_list = list()
self._y_train_list = list()
self._y_crossVal_list = list()
self._y_test_list = list()
"""
# Split up the data into its different samples
#Modify properties in the parent class, and let the parent class finish the data processing
self.train_cv_test_split(cross_validation_ratio, test_ratio, num_samples)
self.print_sequence_shapes()
# Unroll = True for ANN
# Unroll = False for RNN
self.generate_train_data(unroll)
self.generate_crossValidation_data(unroll)
self.generate_test_data(unroll)
#
        self._load_from_db = False # As long as the dataframe doesn't change, there is no need to reload from file
# Private
def train_cv_test_split(self, cross_validation_ratio, test_ratio, num_samples):
''' From the dataframes generate the feature arrays and their labels'''
print("split_samples num_samples:", num_samples)
print("cross_validation_ratio:", cross_validation_ratio)
print("test_ratio:", test_ratio)
startTime = datetime.now()
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
if cross_validation_ratio < 0 or cross_validation_ratio > 1:
print("Error, cross validation must be between 0 and 1")
return
if test_ratio < 0 or test_ratio > 1:
print("Error, test ratio must be between 0 and 1")
return
if cross_validation_ratio != 0 or test_ratio != 0:
self._X_train_list, self._y_train_list, self._X_crossVal_list, self._y_crossVal_list, self._X_test_list, self._y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
print("Train, cv, and test splitting:",datetime.now() - startTime)
print()
# Private
def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
'''Split the samples according to their respective ratios'''
shuffled_samples = list(range(0, num_samples))
random.shuffle(shuffled_samples)
num_crossVal = int(cross_validation_ratio * num_samples)
#print("num_crossVal:", num_crossVal)
num_test = int(test_ratio * num_samples)
#print("num_test:", num_test)
num_train = num_samples - num_crossVal - num_test
#print("num_train:", num_train)
X_train_list, y_train_list = list(), list()
X_crossVal_list, y_crossVal_list = list(), list()
X_test_list, y_test_list = list(), list()
print(self._y[0])
for i in range(num_train):
#print("i:", i)
X_train_list.append(self._X[shuffled_samples[i]])
y_train_list.append(self._y[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]][-1].reshape(1, 20))
# x = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][x][19] != 1):
# y_train_list.append(self._y[shuffled_samples[i]])
# x += 1
# for x in range(self._y[shuffled_samples[i]].shape[0]):
# if (self._y[shuffled_samples[i]][x][19] != 1 and len(y_train_list) == 0):
# y_train_list.append(self._y[shuffled_samples[i]])
# print(len(y_train_list))
for j in range(num_train, num_train + num_crossVal):
#print("j:", j)
X_crossVal_list.append(self._X[shuffled_samples[j]])
y_crossVal_list.append(self._y[shuffled_samples[j]][-1].reshape(1, 20))
# y = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][y][19] != 1):
# y_crossVal_list.append(self._y[shuffled_samples[i]])
# y += 1
# for y in range(self._y[shuffled_samples[j]].shape[0]):
# if (self._y[shuffled_samples[j]][y][19] != 1 and len(y_crossVal_list) == 0):
# y_crossVal_list.append(self._y[shuffled_samples[j]])
for k in range(num_train + num_crossVal, num_samples):
#print("k:", k)
X_test_list.append(self._X[shuffled_samples[k]])
y_test_list.append(self._y[shuffled_samples[k]][-1].reshape(1, 20))
# z = 0
# while(len(y_train_list) == 0):
# if (self._y[shuffled_samples[i]][x][19] != 1):
# y_test_list.append(self._y[shuffled_samples[i]])
# z += 1
# for z in range(self._y[shuffled_samples[k]].shape[0]):
# if (self._y[shuffled_samples[k]][z][19] != 1 and len(y_test_list) == 0):
# y_test_list.append(self._y[shuffled_samples[k]])
#print("X_test_list shape:", len(X_test_list[0].shape))
return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
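    # Sizing sketch (added for clarity, hypothetical numbers): with num_samples = 100,
    # cross_validation_ratio = 0.1 and test_ratio = 0.1, the shuffled indices are split into
    # num_crossVal = 10, num_test = 10 and num_train = 80 samples, matching the int()
    # truncation used above.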
# def train_cv_test_split(self, cross_validation_ratio = 0, test_ratio = 0, num_samples):
# """From the dataframes generate the feature arrays and their labels"""
#
# '''
# Functions called here: split_samples(self, df, splitting_ratio), generate_cross_validation_from_df(self, df, sequence_length)
# '''
#
# X_train_list, y_train_list = list(), list()
# X_crossVal_list, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list()
#
# if cross_validation_ratio < 0 or cross_validation_ratio > 1 :
# print("Error, cross validation must be between 0 and 1")
# return
#
# if test_ratio < 0 or test_ratio > 1 :
# print("Error, test ratio must be between 0 and 1")
# return
#
# if cross_validation_ratio != 0 or test_ratio != 0:
# X_train_list, X_test_list, X_crossVal_list, y_crossVal_list, y_train_list, y_test_list = self.split_samples(cross_validation_ratio, test_ratio, num_samples)
#
# return X_train_list, y_train_list, X_crossVal_list, y_crossVal_list, X_test_list, y_test_list
# Private
# def split_samples(self, cross_validation_ratio, test_ratio, num_samples):
# """Split the samples according to their respective ratios"""
#
# shuffled_samples = list(range(0, num_samples))
# random.shuffle(shuffled_samples)
#
# num_crossVal = int(cross_validation_ratio * num_samples)
# num_test = int(test_ratio * num_samples)
# num_train = num_samples - num_crossVal - num_test
#
# X_train_list, y_train_list = list(), list()
# X_crossVal, y_crossVal_list = list(), list()
# X_test_list, y_test_list = list(), list()
#
# for i in range(num_train):
# X_train_list.append(self._X[shuffled_samples[i]])
# y_train_list.append(self._y[shuffled_samples[i]])
#
# for j in range(num_train, num_train + num_crossVal):
# X_crossVal.append(self._X[shuffled_samples[j]])
# y_crossVal_list.append(self._y[shuffled_samples[j]])
#
# for k in range(num_train + num_crossVal, num_samples):
# X_test.append(self._X[shuffled_samples[k]])
# y_test_list.append(self._y[shuffled_samples[k]])
#
# return X_train_list, X_test, X_crossVal, y_crossVal_list, y_train_list, y_test
#Property definition
@property
def df(self):
return self._df
@df.setter
def df(self, df):
self._df = df
@property
def X(self):
        return self._X
@X.setter
def X(self, X):
        self._X = X
@property
def y(self):
return self._y
@y.setter
    def y(self, y):
self._y = y
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self,start_time):
self._start_time = start_time
@property
def sqlsession(self):
return self._sqlsession
@sqlsession.setter
def sqlsession(self,sqlsession):
self._sqlsession = sqlsession
def __str__(self):
return "<ValveReading(timestamp='%s',externalControllerOutput='%s',undisturbedMediumFlow='%s',pressureValveInlet='%s',pressureValveOutlet='%s',mediumTemperature='%s',\
rodDisplacement='%s',disturbedMediumFlow='%s',selectedFault='%s',faultType='%s',faultIntensity='%s')>"\
%(str(self._timestamp),self._externalControllerOutput,self._undisturbedMediumFlow,self.pressureValveInlet,\
self.pressureValveOutlet,self.mediumTemperature,self.rodDisplacement,self.disturbedMediumFlow,self.selectedFault,\
self.faultType,self.faultIntensity)
# def selectedFeatures(self):
# return self._selectedFeatures
#
# @selectedFeatures.setter
# def selectedFeatures(self, selectedFeatures):
# self._selectedFeatures = selectedFeatures
#
# @property
# def max_rul(self):
# return self._max_rul
#
# @max_rul.setter
# def max_rul(self, max_rul):
# self._max_rul = max_rul
#
# @property
# def rectify_labels(self):
# return self._rectify_labels
#
# @rectify_labels.setter
# def rectify_labels(self, rectify_labels):
# self._rectify_labels = rectify_labels
#
# #ReadOnly Properties
#
# @property
# def dataset_number(self):
# return self._dataset_number
#
# @property
# def data_folder(self):
# return self._data_folder
#
# @property
# def file_train_data(self):
# return self._file_train_data
#
# @property
# def file_test_data(self):
# return self._file_test_data
#
# @property
# def file_rul(self):
# return self._file_rul
#
# @property
# def load_from_file(self):
# return self._load_from_db
#
# @property
# def column_names(self):
# return self._column_names
#
# @property
# def df_train(self):
# return self._df_train
#
# @property
# def df_test(self):
# return self._df_test
#
#
#
# #Auxiliary functions
#
# def compute_training_rul(df_row, *args):
# """Compute the RUL at each entry of the DF"""
#
# max_rul = args[1]
# rul_vector = args[0]
# rul_vector_index = int(df_row['Unit Number']) - 1
#
#
# if max_rul > 0 and rul_vector[rul_vector_index] - df_row['Cycle'] > max_rul:
# return max_rul
# else:
# return rul_vector[rul_vector_index] - df_row['Cycle']
|
[
"sqlalchemy.orm.sessionmaker",
"random.shuffle",
"sqlalchemy.create_engine",
"datetime.datetime.now",
"numpy.zeros",
"numpy.vstack",
"pandas.read_sql"
] |
[((2315, 2329), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2327, 2329), False, 'from datetime import datetime, timezone, timedelta\n'), ((2468, 2522), 'pandas.read_sql', 'pd.read_sql', (['self._df.statement', 'self._df.session.bind'], {}), '(self._df.statement, self._df.session.bind)\n', (2479, 2522), True, 'import pandas as pd\n'), ((3929, 3943), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3941, 3943), False, 'from datetime import datetime, timezone, timedelta\n'), ((3988, 4016), 'numpy.zeros', 'np.zeros', (['(num_readings, 20)'], {}), '((num_readings, 20))\n', (3996, 4016), True, 'import numpy as np\n'), ((4619, 4633), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4631, 4633), False, 'from datetime import datetime, timezone, timedelta\n'), ((10186, 10200), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10198, 10200), False, 'from datetime import datetime, timezone, timedelta\n'), ((11114, 11146), 'random.shuffle', 'random.shuffle', (['shuffled_samples'], {}), '(shuffled_samples)\n', (11128, 11146), False, 'import random\n'), ((1993, 2033), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['databaseString'], {}), '(databaseString)\n', (2017, 2033), False, 'import sqlalchemy\n'), ((2050, 2078), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'sqlengine'}), '(bind=sqlengine)\n', (2062, 2078), False, 'from sqlalchemy.orm import sessionmaker\n'), ((3843, 3857), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3855, 3857), False, 'from datetime import datetime, timezone, timedelta\n'), ((4156, 4170), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4168, 4170), False, 'from datetime import datetime, timezone, timedelta\n'), ((5465, 5479), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5477, 5479), False, 'from datetime import datetime, timezone, timedelta\n'), ((10874, 10888), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10886, 10888), False, 'from datetime import datetime, timezone, timedelta\n'), ((5075, 5096), 'numpy.vstack', 'np.vstack', (['small_list'], {}), '(small_list)\n', (5084, 5096), True, 'import numpy as np\n')]
|
from self_organising_systems.texture_ca.config import cfg
from self_organising_systems.shared.util import imread
import tensorflow as tf
import numpy as np
style_layers = ['block%d_conv1'%i for i in range(1, 6)]
content_layer = 'block4_conv2'
class StyleModel:
def __init__(self, input_texture_path):
vgg = tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet')
vgg.trainable = False
layers = style_layers + [content_layer]
layers = {name:vgg.get_layer(name).output for name in layers}
self.model = tf.keras.Model([vgg.input], layers)
self.style_img = imread(input_texture_path, cfg.texture_ca.vgg_input_img_size)
self.target_style, _ = self.calc_style_content(self.style_img[None,...])
def run_model(self, img):
img = img[..., ::-1]*255.0 - np.float32([103.939, 116.779, 123.68])
layers = self.model(img)
style = [layers[name] for name in style_layers]
return style, layers[content_layer]
def calc_style_content(self, img):
style_layers, content = self.run_model(img)
style = [self.gram_style(a) for a in style_layers]
return style, content
@tf.function
def __call__(self, x):
gs, content = self.calc_style_content(x)
sl = tf.reduce_mean(self.style_loss(gs, self.target_style))
return sl
@tf.function
def style_loss(self, a, b):
return tf.add_n([tf.reduce_mean(tf.square(x-y), [-2, -1]) for x, y in zip(a, b)])
def gram_style(self, a):
n, h, w, ch = tf.unstack(tf.shape(a))
a = tf.sqrt(a+1.0)-1.0
gram = tf.einsum('bhwc, bhwd -> bcd', a, a)
return gram / tf.cast(h*w, tf.float32)
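# Note on gram_style (added for clarity): the einsum 'bhwc, bhwd -> bcd' builds, for each image
# in the batch, a ch x ch Gram matrix of channel correlations, normalised by the number of
# spatial positions h*w; the sqrt(a + 1) - 1 transform is applied to the activations before the
# correlation is taken.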
class Inception:
def __init__(self, layer, ch):
with tf.io.gfile.GFile(cfg.texture_ca.inception_pb, 'rb') as f:
self.graph_def = tf.compat.v1.GraphDef.FromString(f.read())
self.layer = layer
self.ch = ch
avgpool0_idx = [n.name for n in self.graph_def.node].index('avgpool0')
del self.graph_def.node[avgpool0_idx:]
# use pre_relu layers for Concat nodes
node = {n.name:n for n in self.graph_def.node}[layer]
self.outputs = [layer+':0']
if 'Concat' in node.op:
self.outputs = [inp+'_pre_relu:0' for inp in node.input[1:]]
@tf.function
def __call__(self, x):
overflow_loss = tf.reduce_mean(tf.square(tf.clip_by_value(x, 0.0, 1.0)-x))
imgs = x*255.0-117.0
outputs = tf.import_graph_def(self.graph_def, {'input':imgs}, self.outputs)
a = tf.concat(outputs, -1)
return -tf.reduce_mean(a[...,self.ch]) + overflow_loss*cfg.texture_ca.overflow_loss_coef
|
[
"tensorflow.shape",
"tensorflow.io.gfile.GFile",
"tensorflow.einsum",
"tensorflow.concat",
"tensorflow.sqrt",
"tensorflow.clip_by_value",
"tensorflow.import_graph_def",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.Model",
"tensorflow.reduce_mean",
"self_organising_systems.shared.util.imread",
"tensorflow.cast",
"numpy.float32",
"tensorflow.square"
] |
[((317, 389), 'tensorflow.keras.applications.vgg16.VGG16', 'tf.keras.applications.vgg16.VGG16', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (350, 389), True, 'import tensorflow as tf\n'), ((543, 578), 'tensorflow.keras.Model', 'tf.keras.Model', (['[vgg.input]', 'layers'], {}), '([vgg.input], layers)\n', (557, 578), True, 'import tensorflow as tf\n'), ((600, 661), 'self_organising_systems.shared.util.imread', 'imread', (['input_texture_path', 'cfg.texture_ca.vgg_input_img_size'], {}), '(input_texture_path, cfg.texture_ca.vgg_input_img_size)\n', (606, 661), False, 'from self_organising_systems.shared.util import imread\n'), ((1532, 1568), 'tensorflow.einsum', 'tf.einsum', (['"""bhwc, bhwd -> bcd"""', 'a', 'a'], {}), "('bhwc, bhwd -> bcd', a, a)\n", (1541, 1568), True, 'import tensorflow as tf\n'), ((2344, 2410), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['self.graph_def', "{'input': imgs}", 'self.outputs'], {}), "(self.graph_def, {'input': imgs}, self.outputs)\n", (2363, 2410), True, 'import tensorflow as tf\n'), ((2418, 2440), 'tensorflow.concat', 'tf.concat', (['outputs', '(-1)'], {}), '(outputs, -1)\n', (2427, 2440), True, 'import tensorflow as tf\n'), ((801, 839), 'numpy.float32', 'np.float32', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (811, 839), True, 'import numpy as np\n'), ((1481, 1492), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (1489, 1492), True, 'import tensorflow as tf\n'), ((1502, 1518), 'tensorflow.sqrt', 'tf.sqrt', (['(a + 1.0)'], {}), '(a + 1.0)\n', (1509, 1518), True, 'import tensorflow as tf\n'), ((1587, 1613), 'tensorflow.cast', 'tf.cast', (['(h * w)', 'tf.float32'], {}), '(h * w, tf.float32)\n', (1594, 1613), True, 'import tensorflow as tf\n'), ((1672, 1724), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['cfg.texture_ca.inception_pb', '"""rb"""'], {}), "(cfg.texture_ca.inception_pb, 'rb')\n", (1689, 1724), True, 'import tensorflow as tf\n'), ((2453, 2484), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['a[..., self.ch]'], {}), '(a[..., self.ch])\n', (2467, 2484), True, 'import tensorflow as tf\n'), ((1374, 1390), 'tensorflow.square', 'tf.square', (['(x - y)'], {}), '(x - y)\n', (1383, 1390), True, 'import tensorflow as tf\n'), ((2271, 2300), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (2287, 2300), True, 'import tensorflow as tf\n')]
|
import numpy as np
import os
import traceback
import yaml
from edflow.hooks.hook import Hook
from edflow.util import walk, retrieve, contains_key
from edflow.custom_logging import get_logger
class RuntimeInputHook(Hook):
"""Given a textfile reads that at each step and passes the results to
a callback function."""
def __init__(self, update_file, callback):
"""Args:
update_file (str): path/to/yaml-file containing the parameters of
interest.
callback (Callable): Each time something changes in the update_file
this function is called with the content of the file as
argument.
"""
self.logger = get_logger(self)
self.ufile = update_file
self.callback = callback
self.last_updates = None
if not os.path.exists(self.ufile):
msg = (
"# Automatically created file. Changes made in here will "
"be recognized during runtime."
)
with open(self.ufile, "w+") as f:
f.write(msg)
def before_step(self, *args, **kwargs):
"""Checks if something changed and if yes runs the callback."""
try:
updates = yaml.full_load(open(self.ufile, "r"))
if self.last_updates is not None:
changes = {}
def is_changed(key, val, changes=changes):
if contains_key(key, updates):
other_val = retrieve(key, updates)
change = np.any(val != other_val)
else:
# This key is new -> Changes did happen!
change = True
changes[key] = change
self.logger.debug("Pre CHANGES: {}".format(changes))
walk(self.last_updates, is_changed, pass_key=True)
self.logger.debug("Post CHANGES: {}".format(changes))
if np.any(list(changes.values())):
self.callback(updates)
self.logger.debug("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
else:
if updates is not None:
self.callback(updates)
self.logger.info("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
except Exception as e:
self.logger.error("Something bad happend :(")
self.logger.error("{}".format(e))
self.logger.error(traceback.format_exc())
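# Minimal usage sketch (hypothetical paths and callback, not part of the original module):
#
#   def on_update(params):
#       print("new runtime parameters:", params)
#
#   hook = RuntimeInputHook("runtime_input.yaml", on_update)
#   hook.before_step()  # normally invoked by the edflow hook machinery before each step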
|
[
"os.path.exists",
"traceback.format_exc",
"edflow.custom_logging.get_logger",
"edflow.util.walk",
"numpy.any",
"edflow.util.contains_key",
"edflow.util.retrieve"
] |
[((708, 724), 'edflow.custom_logging.get_logger', 'get_logger', (['self'], {}), '(self)\n', (718, 724), False, 'from edflow.custom_logging import get_logger\n'), ((842, 868), 'os.path.exists', 'os.path.exists', (['self.ufile'], {}), '(self.ufile)\n', (856, 868), False, 'import os\n'), ((1856, 1906), 'edflow.util.walk', 'walk', (['self.last_updates', 'is_changed'], {'pass_key': '(True)'}), '(self.last_updates, is_changed, pass_key=True)\n', (1860, 1906), False, 'from edflow.util import walk, retrieve, contains_key\n'), ((1452, 1478), 'edflow.util.contains_key', 'contains_key', (['key', 'updates'], {}), '(key, updates)\n', (1464, 1478), False, 'from edflow.util import walk, retrieve, contains_key\n'), ((2689, 2711), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2709, 2711), False, 'import traceback\n'), ((1516, 1538), 'edflow.util.retrieve', 'retrieve', (['key', 'updates'], {}), '(key, updates)\n', (1524, 1538), False, 'from edflow.util import walk, retrieve, contains_key\n'), ((1573, 1597), 'numpy.any', 'np.any', (['(val != other_val)'], {}), '(val != other_val)\n', (1579, 1597), True, 'import numpy as np\n')]
|
import json
import sys
# import matplotlib.pyplot as plt
import copy
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import class_weight
from collections import Counter
import random
from tensorflow.keras.callbacks import Callback
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from nltk.tokenize import sent_tokenize
import os
# read entire json file
# if loading the original dataset ignore the reviews with score 0
# return a list of json entries
def readJson(file_path, original=False):
data = []
with open(file_path, encoding="utf8") as json_file:
for line in json_file:
entry = json.loads(line)
if original == True:
if entry['_source']['Review']['ReviewRating'] == 0:
continue
data.append(entry)
return data
# compute histogram of review scores
# input -> list of jsons
# output -> dict score -> #reviews
def computeScoreHistogram(data, normalize = False):
histo = {}
for entry in data:
score = entry['_source']['Review']['ReviewRating']
if score in histo:
histo[score] += 1
else:
histo[score] = 1
if normalize == True:
for key, value in histo.items():
histo[key] = 1.0 * value / len(data)
print(histo)
return histo
def computeTextStatistics(data, superior_threshold=None, inferior_threshold=None):
histo_char = {}
histo_word = {}
histo_category = {}
sup_threshold = 0
inf_threshold = 0
for entry in data:
text = entry['_source']['Review']['ReviewBody']
category = entry['_source']['Product']['ProductCategory']
chars = len(text)
words = len(text.split(" "))
if superior_threshold != None and words > superior_threshold:
sup_threshold += 1
if inferior_threshold != None and words < inferior_threshold:
inf_threshold += 1
if chars in histo_char:
histo_char[chars] += 1
else:
histo_char[chars] = 1
if words in histo_word:
histo_word[words] += 1
else:
histo_word[words] = 1
if category in histo_category:
histo_category[category] += 1
else:
histo_category[category] = 1
return histo_char, histo_word, histo_category, sup_threshold, inf_threshold
def computeDatasetStatistics(data, superior_threshold=None, inferior_threshold=None):
histo_scores = computeScoreHistogram(data)
histo_chars, histo_words, histo_category, sup_threshold, inf_threshold = computeTextStatistics(data, superior_threshold, inferior_threshold)
print("Reviews with number of words over", superior_threshold, "=", sup_threshold, "percentage =", 100.0*sup_threshold/len(data))
print("Reviews with number of words under", inferior_threshold, "=", inf_threshold, "percentage =", 100.0*inf_threshold/len(data))
print(histo_category)
plt.bar(histo_scores.keys(), histo_scores.values(), 1.0, color='g')
plt.title("Scores")
plt.show()
plt.bar(histo_chars.keys(), histo_chars.values(), 1.0, color='g')
plt.title("Chars")
plt.show()
plt.bar(histo_words.keys(), histo_words.values(), 1.0, color='g')
plt.title("Words")
plt.show()
# split the dataset in 5 vs ALL -> 1,2,3,4 -> label 0
# 5 -> label 1
# input -> dataset list of jsons
# output -> dataset list of jsons
def splitData5vAll(data):
new_data = copy.deepcopy(data)
for entry in new_data:
if entry['_source']['Review']['ReviewRating'] == 5:
entry['_source']['Review']['ReviewRating'] = 1
else:
entry['_source']['Review']['ReviewRating'] = 0
return new_data
# save the dataset
# input -> dataset list of jsons, filename to save
def saveData(data, filename):
with open(filename, 'w') as outfile:
for entry in data:
json.dump(entry, outfile)
outfile.write("\n")
# get features from data
# input -> data list of json
# sample_majority -> sample or not from majority class
# sample_count -> how many entries to sample from majority class
# set seed -> random seed value
# output -> list of dicts | one entry is a dict with features and labels
def getFeatures(data, use_review_text=True, sample_majority=False, sample_count=0, seed=None, majority_class=3):
if sample_majority == False:
train_list = []
for data_entry in data:
train_entry = {}
if use_review_text == True:
train_entry['features:review_text'] = data_entry['_source']['Review']['ReviewBody']
train_entry['label'] = data_entry['_source']['Review']['ReviewRating']
train_list.append(train_entry)
return train_list
elif sample_majority == True:
majority_list = []
for data_entry in data:
majority_entry = {}
if data_entry['_source']['Review']['ReviewRating'] == majority_class:
if use_review_text == True:
majority_entry['features:review_text'] = data_entry['_source']['Review']['ReviewBody']
majority_entry['label'] = data_entry['_source']['Review']['ReviewRating']
majority_list.append(majority_entry)
random.seed(seed)
sampled_majority_list = random.sample(majority_list, sample_count)
random.seed()
train_list = []
for data_entry in data:
train_entry = {}
if data_entry['_source']['Review']['ReviewRating'] != majority_class:
if use_review_text == True:
train_entry['features:review_text'] = data_entry['_source']['Review']['ReviewBody']
train_entry['label'] = data_entry['_source']['Review']['ReviewRating']
# train_list.append(train_entry)
sampled_majority_list.append(train_entry)
# train_list.extend(sampled_majority_list)
train_list = sampled_majority_list
return train_list
# get processed features and labels
# input -> features
# output -> list of processed features, list of labels, dict of class_weights
def processFeatures(data, bert_proc):
features = []
labels = []
iids = []
sids = []
i = 0
for entry in data:
review_text = entry["features:review_text"]
input_ids, segment_ids = bert_proc.process_text(review_text)
iids.append(input_ids)
sids.append(segment_ids)
labels.append(entry['label'])
features = [np.array(iids), np.array(sids)]
class_weights = class_weight.compute_class_weight('balanced', np.unique(labels), labels)
class_weights = class_weights.astype(np.float32)
return features, labels, class_weights
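# Note (added for clarity): sklearn's 'balanced' mode used above weights each class as
# n_samples / (n_classes * count(class)), so under-represented ratings receive larger weights.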
# get processed features and labels from raw text
# input -> features
# output -> list of processed features, list of labels, dict of class_weights
def processFeaturesRawText(data, bert_proc):
features = []
iids = []
sids = []
i = 0
for entry in data:
review_text = entry
input_ids, segment_ids = bert_proc.process_text(review_text)
iids.append(input_ids)
sids.append(segment_ids)
features = [np.array(iids), np.array(sids)]
return features
# split data in train dev test split using stratified
# input -> data
# output -> train, dev, test data
def splitTrainDevTest(data):
train_data = []
dev_data = []
test_data = []
full_indices = np.array(range(len(data)))
full_classes = np.array(list(map(lambda x: x['_source']['Review']['ReviewRating'], data)))
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1)
for tr, te in sss.split(full_indices, full_classes):
aux_train_indexes = tr
test_indexes = te
aux_train_data = []
for i in test_indexes:
test_data.append(data[i])
for i in aux_train_indexes:
aux_train_data.append(data[i])
indices = np.array(range(len(aux_train_data)))
classes = np.array(list(map(lambda x: x['_source']['Review']['ReviewRating'], aux_train_data)))
sss_ = StratifiedShuffleSplit(n_splits=1, test_size=0.111111)
for tr, de in sss_.split(indices, classes):
train_indexes = tr
dev_indexes = de
for i in dev_indexes:
dev_data.append(aux_train_data[i])
for i in train_indexes:
train_data.append(aux_train_data[i])
print(len(train_data), len(dev_data), len(test_data), len(train_data) + len(dev_data) + len(test_data), len(data))
print(len(list(set(train_indexes) & set(dev_indexes) & set(test_indexes))))
return train_data, dev_data, test_data
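# Split sizes implied above (added for clarity): the first StratifiedShuffleSplit holds out 10%
# of the data as the test set; the second takes 11.1111% of the remaining 90% as the dev set
# (~10% of the full data), leaving roughly 80% for training.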
# split the dataset in 4 classes -> 1 -> label 0
# 2,3 -> label 1
# 4 -> label 2
# 5 -> label 3
# input -> dataset list of jsons
# output -> dataset list of jsons
def splitData4Classes(data):
new_data = copy.deepcopy(data)
for entry in new_data:
if entry['_source']['Review']['ReviewRating'] == 1:
entry['_source']['Review']['ReviewRating'] = 0
elif entry['_source']['Review']['ReviewRating'] == 2 or entry['_source']['Review']['ReviewRating'] == 3:
entry['_source']['Review']['ReviewRating'] = 1
elif entry['_source']['Review']['ReviewRating'] == 4:
entry['_source']['Review']['ReviewRating'] = 2
elif entry['_source']['Review']['ReviewRating'] == 5:
entry['_source']['Review']['ReviewRating'] = 3
return new_data
class FScoreCallback(Callback):
def __init__(self, dataset, steps, labels):
super().__init__()
self.steps = steps
self.dataset = dataset
self.labels_int = []
for x in labels:
self.labels_int.append(np.argmax(x))
def on_test_end(self, epoch, logs={}):
y_pred = []
y_true = self.labels_int
predict_results = self.model.predict(self.dataset, steps=self.steps)
for prediction in predict_results:
y_pred.append(np.argmax(prediction))
print()
print(classification_report(y_true, y_pred, digits=4))
def compute_parameters(model_folder_path):
# define input
input_ids = tf.keras.layers.Input(shape=(64), dtype=tf.int32, name="input_ids")
segment_ids = tf.keras.layers.Input(shape=(64), dtype=tf.int32, name="segment_ids")
import BertModel
import tensorflow.keras as keras
import bert
# define model
bert_model = BertModel.BertModel(model_folder_path, 64)
bert_output = bert_model.bert_layer([input_ids, segment_ids])
cls_output = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output)
cls_drop = keras.layers.Dropout(0.1)(cls_output)
fc1 = keras.layers.Dense(units=100, activation="relu")(cls_drop)
prediction = keras.layers.Dense(units=10, activation="softmax")(fc1)
# build model
model = keras.Model(inputs=[input_ids, segment_ids], outputs=prediction)
model.build(input_shape=[(None, 64), (None, 64)])
# load pretrained
bert.load_bert_weights(bert_model.bert_layer, model_folder_path+"bert_model.ckpt")
model.compile(optimizer=keras.optimizers.Adam(lr=0.1), loss = 'categorical_crossentropy', metrics = [tf.keras.metrics.categorical_accuracy])
model.summary()
from tensorflow.python.keras.utils.layer_utils import count_params
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print(trainable_count/1e6)
print(non_trainable_count)
# return model, bert_model
def build_reallife_corpus(model_folder_path):
new_model_folder_path = "/".join(model_folder_path.split("/")[:-2])
new_model_folder_path = os.path.join(new_model_folder_path, "reallife")
train_data = readJson(model_folder_path+"train.json")
train_data = clean_dict(train_data)
new_train_data = add_last_sentence_to_data(train_data)
new_train_data_over = perform_oversampling(new_train_data)
print(len(train_data), len(new_train_data), len(new_train_data_over))
saveData(new_train_data_over, os.path.join(new_model_folder_path, "train.json"))
dev_data = readJson(model_folder_path+"dev.json")
dev_data = clean_dict(dev_data)
new_dev_data = add_last_sentence_to_data(dev_data)
new_dev_data_over = perform_oversampling(new_dev_data)
print(len(dev_data), len(new_dev_data), len(new_dev_data_over))
saveData(new_dev_data_over, os.path.join(new_model_folder_path, "dev.json"))
test_data = readJson(model_folder_path+"test.json")
test_data = clean_dict(test_data)
new_test_data = add_last_sentence_to_data(test_data)
new_test_data_over = perform_oversampling(new_test_data)
print(len(test_data), len(new_test_data), len(new_test_data_over))
saveData(new_test_data_over, os.path.join(new_model_folder_path, "test.json"))
def add_last_sentence_to_data(data):
new_data = copy.deepcopy(data)
new_entries = []
count = 0
for entry in new_data:
review_text = entry['_source']['Review']['ReviewBody']
sentences = sent_tokenize(review_text)
if len(sentences) > 1:
# add new entry to dataset
new_entry = copy.deepcopy(entry)
new_entry['_source']['Review']['ReviewBody'] = sentences[-1]
new_entry['_score'] = 2
new_entries.append(new_entry)
if entry == new_entry:
print(entry)
print(new_entry)
sys.exit()
count += 1
# print(new_entries)
new_data.extend(new_entries)
return new_data
def perform_oversampling(data):
new_data = copy.deepcopy(data)
new_entries = []
counter = [0,0,0,0,0]
for entry in new_data:
label = entry['_source']['Review']['ReviewRating']
counter[label-1] += 1
while True:
random_entry = random.choice(data)
random_label = random_entry['_source']['Review']['ReviewRating']
if counter[random_label-1] == counter[-1]:
continue
else:
new_entries.append(random_entry)
counter[random_label-1] += 1
if counter[0] == counter[1] and counter[1] == counter[2] and counter[2] == counter[3] and counter[3] == counter[4]:
break
print(counter)
new_data.extend(new_entries)
return new_data
def clean_dict(data):
new_data = copy.deepcopy(data)
for entry in new_data:
del entry["_index"]
del entry["_type"]
del entry["_id"]
del entry["_score"]
del entry["_source"]["Review"]["ReviewTitle"]
del entry["_source"]["Review"]["ReviewDate"]
del entry["_source"]["Review"]["ReviewProductVerified"]
del entry["_source"]["Product"]
return new_data
if __name__ == "__main__":
# data = readJson("../Dataset/Reviews/4Classes/train.json")
# computeDatasetStatistics(data, 32, 32)
# print("--------------------------DEV--------------------------")
# data = readJson("../Dataset/Reviews/4Classes/dev.json")
# computeDatasetStatistics(data, 32, 32)
# print("--------------------------TEST--------------------------")
# data = readJson("../Dataset/Reviews/4Classes/test.json")
# computeDatasetStatistics(data, 32, 32)
# compute_parameters("../Models/raw/small/clean/trained_512/ro2/")
# sys.exit()
# # split data
# raw = readJson("../Dataset/Reviews/all_reviews.json", original=True)
# # computeDatasetStatistics(raw, 256, 256)
# train_data, dev_data, test_data = splitTrainDevTest(raw)
# saveData(train_data, "../Dataset/Reviews/emag_train.json")
# saveData(dev_data, "../Dataset/Reviews/emag_dev.json")
# saveData(test_data, "../Dataset/Reviews/emag_test.json")
# raw = readJson("../Dataset/Reviews/all_reviews.json", original=True)
train_data = readJson("../Dataset/Reviews/emag_train.json")
# computeDatasetStatistics(train_data, 256, 256)
dev_data = readJson("../Dataset/Reviews/emag_dev.json")
test_data = readJson("../Dataset/Reviews/emag_test.json")
computeScoreHistogram(train_data, normalize=True)
split_train = splitData4Classes(train_data)
computeScoreHistogram(split_train, normalize=True)
saveData(split_train, "../Dataset/Reviews/4Classes/train.json")
computeScoreHistogram(dev_data, normalize=True)
split_dev = splitData4Classes(dev_data)
computeScoreHistogram(split_dev, normalize=True)
saveData(split_dev, "../Dataset/Reviews/4Classes/dev.json")
computeScoreHistogram(test_data, normalize=True)
split_test = splitData4Classes(test_data)
computeScoreHistogram(split_test, normalize=True)
saveData(split_test, "../Dataset/Reviews/4Classes/test.json")
|
[
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.metrics.classification_report",
"bert.load_bert_weights",
"tensorflow.python.keras.utils.layer_utils.count_params",
"numpy.array",
"tensorflow.keras.layers.Dense",
"nltk.tokenize.sent_tokenize",
"BertModel.BertModel",
"copy.deepcopy",
"sys.exit",
"tensorflow.keras.layers.Input",
"json.loads",
"random.choice",
"random.sample",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"numpy.unique",
"os.path.join",
"random.seed",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.Model",
"json.dump"
] |
[((3640, 3659), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3653, 3659), False, 'import copy\n'), ((7818, 7867), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.1)'}), '(n_splits=1, test_size=0.1)\n', (7840, 7867), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((8309, 8363), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.111111)'}), '(n_splits=1, test_size=0.111111)\n', (8331, 8363), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((9163, 9182), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (9176, 9182), False, 'import copy\n'), ((10462, 10527), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(64)', 'dtype': 'tf.int32', 'name': '"""input_ids"""'}), "(shape=64, dtype=tf.int32, name='input_ids')\n", (10483, 10527), True, 'import tensorflow as tf\n'), ((10548, 10615), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(64)', 'dtype': 'tf.int32', 'name': '"""segment_ids"""'}), "(shape=64, dtype=tf.int32, name='segment_ids')\n", (10569, 10615), True, 'import tensorflow as tf\n'), ((10734, 10776), 'BertModel.BertModel', 'BertModel.BertModel', (['model_folder_path', '(64)'], {}), '(model_folder_path, 64)\n', (10753, 10776), False, 'import BertModel\n'), ((11145, 11209), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[input_ids, segment_ids]', 'outputs': 'prediction'}), '(inputs=[input_ids, segment_ids], outputs=prediction)\n', (11156, 11209), True, 'import tensorflow.keras as keras\n'), ((11290, 11378), 'bert.load_bert_weights', 'bert.load_bert_weights', (['bert_model.bert_layer', "(model_folder_path + 'bert_model.ckpt')"], {}), "(bert_model.bert_layer, model_folder_path +\n 'bert_model.ckpt')\n", (11312, 11378), False, 'import bert\n'), ((11633, 11670), 'tensorflow.python.keras.utils.layer_utils.count_params', 'count_params', (['model.trainable_weights'], {}), '(model.trainable_weights)\n', (11645, 11670), False, 'from tensorflow.python.keras.utils.layer_utils import count_params\n'), ((11697, 11738), 'tensorflow.python.keras.utils.layer_utils.count_params', 'count_params', (['model.non_trainable_weights'], {}), '(model.non_trainable_weights)\n', (11709, 11738), False, 'from tensorflow.python.keras.utils.layer_utils import count_params\n'), ((11983, 12030), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""reallife"""'], {}), "(new_model_folder_path, 'reallife')\n", (11995, 12030), False, 'import os\n'), ((13195, 13214), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (13208, 13214), False, 'import copy\n'), ((13927, 13946), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (13940, 13946), False, 'import copy\n'), ((14705, 14724), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (14718, 14724), False, 'import copy\n'), ((6743, 6757), 'numpy.array', 'np.array', (['iids'], {}), '(iids)\n', (6751, 6757), True, 'import numpy as np\n'), ((6759, 6773), 'numpy.array', 'np.array', (['sids'], {}), '(sids)\n', (6767, 6773), True, 'import numpy as np\n'), ((6841, 6858), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (6850, 6858), True, 'import numpy as np\n'), ((7416, 7430), 'numpy.array', 'np.array', (['iids'], {}), '(iids)\n', (7424, 7430), True, 'import numpy as np\n'), ((7432, 7446), 'numpy.array', 'np.array', (['sids'], {}), '(sids)\n', (7440, 7446), True, 
'import numpy as np\n'), ((10860, 10905), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda seq: seq[:, 0, :])'], {}), '(lambda seq: seq[:, 0, :])\n', (10879, 10905), True, 'import tensorflow.keras as keras\n'), ((10934, 10959), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.1)'], {}), '(0.1)\n', (10954, 10959), True, 'import tensorflow.keras as keras\n'), ((10982, 11030), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(100)', 'activation': '"""relu"""'}), "(units=100, activation='relu')\n", (11000, 11030), True, 'import tensorflow.keras as keras\n'), ((11058, 11108), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (11076, 11108), True, 'import tensorflow.keras as keras\n'), ((12364, 12413), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""train.json"""'], {}), "(new_model_folder_path, 'train.json')\n", (12376, 12413), False, 'import os\n'), ((12721, 12768), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""dev.json"""'], {}), "(new_model_folder_path, 'dev.json')\n", (12733, 12768), False, 'import os\n'), ((13087, 13135), 'os.path.join', 'os.path.join', (['new_model_folder_path', '"""test.json"""'], {}), "(new_model_folder_path, 'test.json')\n", (13099, 13135), False, 'import os\n'), ((13360, 13386), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['review_text'], {}), '(review_text)\n', (13373, 13386), False, 'from nltk.tokenize import sent_tokenize\n'), ((14155, 14174), 'random.choice', 'random.choice', (['data'], {}), '(data)\n', (14168, 14174), False, 'import random\n'), ((732, 748), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (742, 748), False, 'import json\n'), ((4086, 4111), 'json.dump', 'json.dump', (['entry', 'outfile'], {}), '(entry, outfile)\n', (4095, 4111), False, 'import json\n'), ((5472, 5489), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5483, 5489), False, 'import random\n'), ((5522, 5564), 'random.sample', 'random.sample', (['majority_list', 'sample_count'], {}), '(majority_list, sample_count)\n', (5535, 5564), False, 'import random\n'), ((5573, 5586), 'random.seed', 'random.seed', ([], {}), '()\n', (5584, 5586), False, 'import random\n'), ((10332, 10379), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'digits': '(4)'}), '(y_true, y_pred, digits=4)\n', (10353, 10379), False, 'from sklearn.metrics import classification_report\n'), ((11402, 11431), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.1)'}), '(lr=0.1)\n', (11423, 11431), True, 'import tensorflow.keras as keras\n'), ((13481, 13501), 'copy.deepcopy', 'copy.deepcopy', (['entry'], {}), '(entry)\n', (13494, 13501), False, 'import copy\n'), ((10021, 10033), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (10030, 10033), True, 'import numpy as np\n'), ((10278, 10299), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (10287, 10299), True, 'import numpy as np\n'), ((13766, 13776), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13774, 13776), False, 'import sys\n')]
|
from sysu_dataset import SYSU
import numpy as np
import scipy
import itertools
import cv2
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from config import *
vox_size=54
all_tups = np.array(list(itertools.product(range(vox_size), repeat=2)))
rot_array = np.arange(vox_size*vox_size).reshape([vox_size,vox_size])
K = 5
T = 10
class SYSUdataset(Dataset):
def __init__(self, test=False, full_train=False):
# Underlying dataset and features
self.dataset = SYSU()
# What to return
self.images = DATA_IMAGES
self.images_3D = DATA_IMAGES_3D
self.op_flow = DATA_OP_FLOW
self.op_flow_2D = DATA_OP_FLOW_2D
self.single_feature = DATA_SINGLE_FEAT
self.augmentation = DATA_AUGMENTATION
# Train, validation, test split
self.train = full_train
if test:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[1]
else:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[0]
def __len__(self):
return len(self.vid_ids)
def image_transforms(self, numpy_imgs):
''' Transformations on a list of images
Returns
-------
images : Torch Tensor
Stacked tensor of all images with the transformations applied
'''
# Get random parameters to apply same transformation to all images in list
color_jitter = transforms.ColorJitter.get_params(.25,.25,.25,.25)
rotation_param = transforms.RandomRotation.get_params((-15,15))
crop_params = None
# Apply transformations
images = []
for numpy_img in numpy_imgs:
i = transforms.functional.to_pil_image(numpy_img)
i = transforms.functional.resize(i, (224,224))
if self.train:
i = color_jitter(i)
i = transforms.functional.rotate(i, rotation_param)
i = transforms.functional.to_tensor(i)
i = transforms.functional.normalize(i, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
images.append(i)
return torch.stack(images)
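    # Note (added for clarity): the mean/std passed to normalize() above are the standard
    # ImageNet statistics expected by torchvision models pre-trained on ImageNet.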
def op_flow_transforms(self, op_flow):
''' Transformations on a tensor of optical flow voxel grids
Parameters
----------
op_flow : ndarray
Returns
-------
op_flow : Torch Tensor
A torch tensor of an optical flow voxel grid with the
transformations (rotation, scale, translation) applied to it
'''
def translate(op_flow):
# op_flow[:,0::3,:,:,:] ---> x axis vectors
# op_flow = scipy.ndimage.interpolation.shift(op_flow, [0,0,x_move,y_move,z_move], cval=0, order=0) # Slower alternative
# Get amount to shift
max_shift = int(op_flow.shape[2] * 0.10)
x_move, y_move, z_move = np.random.randint(-max_shift, max_shift, 3)
# Translate values
if x_move > 0:
op_flow[:,:,x_move:,:,:] = op_flow[:,:,:-x_move,:,:]
op_flow[:,:,:x_move,:,:] = 0
elif x_move < 0:
op_flow[:,:,:x_move,:,:] = op_flow[:,:,-x_move:,:,:]
op_flow[:,:,x_move:,:,:] = 0
if y_move > 0:
op_flow[:,:,:,y_move:,:] = op_flow[:,:,:,:-y_move,:]
op_flow[:,:,:,:y_move,:] = 0
elif y_move < 0:
op_flow[:,:,:,:y_move,:] = op_flow[:,:,:,-y_move:,:]
op_flow[:,:,:,y_move:,:] = 0
if z_move > 0:
op_flow[:,:,:,:,z_move:] = op_flow[:,:,:,:,:-z_move]
op_flow[:,:,:,:,:z_move] = 0
elif z_move < 0:
op_flow[:,:,:,:,:z_move] = op_flow[:,:,:,:,-z_move:]
op_flow[:,:,:,:,z_move:] = 0
return op_flow
def rotate(op_flow):
''' Rotate an optical flow tensor a random amount about the y axis '''
# Get angle
angle = np.random.randint(-45, 45)
# Rotate positions
rot_mat = scipy.ndimage.interpolation.rotate(rot_array, angle, (0,1), reshape=False, order=0)
op_flow_new = np.zeros(op_flow.shape, dtype=np.float32)
tup = all_tups[rot_mat]
op_flow_new = op_flow[:,:,tup[:, :, 0],:,tup[:, :, 1]].transpose(2,3,0,4,1)
# Rotate flow vectors
cos = np.cos(np.radians(-angle))
sin = np.sin(np.radians(-angle))
x_copy = op_flow_new[:,0].copy()
z_copy = op_flow_new[:,2].copy()
op_flow_new[:,0] = x_copy * cos + z_copy * sin
op_flow_new[:,2] = x_copy * -sin + z_copy * cos
return op_flow_new
def scale(op_flow):
return op_flow
# import datetime as dt
if self.train:
op_flow = translate(op_flow)
op_flow = rotate(op_flow)
return torch.from_numpy(op_flow)
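    # Note on rotate() above (added for clarity): after the voxel positions are remapped, the
    # stored flow vectors are rotated about the y axis as well, using the 2D rotation
    # x' = x*cos(a) + z*sin(a), z' = -x*sin(a) + z*cos(a) applied to the x/z channels
    # (with the angle negated to match the position remapping).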
def get_3D_op_flow(self, vid_id):
# Load the data
feat_values = np.load("{}/{:05}.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_nonzero = np.load("{}/{:05}.nonzeros.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_shape = np.load("{}/{:05}.shape.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
# Rebuild the feature from the saved data
feature = np.zeros(feat_shape, np.float32)
feature[tuple(feat_nonzero)] = feat_values
return feature
def __getitem__(self, idx):
vid_id = self.vid_ids[idx]
to_return = []
# Images
if self.images:
images = np.load('{}/{:05}.npy'.format(CACHE_2D_IMAGES_SYSU, vid_id))
images = self.image_transforms(images)
to_return.append(images)
# Optical flow 3D
if self.op_flow:
op_flow = self.get_3D_op_flow(vid_id)
op_flow = self.op_flow_transforms(op_flow)
to_return.append(op_flow)
# Labels
to_return.append(self.dataset.get_label(vid_id))
return to_return
def get_train_loader():
dataset = SYSUdataset(full_train=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=True, num_workers=NUM_WORKERS,
pin_memory=True)
def get_test_loader():
dataset = SYSUdataset(test=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,
pin_memory=True)
|
[
"numpy.radians",
"torchvision.transforms.functional.to_tensor",
"torchvision.transforms.RandomRotation.get_params",
"torchvision.transforms.functional.to_pil_image",
"torch.stack",
"torch.from_numpy",
"numpy.zeros",
"numpy.random.randint",
"torchvision.transforms.functional.rotate",
"torchvision.transforms.functional.resize",
"torch.utils.data.DataLoader",
"scipy.ndimage.interpolation.rotate",
"torchvision.transforms.ColorJitter.get_params",
"torchvision.transforms.functional.normalize",
"sysu_dataset.SYSU",
"numpy.arange"
] |
[((6173, 6298), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'DATA_BATCH_SIZE', 'shuffle': '(True)', 'num_workers': 'NUM_WORKERS', 'pin_memory': '(True)'}), '(dataset, batch_size=DATA_BATCH_SIZE, shuffle=\n True, num_workers=NUM_WORKERS, pin_memory=True)\n', (6200, 6298), False, 'import torch\n'), ((6446, 6572), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'DATA_BATCH_SIZE', 'shuffle': '(False)', 'num_workers': 'NUM_WORKERS', 'pin_memory': '(True)'}), '(dataset, batch_size=DATA_BATCH_SIZE, shuffle=\n False, num_workers=NUM_WORKERS, pin_memory=True)\n', (6473, 6572), False, 'import torch\n'), ((306, 336), 'numpy.arange', 'np.arange', (['(vox_size * vox_size)'], {}), '(vox_size * vox_size)\n', (315, 336), True, 'import numpy as np\n'), ((525, 531), 'sysu_dataset.SYSU', 'SYSU', ([], {}), '()\n', (529, 531), False, 'from sysu_dataset import SYSU\n'), ((1449, 1506), 'torchvision.transforms.ColorJitter.get_params', 'transforms.ColorJitter.get_params', (['(0.25)', '(0.25)', '(0.25)', '(0.25)'], {}), '(0.25, 0.25, 0.25, 0.25)\n', (1482, 1506), True, 'import torchvision.transforms as transforms\n'), ((1525, 1572), 'torchvision.transforms.RandomRotation.get_params', 'transforms.RandomRotation.get_params', (['(-15, 15)'], {}), '((-15, 15))\n', (1561, 1572), True, 'import torchvision.transforms as transforms\n'), ((2142, 2161), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (2153, 2161), False, 'import torch\n'), ((4946, 4971), 'torch.from_numpy', 'torch.from_numpy', (['op_flow'], {}), '(op_flow)\n', (4962, 4971), False, 'import torch\n'), ((5380, 5412), 'numpy.zeros', 'np.zeros', (['feat_shape', 'np.float32'], {}), '(feat_shape, np.float32)\n', (5388, 5412), True, 'import numpy as np\n'), ((1705, 1750), 'torchvision.transforms.functional.to_pil_image', 'transforms.functional.to_pil_image', (['numpy_img'], {}), '(numpy_img)\n', (1739, 1750), True, 'import torchvision.transforms as transforms\n'), ((1767, 1810), 'torchvision.transforms.functional.resize', 'transforms.functional.resize', (['i', '(224, 224)'], {}), '(i, (224, 224))\n', (1795, 1810), True, 'import torchvision.transforms as transforms\n'), ((1957, 1991), 'torchvision.transforms.functional.to_tensor', 'transforms.functional.to_tensor', (['i'], {}), '(i)\n', (1988, 1991), True, 'import torchvision.transforms as transforms\n'), ((2008, 2102), 'torchvision.transforms.functional.normalize', 'transforms.functional.normalize', (['i'], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(i, mean=[0.485, 0.456, 0.406], std=[0.229, \n 0.224, 0.225])\n', (2039, 2102), True, 'import torchvision.transforms as transforms\n'), ((2902, 2945), 'numpy.random.randint', 'np.random.randint', (['(-max_shift)', 'max_shift', '(3)'], {}), '(-max_shift, max_shift, 3)\n', (2919, 2945), True, 'import numpy as np\n'), ((4015, 4041), 'numpy.random.randint', 'np.random.randint', (['(-45)', '(45)'], {}), '(-45, 45)\n', (4032, 4041), True, 'import numpy as np\n'), ((4096, 4184), 'scipy.ndimage.interpolation.rotate', 'scipy.ndimage.interpolation.rotate', (['rot_array', 'angle', '(0, 1)'], {'reshape': '(False)', 'order': '(0)'}), '(rot_array, angle, (0, 1), reshape=False,\n order=0)\n', (4130, 4184), False, 'import scipy\n'), ((4206, 4247), 'numpy.zeros', 'np.zeros', (['op_flow.shape'], {'dtype': 'np.float32'}), '(op_flow.shape, dtype=np.float32)\n', (4214, 4247), True, 'import numpy as np\n'), ((1893, 1940), 'torchvision.transforms.functional.rotate', 
'transforms.functional.rotate', (['i', 'rotation_param'], {}), '(i, rotation_param)\n', (1921, 1940), True, 'import torchvision.transforms as transforms\n'), ((4432, 4450), 'numpy.radians', 'np.radians', (['(-angle)'], {}), '(-angle)\n', (4442, 4450), True, 'import numpy as np\n'), ((4477, 4495), 'numpy.radians', 'np.radians', (['(-angle)'], {}), '(-angle)\n', (4487, 4495), True, 'import numpy as np\n')]
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = atm.homedir() + 'datastore/merra/daily/'
year = 2014
subset = '_40E-120E_90S-90N'
def get_var(datadir, varnm, subset, year):
filenm = '%smerra_%s%s_%d.nc' % (datadir, varnm, subset, year)
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
return var
uq_int = get_var(datadir, 'UFLXQV', subset, year)
vq_int = get_var(datadir, 'VFLXQV', subset, year)
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfcbar = mfc.mean(dim='YDim').mean(dim='XDim')
# Test atm.gradient
a = atm.constants.radius_earth.values
latdim, londim = 1, 2
lat = atm.get_coord(uq_int, 'lat')
latrad = np.radians(lat)
latrad[abs(lat) > 89] = np.nan
coslat = xray.DataArray(np.cos(latrad), coords={'YDim' : lat})
lon = atm.get_coord(uq_int, 'lon')
lonrad = np.radians(lon)
mfc_x = atm.gradient(uq_int, lonrad, londim) / (a*coslat)
mfc_y = atm.gradient(vq_int * coslat, latrad, latdim) / (a*coslat)
mfc_test = mfc_x + mfc_y
mfc_test = - atm.precip_convert(mfc_test, 'kg/m2/s', 'mm/day')
mfc_test_bar = mfc_test.mean(dim='YDim').mean(dim='XDim')
diff = mfc_test - mfc
print(diff.max())
print(diff.min())
plt.plot(mfcbar)
plt.plot(mfc_test_bar)
print(mfc_test_bar - mfcbar)
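# The hand-rolled check above implements
#   MFC = -[ 1/(a*cos(lat)) * d(UQ)/dlon + 1/(a*cos(lat)) * d(VQ*cos(lat))/dlat ],
# i.e. minus the divergence of the vertically integrated moisture flux in spherical coordinates,
# converted from kg/m2/s to mm/day for comparison with atm.moisture_flux_conv.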
# ----------------------------------------------------------------------
# Vertical gradient du/dp
lon1, lon2 = 40, 120
pmin, pmax = 100, 300
subset_dict = {'XDim' : (lon1, lon2), 'Height' : (pmin, pmax)}
urls = merra.merra_urls([year])
month, day = 7, 15
url = urls['%d%02d%02d' % (year, month, day)]
with xray.open_dataset(url) as ds:
u = atm.subset(ds['U'], subset_dict, copy=False)
u = u.mean(dim='TIME')
pres = u['Height']
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dp = np.gradient(pres)
# Calc 1
dims = u.shape
dudp = np.nan * u
for i in range(dims[1]):
for j in range(dims[2]):
dudp.values[:, i, j] = np.gradient(u[:, i, j], dp)
# Test atm.gradient
dudp_test = atm.gradient(u, pres, axis=0)
diff = dudp_test - dudp
print(diff.max())
print(diff.min())
|
[
"numpy.radians",
"numpy.gradient",
"atmos.gradient",
"atmos.homedir",
"matplotlib.pyplot.plot",
"atmos.pres_convert",
"atmos.get_coord",
"xray.open_dataset",
"atmos.moisture_flux_conv",
"numpy.cos",
"merra.merra_urls",
"sys.path.append",
"atmos.precip_convert",
"atmos.subset"
] |
[((11, 71), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/atmos-tools"""'], {}), "('/home/jwalker/dynamics/python/atmos-tools')\n", (26, 71), False, 'import sys\n'), ((72, 131), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/atmos-read"""'], {}), "('/home/jwalker/dynamics/python/atmos-read')\n", (87, 131), False, 'import sys\n'), ((770, 826), 'atmos.moisture_flux_conv', 'atm.moisture_flux_conv', (['uq_int', 'vq_int'], {'already_int': '(True)'}), '(uq_int, vq_int, already_int=True)\n', (792, 826), True, 'import atmos as atm\n'), ((961, 989), 'atmos.get_coord', 'atm.get_coord', (['uq_int', '"""lat"""'], {}), "(uq_int, 'lat')\n", (974, 989), True, 'import atmos as atm\n'), ((999, 1014), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (1009, 1014), True, 'import numpy as np\n'), ((1115, 1143), 'atmos.get_coord', 'atm.get_coord', (['uq_int', '"""lon"""'], {}), "(uq_int, 'lon')\n", (1128, 1143), True, 'import atmos as atm\n'), ((1153, 1168), 'numpy.radians', 'np.radians', (['lon'], {}), '(lon)\n', (1163, 1168), True, 'import numpy as np\n'), ((1502, 1518), 'matplotlib.pyplot.plot', 'plt.plot', (['mfcbar'], {}), '(mfcbar)\n', (1510, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1541), 'matplotlib.pyplot.plot', 'plt.plot', (['mfc_test_bar'], {}), '(mfc_test_bar)\n', (1527, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1810), 'merra.merra_urls', 'merra.merra_urls', (['[year]'], {}), '([year])\n', (1802, 1810), False, 'import merra\n'), ((2018, 2067), 'atmos.pres_convert', 'atm.pres_convert', (['pres', "pres.attrs['units']", '"""Pa"""'], {}), "(pres, pres.attrs['units'], 'Pa')\n", (2034, 2067), True, 'import atmos as atm\n'), ((2073, 2090), 'numpy.gradient', 'np.gradient', (['pres'], {}), '(pres)\n', (2084, 2090), True, 'import numpy as np\n'), ((2280, 2309), 'atmos.gradient', 'atm.gradient', (['u', 'pres'], {'axis': '(0)'}), '(u, pres, axis=0)\n', (2292, 2309), True, 'import atmos as atm\n'), ((381, 394), 'atmos.homedir', 'atm.homedir', ([], {}), '()\n', (392, 394), True, 'import atmos as atm\n'), ((1070, 1084), 'numpy.cos', 'np.cos', (['latrad'], {}), '(latrad)\n', (1076, 1084), True, 'import numpy as np\n'), ((1178, 1214), 'atmos.gradient', 'atm.gradient', (['uq_int', 'lonrad', 'londim'], {}), '(uq_int, lonrad, londim)\n', (1190, 1214), True, 'import atmos as atm\n'), ((1236, 1281), 'atmos.gradient', 'atm.gradient', (['(vq_int * coslat)', 'latrad', 'latdim'], {}), '(vq_int * coslat, latrad, latdim)\n', (1248, 1281), True, 'import atmos as atm\n'), ((1334, 1383), 'atmos.precip_convert', 'atm.precip_convert', (['mfc_test', '"""kg/m2/s"""', '"""mm/day"""'], {}), "(mfc_test, 'kg/m2/s', 'mm/day')\n", (1352, 1383), True, 'import atmos as atm\n'), ((1881, 1903), 'xray.open_dataset', 'xray.open_dataset', (['url'], {}), '(url)\n', (1898, 1903), False, 'import xray\n'), ((1919, 1963), 'atmos.subset', 'atm.subset', (["ds['U']", 'subset_dict'], {'copy': '(False)'}), "(ds['U'], subset_dict, copy=False)\n", (1929, 1963), True, 'import atmos as atm\n'), ((583, 608), 'xray.open_dataset', 'xray.open_dataset', (['filenm'], {}), '(filenm)\n', (600, 608), False, 'import xray\n'), ((2219, 2246), 'numpy.gradient', 'np.gradient', (['u[:, i, j]', 'dp'], {}), '(u[:, i, j], dp)\n', (2230, 2246), True, 'import numpy as np\n')]
|
import os
import json
import numpy as np
import matplotlib.pyplot as plt
def compute_iou(box_1, box_2):
'''
    This function takes a pair of bounding boxes and returns their
    intersection-over-union (IoU).
'''
intersection = 0
tlr1, tlc1, brr1, brc1 = box_1[0], box_1[1], box_1[2], box_1[3]
tlr2, tlc2, brr2, brc2 = box_2[0], box_2[1], box_2[2], box_2[3]
dx = min(brr1, brr2) - max(tlr1, tlr2)
    dy = min(brc1, brc2) - max(tlc1, tlc2)
if (dx>=0) and (dy>=0):
intersection = dx * dy
area1 = (brc1 - tlc1) * (brr1 - tlr1)
area2 = (brc2 - tlc2) * (brr2 - tlr2)
union = area1 + area2 - intersection
iou = intersection / union
assert (iou >= 0) and (iou <= 1.0)
return iou
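# Quick sanity check of compute_iou with made-up boxes (illustrative values,
# not part of the assignment data): two 10x10 boxes that overlap in a 5x5
# region share an intersection of 25 against a union of 175.
assert abs(compute_iou([0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]) - 25.0 / 175.0) < 1e-9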
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
'''
This function takes a pair of dictionaries (with our JSON format; see ex.)
corresponding to predicted and ground truth bounding boxes for a collection
of images and returns the number of true positives, false positives, and
false negatives.
<preds> is a dictionary containing predicted bounding boxes and confidence
scores for a collection of images.
<gts> is a dictionary containing ground truth bounding boxes for a
collection of images.
'''
TP = 0
FP = 0
FN = 0
for pred_file, pred in preds.items():
gt = gts[pred_file]
for i in range(len(gt)):
not_found = True
for j in range(len(pred)):
iou = compute_iou(pred[j][:4], gt[i])
if iou >= iou_thr and pred[j][4] >= conf_thr:
TP += 1
not_found = False
break
elif pred[j][4] >= conf_thr:
FP += 1
not_found = False
break
if not_found:
FN += 1
return TP, FP, FN
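# Minimal usage sketch for compute_counts with made-up boxes (purely
# illustrative; the real inputs are the JSON files loaded below). The single
# confident prediction overlaps its ground truth well above the default 0.5
# IoU threshold, so this gives one true positive and nothing else.
assert compute_counts({'ex.jpg': [[0.0, 0.0, 10.0, 10.0, 0.9]]},
                      {'ex.jpg': [[1.0, 1.0, 10.0, 10.0]]}) == (1, 0, 0)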
# set a path for predictions and annotations:
preds_path = 'hw02_preds'
gts_path = 'hw02_annotations'
# load splits:
split_path = 'hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
# Set this parameter to True when you're done with algorithm development:
done_tweaking = True
'''
Load training data.
'''
with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
preds_train = json.load(f)
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
gts_train = json.load(f)
if done_tweaking:
'''
Load test data.
'''
with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
preds_test = json.load(f)
with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
gts_test = json.load(f)
# For a fixed IoU threshold, vary the confidence threshold to trace out a PR curve.
# The code below does this on the training set for IoU thresholds of 0.25, 0.5, and 0.75.
def compute_PR(iou, preds, gts):
lst = []
for fname in preds:
if preds[fname] != []:
for pred in preds[fname]:
lst.append(pred[4])
confidence_thrs = np.sort(np.array(lst,dtype=float)) # using (ascending) list of confidence scores as thresholds
tp = np.zeros(len(confidence_thrs))
fp = np.zeros(len(confidence_thrs))
fn = np.zeros(len(confidence_thrs))
for i, conf_thr in enumerate(confidence_thrs):
tp[i], fp[i], fn[i] = compute_counts(preds, gts, iou_thr=iou, conf_thr=conf_thr)
# Plot training set PR curves
recall = np.zeros(len(confidence_thrs))
precision = np.zeros(len(confidence_thrs))
for i, elem in enumerate(tp):
precision[i] = tp[i]/(tp[i] + fp[i])
recall[i] = tp[i]/(tp[i] + fn[i])
return recall, precision
recall, precision = compute_PR(0.5, preds_train, gts_train)
recall_l, precision_l = compute_PR(0.25, preds_train, gts_train)
recall_m, precision_m = compute_PR(0.75, preds_train, gts_train)
plt.plot(recall, precision, color='black', marker='o')
plt.plot(recall_l, precision_l, color='blue', marker='o')
plt.plot(recall_m, precision_m, color='green', marker='o')
plt.legend(["IOU 0.5", "IOU 0.25", "IOU 0.75"])
plt.title("PR Curves Training")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
if done_tweaking:
print('Code for plotting test set PR curves.')
recall, precision = compute_PR(0.5, preds_test, gts_test)
recall_l, precision_l = compute_PR(0.25, preds_test, gts_test)
recall_m, precision_m = compute_PR(0.75, preds_test, gts_test)
plt.figure()
plt.plot(recall, precision, color='black', marker='o')
plt.plot(recall_l, precision_l, color='blue', marker='o')
plt.plot(recall_m, precision_m, color='green', marker='o')
plt.legend(["IOU 0.5", "IOU 0.25", "IOU 0.75"])
plt.title("PR Curves Testing")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.array",
"matplotlib.pyplot.figure",
"json.load",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((4004, 4058), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {'color': '"""black"""', 'marker': '"""o"""'}), "(recall, precision, color='black', marker='o')\n", (4012, 4058), True, 'import matplotlib.pyplot as plt\n'), ((4059, 4116), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_l', 'precision_l'], {'color': '"""blue"""', 'marker': '"""o"""'}), "(recall_l, precision_l, color='blue', marker='o')\n", (4067, 4116), True, 'import matplotlib.pyplot as plt\n'), ((4117, 4175), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_m', 'precision_m'], {'color': '"""green"""', 'marker': '"""o"""'}), "(recall_m, precision_m, color='green', marker='o')\n", (4125, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4223), 'matplotlib.pyplot.legend', 'plt.legend', (["['IOU 0.5', 'IOU 0.25', 'IOU 0.75']"], {}), "(['IOU 0.5', 'IOU 0.25', 'IOU 0.75'])\n", (4186, 4223), True, 'import matplotlib.pyplot as plt\n'), ((4224, 4255), 'matplotlib.pyplot.title', 'plt.title', (['"""PR Curves Training"""'], {}), "('PR Curves Training')\n", (4233, 4255), True, 'import matplotlib.pyplot as plt\n'), ((4256, 4276), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (4266, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4277, 4300), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4287, 4300), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4309, 4311), True, 'import matplotlib.pyplot as plt\n'), ((2107, 2155), 'os.path.join', 'os.path.join', (['split_path', '"""file_names_train.npy"""'], {}), "(split_path, 'file_names_train.npy')\n", (2119, 2155), False, 'import os\n'), ((2182, 2229), 'os.path.join', 'os.path.join', (['split_path', '"""file_names_test.npy"""'], {}), "(split_path, 'file_names_test.npy')\n", (2194, 2229), False, 'import os\n'), ((2439, 2451), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2448, 2451), False, 'import json\n'), ((2543, 2555), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2552, 2555), False, 'import json\n'), ((4584, 4596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4594, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4601, 4655), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {'color': '"""black"""', 'marker': '"""o"""'}), "(recall, precision, color='black', marker='o')\n", (4609, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4717), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_l', 'precision_l'], {'color': '"""blue"""', 'marker': '"""o"""'}), "(recall_l, precision_l, color='blue', marker='o')\n", (4668, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4722, 4780), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_m', 'precision_m'], {'color': '"""green"""', 'marker': '"""o"""'}), "(recall_m, precision_m, color='green', marker='o')\n", (4730, 4780), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4832), 'matplotlib.pyplot.legend', 'plt.legend', (["['IOU 0.5', 'IOU 0.25', 'IOU 0.75']"], {}), "(['IOU 0.5', 'IOU 0.25', 'IOU 0.75'])\n", (4795, 4832), True, 'import matplotlib.pyplot as plt\n'), ((4837, 4867), 'matplotlib.pyplot.title', 'plt.title', (['"""PR Curves Testing"""'], {}), "('PR Curves Testing')\n", (4846, 4867), True, 'import matplotlib.pyplot as plt\n'), ((4872, 4892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (4882, 4892), True, 'import matplotlib.pyplot as plt\n'), ((4897, 4920), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4907, 4920), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4933, 4935), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2410), 'os.path.join', 'os.path.join', (['preds_path', '"""preds_train.json"""'], {}), "(preds_path, 'preds_train.json')\n", (2378, 2410), False, 'import os\n'), ((2467, 2515), 'os.path.join', 'os.path.join', (['gts_path', '"""annotations_train.json"""'], {}), "(gts_path, 'annotations_train.json')\n", (2479, 2515), False, 'import os\n'), ((2710, 2722), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2719, 2722), False, 'import json\n'), ((2824, 2836), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2833, 2836), False, 'import json\n'), ((3184, 3210), 'numpy.array', 'np.array', (['lst'], {'dtype': 'float'}), '(lst, dtype=float)\n', (3192, 3210), True, 'import numpy as np\n'), ((2635, 2678), 'os.path.join', 'os.path.join', (['preds_path', '"""preds_test.json"""'], {}), "(preds_path, 'preds_test.json')\n", (2647, 2678), False, 'import os\n'), ((2746, 2793), 'os.path.join', 'os.path.join', (['gts_path', '"""annotations_test.json"""'], {}), "(gts_path, 'annotations_test.json')\n", (2758, 2793), False, 'import os\n')]
|
# grasp.py
# This script implements the GRASP heuristic for the dynamic bin packing
# problem.
# Author: <NAME>
from __future__ import print_function
import numpy as np
import random
import solutions_dynamic as solmaker
import sys
from copy import deepcopy
from itertools import combinations
from math import ceil, sqrt
from operator import attrgetter
class BPP:
# This class groups the bin packing problem information and performs
# the GRASP operations.
def __init__(self, n, cookies, moop):
self.beta = 5 # Cardinality restriction
self.n = int(n) # Number of cookies to sort
self.cookies = cookies # dictionary of item objects
self.moop = moop # Multiobjective problem class
self.lb = 0 # initialize lower bound
self.calclowerbound()
def generate_newsol(self, index, p_ls1, p_ls2, *args):
# This module creates an instance of a NewSolution class and
# performs the generate_newsol procedure
newbie = NewSolution(self.beta, self.n, self.cookies, self.moop)
newsol = newbie.make_newsol(index, *args)
newsol = self.checkandfit(newsol)
p = index + 1 # ID number for first neighbor
rannum = random.random()
if rannum < p_ls1:
if newsol.getopenbins() > self.lb:
p, neighbors = self.ls1(p, 1, newsol)
else:
p, neighbors = self.bin_mutation(p, 1, newsol)
elif rannum < p_ls2:
p, neighbors = self.ls2(p, 1, newsol)
else:
p, neighbors = self.ls3(p, 1, newsol)
if neighbors:
winner = self.test_domination(newsol, neighbors[0])
return p, winner
return p, newsol
def checkandfit(self, solution):
# This function checks the feasibility of a solution and calculates fitness
# values.
solution = self.moop.calcfeasibility(solution)
checkformismatch(solution.getx(), solution.getvlrep())
fits = self.moop.calcfits(solution)
solution.updatefitvals(fits)
return solution
def test_domination(self, solution, neighbor):
# This function determines if neighbor dominates solution.
u = solution.getfits()
v = neighbor.getfits()
if dom2(v, u):
return neighbor
else:
return solution
def ls_time(self, solution, rcl_t):
# This function seeks to find a better time to fill bins
# Start by finding the dynamic residual matrix for the cooling rack
neighbor = deepcopy(solution)
tfill = neighbor.gettfill()
i_tlowtohigh = list(np.argsort(tfill[:neighbor.openbins], axis=0))
for i in i_tlowtohigh:
neighbor, rcl_t = self.find_new_tfilli(i, neighbor, rcl_t)
# Check if modified solution is nondominated
neighbor = self.checkandfit(neighbor)
winner = self.test_domination(solution, neighbor)
return winner
def find_new_tfilli(self, i, solution, rcl_t):
# This function determines a new time for box i to be filled and updates
# the RCLTime instance
vlrep = solution.getvlrep()
tfill = solution.gettfill()
told = tfill[i]
tmin = self.get_box_tmin(vlrep[i])
kwargs = {'mode': 'hload', 'nmove': len(vlrep[i]), 'told': told}
t, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t:
solution.edit_tfilli(i, t)
# Adapt Greedy Function
rcl_t.adapt_changetime(told, t, len(vlrep[i]))
return solution, rcl_t
def get_feasible_tfilli(self, rcl_t, tmin, **kwargs):
# This function locates a new value for tfill[i] that doesn't violate
# rack or fill limits
# Find new time for box i
t_new, p_t, rcl_t = self.find_new_time_value(rcl_t, tmin, **kwargs)
if not t_new:
return None, rcl_t
kappa = 0 # Counter to exit loop
# Check if possible to fill in period
while rcl_t.res_fill[p_t] < 1:
if kappa == 10:
return None, rcl_t
# If not possible, find new time value
t_new, p_t, rcl_t = self.find_new_time_value(rcl_t, tmin, **kwargs)
if not t_new:
return None, rcl_t
kappa += 1
# If returning t_new to open bin, reduce fill capacity by 1
rcl_t.res_fill[p_t] -= 1
return t_new, rcl_t
def get_box_tmin(self, vlrepi):
# Find minimum time for box i
boxi_contents = {k: v for k, v in self.cookies.items() if k in vlrepi}
maxbatch = max(boxi_contents.values(), key=attrgetter('batch')).batch
tmin = maxbatch * 600
return tmin
def find_new_time_value(self, rcl_t, tmin, **kwargs):
# This module retrieves a new time value and also returns which period
# it belongs to
t_new = rcl_t.get_new_t(tmin, **kwargs)
if not t_new:
return None, None, rcl_t
t_p = self.find_t_in_fill_periods(t_new, rcl_t)
return t_new, t_p, rcl_t
def find_t_in_fill_periods(self, t, rcl_t):
# If the new time value is beyond the current fill periods, extend
while t > rcl_t.t_t[-1]:
rcl_t.extend_fill_periods()
# Find the period containing t_new
tlist = np.where(t >= np.array(rcl_t.t_t))[0]
return tlist[-1]
def ls1(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the first objective:
# minimizing the number of bins in use
k = 0
neighbors = []
searchfrom = solution
while k < numls:
coolneighbor, rcl_t = self.ls1_loading(searchfrom)
if coolneighbor:
k += 1
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls2(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the second objective:
# minimizing the weighted average initial heat in a box
# p - current id number for new solution
# numls - number of neighbors to find during local search
# Returns updated p and list of neighbors
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.ls2_loading(k, searchfrom)
if coolneighbor:
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls3(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the third objective:
# minimizing the maximum time to move to store front.
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.ls3_loading(k, searchfrom)
if coolneighbor:
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls1_loading(self, searchfrom):
# This function attempts to empty the least filled bin and move its
# cookies into available boxes.
u = searchfrom.getfits()
vlrep = searchfrom.getvlrep()
r, rcl_t = self.getresiduals(vlrep, searchfrom.gettfill())
copy = deepcopy(searchfrom)
half = len(vlrep) // 2
for iloop in range(half):
# Find the emptiest bin's index number
lengths = [len(i) for i in copy.getvlrep()]
i = np.argmin(np.array(lengths))
copy, r, rcl_t = self.empty_bin(i, copy, r, rcl_t)
# If a nondominated solution wasn't found, return nothing
copy = self.checkandfit(copy)
v = copy.getfits()
if not dom2(u, v):
return copy, rcl_t
return None, rcl_t
def empty_bin(self, i, copy, r, rcl_t):
# This function moves items in box i to other boxes
for j in list(copy.getvlrep()[i]):
# Find rcl_bins
tfill = copy.gettfill()
rcl_bins = self.ls1_makercl(i, j, r, rcl_t, tfill)
if len(rcl_bins) == 0:
return copy, r, rcl_t
# Pick random bin
inew = random.choice(rcl_bins)
# Move cookie to new bin
copy.moveitem(i, j, inew)
r = self.update_spaceresiduals(r, i, inew)
r[i, 1], r[inew, 1] = rcl_t.adapt_movebins(tfill[i], tfill[inew])
return copy, r, rcl_t
def ls1_makercl(self, iold, j, r, rcl_t, tfill):
# This function returns the restricted candidate list for cookie
# j to move into based on the dot product strategy
# Set weights for the dot product array (1/boxcap, 1/coolrackcap)
weights = [1.0 / self.moop.boxcap, 1.0 / self.moop.coolrack]
# The cookie should not move into a box that is filled until after
# it is done baking
tmin = self.cookies.get(j).getbatch() * 600
tmax = rcl_t.get_tmax(tmin, 1)
options_byt = [i for i in range(self.n) if tfill[i] > tmin]
if tfill[iold] != tmin:
options_byt.remove(iold)
# Form dot product array
dparray = np.zeros(self.n)
for i in options_byt:
if tfill[i] <= tmax:
# Make sure there is space available
if r[i, 0] > 1:
tk = rcl_t.find_t_in_timeline(tfill[i])
# Filling early will reduce onrack for all after time[tk]
onrack = np.subtract(self.moop.coolrack, rcl_t.space[tk:])
maxonrack_fromtk = max(onrack)
dparray[i] = weights[0] * r[i, 0] + weights[1] * maxonrack_fromtk
# Max fill
if len(np.nonzero(dparray)[0]) > self.beta:
options = list(np.argsort(-dparray)[:self.beta])
return options
else:
options = list(np.nonzero(dparray)[0])
return options
def ls2_loading(self, k, searchfrom):
# This function finds the restricted candidate list and tries to move
# cookies toward more favorable configurations to minimize the weighted avg
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
hotbins = np.argsort(searchfrom.getq0bins())
for s in range(searchfrom.openbins):
i = hotbins[-s - 1]
vlrep = copy.getvlrep()
# If there is only one item in the box, no point in moving
if len(vlrep[i]) < 2:
return k, None, rcl_t
rcl_j = self.ls2_makercl(i, vlrep)
k, newsol, rcl_t = self.search_rclj(k, i, copy, u, r, rcl_j, rcl_t)
if newsol:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def ls2_makercl(self, i, vlrep):
# This function returns the restricted candidate list for local search 2
# Restricted candidate list
binkeys = list(vlrep[i])
avglen = averageLen(vlrep)
nrcl_min = min(len(binkeys) - 1, self.beta)
nrcl = max(len(binkeys) - avglen, nrcl_min)
rcl_j = random.sample(binkeys, nrcl)
return rcl_j
def ls3_loading(self, k, searchfrom):
# This function finds the restricted candidate list for bin i and tries to
# move cookies to find a new nondominated solution. If unsuccessful, moves
# to a new bin
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
latebins = np.argsort(searchfrom.gettavail(), axis=0)
for s in range(searchfrom.openbins):
i = latebins[-s - 1]
vlrep = copy.getvlrep()
# If there is only one item in the box, no point in moving
if len(vlrep[i]) < 2:
return k, None, rcl_t
# Restricted candidate list
rcl_j = self.ls3_makercl(i, vlrep)
k, newsol, rcl_t = self.search_rclj(k, i, copy, u, r, rcl_j, rcl_t)
if newsol:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def ls3_makercl(self, i, vlrep):
# This function returns the restricted candidate list for local search 3
# Restricted candidate list
binkeys = list(vlrep[i])
n_rclj = int(0.5 * len(binkeys))
rcl_j = binkeys[-n_rclj - 1: -1]
return rcl_j
def search_rclj(self, k, i, solution, u, r, rcl_j, rcl_t):
# This function moves cookies into new boxes until either it finds a new
# nondominated solution or it runs out of candidates from this solution
for m in range(len(rcl_j)):
k += 1
j = random.choice(rcl_j)
rcl_j.remove(j)
r, rcl_t, solution = self.lsmove(i, j, r, rcl_t, solution)
# Check if modified solution is nondominated
solution = self.checkandfit(solution)
v = solution.getfits()
if not dom2(u, v):
return k, solution, rcl_t
return k, None, rcl_t
def lsmove(self, i, j, r, rcl_t, solution):
# This function determines where cookie j should move to
m = solution.getopenbins()
tfill = solution.gettfill()
# Gather bin options and pick new bin for the move
ilist = self.move_options(j, m, r, rcl_t, tfill)
inew = random.choice(ilist)
# Open a new bin or move cookie to a new bin
if inew == m:
tmin = self.get_box_tmin([j])
kwargs = {'mode': 'hload'}
t, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t:
solution.opennewbin(i, j, round(t, 1))
r[inew, 0] = self.moop.boxcap
r[inew, 1] = rcl_t.adapt_greedy_function_newbin(t)
else:
return r, rcl_t, solution
else:
solution.moveitem(i, j, inew)
r[i, 1], r[inew, 1] = rcl_t.adapt_movebins(tfill[i], tfill[inew])
r = self.update_spaceresiduals(r, i, inew)
return r, rcl_t, solution
def move_options(self, j, m, r, rcl_t, tfill):
# This function retrieves a candidate list for moving a cookie.
bcookiej = self.cookies.get(j).getbatch() # cookie batch number
tmax = rcl_t.get_tmax(bcookiej * 600, 1)
i_rlowtohigh = np.argsort(r[:m, 0], axis=0)
        # Check the open bins from most to least residual space (as in llmove).
for i in range(m):
# Find open bin with max. residual value, moving backward thru i_rlowtohigh
lsi = i_rlowtohigh[-1 - i]
if tfill[lsi] <= tmax:
pack = packable(r[lsi, :], bcookiej, tfill[lsi])
if pack:
return [m, lsi]
# If least loaded bin won't fit item, need to open new bin.
return [m]
def bin_mutation(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the first objective:
# minimizing the number of bins.
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.select_mutation_operation(k, searchfrom)
if coolneighbor:
coolneighbor.updateid(p)
coolneighbor = self.ls_time(coolneighbor, rcl_t)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def select_mutation_operation(self, k, searchfrom):
# This function selects the mutation operator
vlrep = searchfrom.getvlrep()
avg_bin_size = averageLen(vlrep)
too_small_lengths = [i for i in vlrep if 2 * len(i) <= avg_bin_size]
if too_small_lengths:
k, coolneighbor, rcl_t = self.move_cookies(k, searchfrom)
else:
rannum = random.random()
if rannum < 0.50:
k, coolneighbor, rcl_t = self.part_swap(k, searchfrom)
else:
k, coolneighbor, rcl_t = self.cookie_swap(k, searchfrom)
return k, coolneighbor, rcl_t
def time_mutation_by_heat(self, solution, rcl_t):
# This function tries a new time value for the initial hottest bin to
# see if that helps
tfill = solution.gettfill()
q0_bybin = solution.getq0bins()[:solution.getopenbins()]
i_hot_list = np.argsort(q0_bybin)
i_hot = i_hot_list[-1]
told = tfill[i_hot]
kwargs = {'mode': 'hload', 'nmove': len(solution.vlrep[i_hot])}
t_new, rcl_t = self.get_feasible_tfilli(rcl_t, told - 5.0, **kwargs)
if t_new:
neighbor = deepcopy(solution)
neighbor.edit_tfilli(i_hot, t_new)
# Adapt Greedy Function
rcl_t.adapt_changetime(told, t_new, len(neighbor.vlrep[i_hot]))
# Check if modified solution is nondominated
neighbor = self.checkandfit(neighbor)
solution = self.test_domination(solution, neighbor)
return solution
def split_bin(self, solution, rcl_t):
# This function splits the highest capacity bin into two boxes.
vlrep = solution.getvlrep()
i = self.getmaxbin(vlrep)
# Get random place to split bin
jsplit = random.randrange(1, len(vlrep[i]))
newbin = list(vlrep[i][jsplit:])
# Open new bin with feasible time value
tmin = self.get_box_tmin(newbin)
kwargs = {'mode': 'hload', 'nmove': len(newbin)}
t_new, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t_new:
tfill = solution.gettfill()
solution.opennewbin(i, newbin[0], round(t_new, 1))
inew = solution.getopenbins() - 1
rcl_t.adapt_greedy_function_newbin(t_new, add=0)
rcl_t.adapt_movebins(tfill[i], t_new)
if len(newbin) > 1:
for j in newbin[1:]:
solution.moveitem(i, j, inew)
rcl_t.adapt_movebins(tfill[i], tfill[inew])
return solution, rcl_t
def cookie_swap(self, k, searchfrom):
# This function selects two random bins and tries to swap cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['random', 'moveheat', 'movelate'])
i1, i2 = self.select_two_bins(copy, mode)
if not i2:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_cookie_swap(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_cookie_swap(self, solution, rcl_t, i1, i2, mode):
# This function performs the part swap between box i1 and i2
tfill = solution.gettfill()
vlrep = solution.getvlrep()
# Get cookies to swap
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
if mode == 'moveheat':
j1 = bini1_options[-1]
j2 = bini2_options[0]
else:
j1 = random.choice(bini1_options)
j2 = random.choice(bini2_options)
solution.moveitem(i1, j1, i2)
solution.moveitem(i2, j2, i1)
return solution, rcl_t
def part_swap(self, k, searchfrom):
# This function selects two random bins and tries to swap cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['random', 'moveheat', 'movelate'])
i1, i2 = self.select_two_bins(copy, mode)
if not i2:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_part_swap(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_part_swap(self, solution, rcl_t, i1, i2, mode):
# This function performs the part swap between box i1 and i2
# Get swap points
if mode == 'moveheat':
movetobin2, movetobin1 = self.get_heat_swap_sets(solution, i1, i2)
else:
movetobin2, movetobin1 = self.get_random_swap_sets(solution, i1, i2)
if movetobin2:
kwargs = {'i1': i1, 'movetobin2': movetobin2,
'i2': i2, 'movetobin1': movetobin1}
solution, rcl_t = \
self.make_swap_happen(solution, rcl_t, **kwargs)
else:
solution, rcl_t = self.split_bin(solution, rcl_t)
return solution, rcl_t
def make_swap_happen(self, solution, rcl_t, i1, movetobin2, i2, movetobin1):
# This function swaps a portion of box i1 with box i2
# potentially fix this: adapt rcl_t all at once instead of cookie by cookie
tfill = solution.gettfill()
for j in movetobin2:
solution.moveitem(i1, j, i2)
rcl_t.adapt_movebins(tfill[i1], tfill[i2])
for j in movetobin1:
solution.moveitem(i2, j, i1)
rcl_t.adapt_movebins(tfill[i2], tfill[i1])
return solution, rcl_t
def get_heat_swap_sets(self, solution, i1, i2):
# This function returns sets of cookies meant to reduce overall heat
# between boxes
vlrep = solution.getvlrep()
tfill = solution.gettfill()
# Determine eligible cookies
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
# Pick random swap sets
min_box_fill = min(len(vlrep[i1]), len(vlrep[i2]))
max_swap = min(len(bini1_options), len(bini2_options), min_box_fill - 1)
swap_number = random.randint(1, max_swap)
movetobin2 = bini1_options[-swap_number:]
movetobin1 = bini2_options[:swap_number]
return movetobin2, movetobin1
def get_random_swap_sets(self, solution, i1, i2):
# This function returns a random set of cookies to swap between boxes.
vlrep = solution.getvlrep()
tfill = solution.gettfill()
# Determine eligible cookies
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
# Pick random swap sets
min_box_fill = min(len(vlrep[i1]), len(vlrep[i2]))
max_swap = min(len(bini1_options), len(bini2_options), min_box_fill - 1)
swap_number = random.randint(1, max_swap)
movetobin2 = random.sample(bini1_options, swap_number)
movetobin1 = random.sample(bini2_options, swap_number)
return movetobin2, movetobin1
def getpoints_4swap(self, binitems1, t1, binitems2, t2):
# This function returns two points to perform the swap on
# Retrieve boolean lists
bool1 = self.moop.packatt(binitems1, t2)
bool2 = self.moop.packatt(binitems2, t1)
p1 = self.get_swap_point(bool1)
p2 = self.get_swap_point(bool2)
# If no swap point, return false
if not p1 or not p2:
return None, None
# Check for capacity violations
newbin1 = binitems1[:p1] + binitems2[p2:]
if len(newbin1) > self.moop.boxcap:
p2 = self.get_new_swap_point(binitems1, p1, binitems2, bool2)
newbin2 = binitems2[:p2] + binitems1[p1:]
if len(newbin2) > self.moop.boxcap:
p1 = self.get_new_swap_point(binitems2, p2, binitems1, bool1)
# Return the lists of cookies to be swapped
movetobin2 = list(binitems1[p1:])
movetobin1 = list(binitems2[p2:])
return movetobin2, movetobin1
def get_swap_point(self, booli):
# This function finds a feasible point to swap with another box
# Find starting point for bin i
starti = self.findstartforswap(booli)
if starti == len(booli):
return False
else:
pi = random.randrange(starti, len(booli))
return pi
def get_new_swap_point(self, bin_into, p1, bin_outta, bool_outta):
# This function finds a swap point that won't violate bin_into's capacity
can_accept = self.moop.boxcap - len(bin_into[:p1])
p2 = self.get_swap_point(bool_outta)
        kappa = 0
        while len(bin_outta[p2:]) > can_accept:
            # After ten failed tries, fall back to swapping only one item
            if kappa == 10:
                return len(bin_outta) - 1
            p2 = self.get_swap_point(bool_outta)
            kappa += 1
return p2
def findstartforswap(self, boollist):
# This function returns the index after which all values are True
start = 1
for k in range(len(boollist) - 1, 0, -1):
if boollist[k] is False:
start = k + 1
return start
return start
def move_cookies(self, k, searchfrom):
# This function selects two random bins and tries to move cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['moveheat', 'movelate'])
i1, i2 = self.get_hot_empty_bins(copy, mode)
if i2 == None or len(copy.vlrep[i2]) == self.moop.boxcap:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_cookie_move(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_cookie_move(self, solution, rcl_t, i1, i2, mode):
# This function performs the move of one cookie from box i1 to i2
tfill = solution.gettfill()
vlrep = solution.getvlrep()
# Get cookies to swap
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
empty_space = self.moop.boxcap - len(vlrep[i2])
max_move = min(empty_space, empty_space // 2 + 1, len(bini1_options))
nmove = random.randint(1, max_move)
for k in range(nmove):
j1 = bini1_options[-1 - k]
solution.moveitem(i1, j1, i2)
return solution, rcl_t
def select_two_bins(self, solution, mode):
# This module selects two bins for swap using specified function
vlrep = solution.getvlrep()
tfill = solution.gettfill()
if mode == 'moveheat':
i1, i2 = self.get_hot_cold_bins(vlrep, tfill, solution.getq0bins())
elif mode == 'movelate':
i1, i2 = self.get_hot_cold_bins(vlrep, tfill, solution.gettavail())
else:
# Pick random bins
i1, i2 = self.get_two_random_bins(vlrep, tfill)
return i1, i2
def get_hot_cold_bins(self, vlrep, tfill, characteristic):
# This function returns the indices of the hottest bin and the coldest
# bin that are compatible
m = len(vlrep) # number of open bins
ilist_hot = np.argsort(characteristic[:m])
for kh in range(m):
i_hot = ilist_hot[-1 - kh]
for kc in range(m - kh):
i_cold = ilist_hot[kc]
if i_hot != i_cold:
compatible = self.good_match(vlrep, tfill, i_hot, i_cold)
if compatible:
return i_hot, i_cold
return None, None
def get_hot_empty_bins(self, solution, mode):
# This function returns the indices of the hottest bin compatible with
# the emptiest bin
m = solution.getopenbins()
vlrep = solution.getvlrep()
tfill = solution.gettfill()
i2 = self.getminbin(vlrep)
if mode == 'moveheat':
ilist_hot = np.argsort(solution.getq0bins()[:m])
else:
ilist_hot = np.argsort(solution.gettavail()[:m])
for k in range(m):
i_hot = ilist_hot[-1 - k]
compatible = self.good_match(vlrep, tfill, i_hot, i2,
ignore_length=True)
if compatible:
return i_hot, i2
return None, None
def get_two_random_bins(self, vlrep, tfill):
# This function returns two individual random bins that can swap cookies
bin_pairs = list(combinations(range(len(vlrep)), 2))
for bp in range(len(bin_pairs)):
i1, i2 = random.choice(bin_pairs)
can_swap = self.good_match(vlrep, tfill, i1, i2)
if can_swap:
return i1, i2
return None, None
def good_match(self, vlrep, tfill, i1, i2, ignore_length=False):
# This function returns True if i1 and i2 are a good match for swapping
# and False if they are a bad match
if i1 == i2:
return False
if not ignore_length:
if len(vlrep[i1]) <= 1 or len(vlrep[i2]) <= 1:
return False
list1 = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
if not list1:
return False
list2 = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
if not list2:
return False
# If made it past conditions, return True
return True
def getrandombin(self, vlrep):
# This function returns a random bin with more than one item in it
bins = range(len(vlrep))
bini = random.choice(bins)
while len(vlrep[bini]) <= 1:
bini = random.choice(bins)
return bini
def getrandsecondbin(self, i1, vlrep, tfill):
# This function returns a second random bin that is not
# bin i1 and that items in bin i1 can be moved to
i2 = random.choice(range(len(vlrep)))
kappa = 1
while not self.good_match(vlrep, tfill, i1, i2):
if kappa == len(vlrep):
return None
i2 = random.choice(range(len(vlrep)))
kappa += 1
return i2
def getmaxbin(self, vlrep):
# This function returns the index of the fullest bin.
bincapacity = np.zeros(len(vlrep))
for i in range(len(vlrep)):
bincapacity[i] = len(vlrep[i])
bini = np.argmax(bincapacity)
return bini
def getminbin(self, vlrep):
# This function returns the index of the emptiest bin.
bincapacity = np.zeros(len(vlrep))
for i in range(len(vlrep)):
bincapacity[i] = len(vlrep[i])
minbin = np.argmin(bincapacity)
return minbin
def getresiduals(self, vlrep, tfill):
# This function calculates the residual matrix associated with a given
# dynamic bin packing loading. The first column represents the open box
# capacities, and the second column represents the maximum number of
# cookies that can be added to the cooling rack right before tfill_i
coolrack = self.moop.coolrack
r = np.zeros((self.n, 2), dtype=np.int)
# Set box capacity residuals
for i in range(len(vlrep)):
r[i, 0] = self.moop.boxcap - len(vlrep[i])
r[i, 1] = coolrack
# Set cooling rack capacity residuals
n_b = self.n // self.moop.nbatches
rcl_t = RCLtime(coolrack, self.moop.fillcap, n_b,
self.moop.tbatch, self.moop.nbatches)
r[:len(vlrep), 1] = rcl_t.initialize_withtfill(len(vlrep), vlrep, tfill)
return r, rcl_t
def update_spaceresiduals(self, r, i, inew):
# This function updates the space residual r after a cookie moves
# from box i to box inew
# Update r: box capacity
r[i, 0] += 1
r[inew, 0] -= 1
return r
def check4nondomination(self, u, solution):
# Check if modified solution is nondominated
solution = self.checkandfit(solution)
v = solution.getfits()
if not dom2(u, v):
return True
else:
return False
def countonrack(self, t, solution):
# Cookies from boxes filled after t might be on rack
vlrep = solution.getvlrep()
tfill = solution.gettfill()
timecheckindices = np.where(tfill > t)
nrackitems = 0
for i in timecheckindices[0]:
for j in vlrep[i]:
onrack = self.moop.rackij(t, tfill[i], self.cookies.get(j))
nrackitems += onrack
return nrackitems
def calclowerbound(self):
# This function calculates theoretical lower bound for the number of
# bins. It assumes this is the total number of cookies divided by
# the box capacity.
minbins = ceil(float(self.n) / self.moop.boxcap)
self.lb = int(minbins)
def getub(self):
# Returns the upper bound (bin capacity)
return self.moop.boxcap
def getcookies(self):
# Returns the list of items to pack
return self.cookies
def getlb(self):
# Returns the theoretical lower bound
return self.lb
class NewSolution:
# This class performs the GRASP creation of a new solution.
def __init__(self, beta, n, cookies, moop):
self.beta = beta # Cardinality restriction
self.n = int(n) # Number of cookies to sort
self.cookies = cookies # dictionary of item objects
self.moop = moop # Multiobjective problem class
self.m = 0 # initialize open bins count
self.r = np.zeros((n, 2)) # Residual capacity matrix
self.x = np.zeros((n, n), dtype=np.int)
self.y = np.zeros(n, dtype=np.int)
self.vlrep = []
self.tfill = np.zeros(n, dtype=np.float)
# Initialize restricted candidate list
n_b = self.n // self.moop.nbatches
self.rcl_t = RCLtime(moop.coolrack, moop.fillcap, n_b,
moop.tbatch, moop.nbatches)
def make_newsol(self, index, *args):
# This function takes the solution from generate_newsol and creates
# a CookieSol instance.
# Possible args: a newgenes list containing a chromosome representation
# and a suggested tfill.
if args:
self.generate_newsol_from_chromosome(args[0], args[1])
else:
self.generate_newsol()
newsol = solmaker.CookieSol(index, self.x, self.y, self.vlrep, self.tfill)
return newsol
def generate_newsol(self):
# This function generates a new solution from scratch using GRASP
modes = ['ss', 'hload'] # Modes for retrieving new tfill time
self.initialize_greedy_tfill()
self.open_new_bin(0, 0)
# Set strategy for the loading
theta_i = random.random()
for j in range(1, self.n):
rcl_i = self.get_rcl_bins(theta_i, j)
i = random.choice(rcl_i)
if self.y[i] == 0:
self.tfill[i] = self.get_feasible_tfilli(j, modes)
self.open_new_bin(i, j)
else:
self.vlrep[i].append(j)
self.r[i, 0] -= 1
self.rcl_t.adapt_greedy_function_addtobin(self.tfill[i])
self.r[:self.m, 1] = \
self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
self.constructx()
def generate_newsol_from_chromosome(self, chrom, tfill_suggested):
# This function generates a new solution based on a given chromosome
modes = ['ss', 'hload'] # Modes for retrieving new tfill time
self.initialize_greedy_tfill(*tfill_suggested)
chrom = self.initialize_first_bin(chrom)
# Set strategy for the loading
theta_i = random.random()
for j in chrom:
rcl_i = self.get_rcl_bins(theta_i, j)
i = random.choice(rcl_i)
if self.y[i] == 0:
self.tfill[i] = self.pick_tfilli(j, modes, tfill_suggested)
self.open_new_bin(i, j)
else:
self.vlrep[i].append(j)
self.r[i, 0] -= 1
self.rcl_t.adapt_greedy_function_addtobin(self.tfill[i])
self.r[:self.m, 1] = \
self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
self.constructx()
def initialize_greedy_tfill(self, *args):
# This function initializes t_fill
# Calculate tfill_0 using inverse cdf and set residual capacity
if args:
# args = tfill_suggested
self.tfill[0] = self.rcl_t.pick_suggested_t(args, self.moop.tbatch)
else:
self.tfill[0] = self.rcl_t.get_new_t(self.moop.tbatch)
def initialize_first_bin(self, chrom):
# This function finds the first cookie in list chrom that can be packed
# at tfill[0] and opens the first bin with that cookie
for j in chrom:
if self.moop.cookiedonebaking(j, self.tfill[0]):
self.open_new_bin(0, j)
chrom.remove(j)
return chrom
print('Error: NewSolution picked a time that cannot be filled.')
def pick_tfilli(self, j, modes, tfill_maybe):
# This module tries to use one of the time values from tfill
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
# If tmin when coolrack is overfull, find least worst solution
tk = self.find_t_in_trange(tmin)
if self.rcl_t.space[tk] <= 0:
t_new = self.rcl_t.find_least_worst_newt(tmin)
return t_new
t_possible = self.get_t_from_oldtfill(tmin, tfill_maybe)
if t_possible:
return t_possible
else:
# If nothing in tfill_maybe worked, return new value:
t_new = self.get_feasible_tfilli(j, modes)
return t_new
def get_t_from_oldtfill(self, tmin, tfill_maybe):
# This function returns a feasible time from tfill_maybe
# First establish tmax based on moving 1 cookie from the rack
tmax = self.rcl_t.get_tmax(tmin, 1)
t_options = np.unique(tfill_maybe)
for i in range(len(t_options)):
if t_options[i] < tmax:
# Avoid reusing a value from tfill_maybe
if t_options[i] not in self.tfill:
if self.rcl_t.time_feasible(t_options[i], tmin):
return t_options[i]
return None
def get_feasible_tfilli(self, j, modes):
# This function locates a new value for tfill[i] that doesn't violate
# rack or fill limits
theta_t = random.randint(0, 1)
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
# Find fill time for box i
t_new, p_t = self.find_new_time_value(tmin, modes[theta_t])
kappa = 0 # Counter to exit loop
# Check if possible to fill in period
while self.rcl_t.res_fill[p_t] < 1:
if kappa == 10:
return None
# If not possible, find new time value
t_new, p_t = self.find_new_time_value(tmin, modes[theta_t])
kappa += 1
return t_new
def find_new_time_value(self, tmin, mode):
# This module retrieves a new time value and also returns which period
# it belongs to
t_new = self.rcl_t.get_new_t(tmin, mode=mode)
t_t = self.find_t_in_fill_periods(t_new)
return t_new, t_t
def find_t_in_fill_periods(self, t):
# If the new time value is beyond the current fill periods, extend
while t > self.rcl_t.t_t[-1]:
self.rcl_t.extend_fill_periods()
# Find the period containing t_new
tlist = np.where(t >= np.array(self.rcl_t.t_t))[0]
return tlist[-1]
def find_t_in_trange(self, t):
# If the new time value is beyond the current timeline, extend
while t > self.rcl_t.trange[-1]:
self.rcl_t.extend_timeline()
tklist = np.where(np.array(self.rcl_t.trange) <= t)[0]
return tklist[-1]
def get_rcl_bins(self, theta_i, j):
# This module selects the strategy based on theta_i and returns
# the corresponding restricted candidate list.
if theta_i < 0.33:
# Least loaded strategy
rcl_i = self.llmove(j)
elif theta_i < 0.66:
# Weighted max strategy
rcl_i = self.wmaxmove(j)
else:
# Combo-t strategy
rcl_i = self.combot_move(j)
# Return either a new bin or the list found above
if not rcl_i:
rcl_i = self.find_alternative_bin(j)
return rcl_i
else:
return rcl_i
def llmove(self, j):
# This module performs the sorting for module ll.
# The goal of this strategy is to balance the loading of the boxes.
rcl_i = []
i_rlowtohigh = np.argsort(self.r[:self.m, 0], axis=0)
# Add new bin as an option if others are starting to get full
if self.r[i_rlowtohigh[-1], 0] <= 0.5 * self.moop.boxcap:
rcl_i.append(self.m)
for k in range(self.m):
# Find open bin with max. residual value, moving backward thru i_rlowtohigh
lli = i_rlowtohigh[- 1 - k]
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[lli, :], bcookiej, self.tfill[lli])
if pack:
rcl_i.append(lli)
if len(rcl_i) == self.beta:
return rcl_i
return rcl_i
def wmaxmove(self, j):
# This module determines the restricted candidate list by the weighted
# max strategy. The goal is to keep the number of boxes to a minimum.
rcl_i = []
# Gather weights: space on rack / maximum space over time
maxval = np.max(self.r[:self.m, 1])
weights = np.zeros(self.m)
for k in range(self.m):
weights[k] = self.r[k, 1] / maxval
# Calculate weighted residuals
wresidual = np.multiply(self.r[:self.m, 0], weights)
i_rlowtohigh = np.argsort(wresidual, axis=0)
for k in range(self.m):
# Find open bin with min. weighted residual value
i = i_rlowtohigh[k]
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[i, :], bcookiej, self.tfill[i])
if pack:
rcl_i.append(i)
if len(rcl_i) == self.beta // 2:
return rcl_i
return rcl_i
def combot_move(self, j):
# This module determines the restricted candidate list by the combo-t
# strategy. The goal is to reduce the maximum time until the boxes
# can be moved to the store front.
n_b = self.n // self.moop.nbatches # Number of cookies per batch
jmax = j - (j % n_b) # Max. cookie no. for heat restriction
rcl_i = []
i_rlowtohigh = np.argsort(self.r[:self.m, 0], axis=0)
# Add new bin as an option after all bins meet a minimum level
if self.r[i_rlowtohigh[-1], 0] <= 0.7 * self.moop.boxcap:
rcl_i.append(self.m)
for k in range(self.m):
# Find open bin with max. residual value
lli = i_rlowtohigh[- 1 - k]
otherbatch = [jo for jo in self.vlrep[lli] if jo < jmax]
# Heat restriction
if (self.r[lli, 0] <= 0.5 * self.moop.boxcap) & \
(len(otherbatch) == 0):
pass
else:
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[lli, :], bcookiej, self.tfill[lli])
if pack:
rcl_i.append(lli)
if len(rcl_i) == self.beta:
return rcl_i
return rcl_i
def open_new_bin(self, i, j):
# This module opens a new bin i with cookie j
self.m += 1
self.y[i] = 1
self.vlrep.insert(i, [j])
self.r[i, 0] = self.moop.boxcap - 1
# Adapt Greedy Function (time)
self.rcl_t.adapt_greedy_function_newbin(self.tfill[i])
t_t = self.find_t_in_fill_periods(self.tfill[i])
self.rcl_t.res_fill[t_t] -= 1
self.r[:self.m, 1] = self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
def find_alternative_bin(self, j):
# If tmin when coolrack is overfull, find least worst solution
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
tk = self.find_t_in_trange(tmin)
if self.rcl_t.space[tk] <= 0:
# Find least-worst alternative
options = [i for i in range(self.m)
if tmin < self.tfill[i] and self.r[i, 0] > 0]
if options:
return options
else:
return [self.m]
else:
return [self.m]
def constructx(self):
# This function transforms the variable length representation into
# the x-matrix
for i in range(self.m):
for j in self.vlrep[i]:
self.x[i, j] = 1
checkformismatch(self.x, self.vlrep)
class RCLtime:
# This class maintains and updates the restricted candidate list for a
# unique t_fill
def __init__(self, coolrack, fillcap, n_b, tbatch, nbatches):
self.coolrack = coolrack # Cooling rack capacity
self.fillcap = fillcap # Fill period limit
self.n_b = n_b # Number of cookies in one batch
self.tbatch = tbatch # Time to cook one batch
self.nbatches = nbatches # Number of batches cooked
# Set the time range, extend one cycle past last pull
self.trange = [(b + 1) * self.tbatch for b in range(self.nbatches + 1)]
# Space on the cooling rack as a function of time
self.space = [self.coolrack - (b + 1) * self.n_b
for b in range(self.nbatches)]
self.space.append(self.space[-1])
# Include restrictions for period fill limits
n_period = 2 * (nbatches - 1) + 2
self.t_t = [self.tbatch * (1.0 + t / 2.0) for t in range(n_period)]
self.res_fill = [fillcap for _ in range(n_period)]
def initialize_withtfill(self, m, vlrep, tfill):
# This function adds the information from vlrep and tfill
# into the trange and space lists
# First fix the cooling rack related items
r2 = np.zeros(m, dtype=np.int) # Collect residual values
i_lowtohigh = list(np.argsort(tfill[:m], axis=0))
for i in i_lowtohigh:
r2[i] = self.adapt_greedy_function_newbin(tfill[i],
add=len(vlrep[i]))
# Then fix the fill period related items
t_latest = np.amax(tfill)
while t_latest > self.t_t[-1]:
self.extend_fill_periods()
for t in range(len(self.t_t) - 1):
p_t = [i for i in range(m)
if self.t_t[t] <= tfill[i] < self.t_t[t + 1]]
self.res_fill[t] -= len(p_t)
return r2
def pick_suggested_t(self, t_maybe, tmin):
# This function returns a possible starting t-value, first by trying
# the suggested t values in t_maybe, and then by finding a feasible one
for i in range(len(t_maybe)):
if t_maybe[i] < self.trange[-1]:
if self.time_feasible(t_maybe[i], tmin):
return t_maybe[i]
t_new = self.get_new_t(tmin)
return t_new
def time_feasible(self, t, tmin):
# This function checks if time t is feasible to open a new bin
if t < tmin:
return False
while self.trange[-1] < t:
self.extend_timeline()
tk = self.find_t_in_timeline(t)
# To be feasible, the cooling rack cannot be overcrowded
if self.space[tk] > 0:
return self.time_period_feasible(t)
# If overcrowded, return False
return False
def time_period_feasible(self, t):
# This module determines if time value t is valid within period fill
# limit constraints.
if t < self.t_t[0]:
return False
ttlist = np.where(np.array(self.t_t) <= t)[0]
# The number of boxes filled during the period < limit
if self.res_fill[ttlist[-1]] > 0:
return True
else:
return False
def get_new_t(self, tmin, mode='ss', nmove=1, told=None):
        # This function draws a random time greater than tmin by sampling the
        # distribution induced by space(trange)
t = 0
tmax = self.get_tmax(tmin, nmove)
dist = self.retrieve_pdensityfunction(mode)
c_min = dist.cdf(tmin)
c_max = dist.cdf(tmax)
if c_min == c_max:
return None
k = 0
while round(t) <= tmin or round(t) >= tmax:
rannum = random.uniform(c_min, c_max)
t = dist.ppf(rannum)
k += 1
if k == 10:
return None
return round(t)
def retrieve_pdensityfunction(self, mode):
# This function returns the needed pdf
if mode == 'hload':
dist = PiecewiseLinearPDF(self.trange, self.space)
else:
dist = PiecewisePDF(self.trange, self.space)
return dist
def find_least_worst_newt(self, tmin):
# This function returns the least worst time for a box to be opened
# based on tmin.
tklist = np.where(np.array(self.trange) >= tmin)[0]
max_space = self.space[tklist[0]]
tmax = self.get_tmax(tmin, max_space)
t_new = random.uniform(tmin + 1, tmax)
kappa = 0
while not self.time_period_feasible(t_new):
if kappa == 10:
return tmin + 1.0
t_new = random.uniform(tmin + 1, tmax)
kappa += 1
return round(t_new)
def get_tmax(self, tmin, nmove):
# This function determines if the get_new_t function needs to limit its
# search to a max. value. If not, it returns the last trange value.
tklist = np.where(np.array(self.trange) > tmin)[0]
for tk in tklist:
if self.space[tk] - nmove <= 0:
return self.trange[tk]
# If did not find t_max, and enough space at end of timeline, extend
if self.space[-1] >= nmove:
self.extend_timeline()
return self.trange[-1]
def adapt_greedy_function_newbin(self, t, add=1):
# This function updates the space and trange lists after a new bin is
# opened, add is the space being opened by # of cookies being removed
# If t is larger than the range, add it on to the end
if t > self.trange[-1]:
self.trange.append(t)
self.space.append(self.space[-1])
self.update_space(-1, add=add)
return self.space[-1]
# If the new t is the same as the last t in trange, extend it by some
elif t == self.trange[-1]:
self.update_space(-1, add=add)
self.extend_timeline()
return self.space[-2]
else:
ilist = np.where(np.array(self.trange) >= t)[0]
if t == self.trange[ilist[0]]:
start = ilist[0]
else:
self.trange.insert(ilist[0], t)
self.space.insert(ilist[0], self.space[ilist[0] - 1] + add)
start = ilist[0] + 1
for tk in range(start, len(self.space)):
self.update_space(tk, add=add)
return self.space[ilist[0]]
def adapt_greedy_function_addtobin(self, t):
# This function updates the space and trange lists after a cookie is
# added to a box and removed from the cooling rack at time t
tklist = np.where(np.array(self.trange) >= t)[0]
for tk in tklist:
self.update_space(tk)
return self.space[tklist[0]]
def adapt_movebins(self, t1, t2):
# This function updates the space list after a cookie is moved from
# the box filled at t1 to the one filled at t2
tklist1 = np.where(np.array(self.trange) >= t1)[0]
tklist2 = np.where(np.array(self.trange) >= t2)[0]
tklist = np.setxor1d(tklist1, tklist2)
if t1 == t2:
return self.space[tklist1[0]], self.space[tklist1[0]]
elif t1 < t2:
for tk in tklist:
self.update_space(tk, add=-1)
else:
for tk in tklist:
self.update_space(tk)
return self.space[tklist1[0]], self.space[tklist2[0]]
def adapt_changetime(self, told, tnew, nmove):
# This function updates the trange and space lists to account for a bin
# being filled at tnew instead of told.
# nmove is the size of the box being changed
while tnew > self.trange[-1]:
self.extend_timeline()
tklist1 = np.where(np.array(self.trange) >= told)[0]
tklist2 = np.where(np.array(self.trange) >= tnew)[0]
tklist = np.setxor1d(tklist1, tklist2)
if told < tnew:
for tk in tklist:
self.update_space(tk, add=-nmove)
else:
for tk in tklist:
self.update_space(tk, add=nmove)
self.trange.insert(tklist2[0], tnew)
self.space.insert(tklist2[0], self.space[tklist2[0] - 1] + nmove)
return self.space
def update_space(self, tk, add=1):
# This function updates the space list at time tk, assuming one cookie
# was removed from the cooling rack
self.space[tk] += add
if self.space[tk] > self.coolrack:
self.space[tk] = self.coolrack
def retrieve_space_by_tfill(self, m, tfill):
# This function returns the space residuals matching tfill
r2 = np.zeros(m, dtype=np.int) # Collect residual values
for i in range(m):
ilist = np.where(np.array(self.trange) == tfill[i])[0]
r2[i] = self.space[ilist[0]]
return r2
def find_t_in_timeline(self, t):
tklist = np.where(np.array(self.trange) > t)[0]
tk = tklist[0] - 1
return tk
def extend_timeline(self):
# This function extends trange by one batch time period.
new_tlast = self.trange[-1] + 0.5 * self.tbatch
self.trange.append(new_tlast)
self.space.append(self.space[-1])
def extend_fill_periods(self):
# This function extends t_t by one period
self.t_t.append(self.t_t[-1] + 0.5 * self.tbatch)
self.res_fill.append(self.fillcap)
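# Illustrative sketch of the timeline RCLtime builds (the numbers below are
# assumptions for demonstration only): a 24-cookie rack, a fill limit of 2
# boxes per period, 6 cookies per batch, a 600 s batch time and 4 batches
# give pull times every 600 s and a rack residual that shrinks by 6 per batch.
_rcl_demo = RCLtime(24, 2, 6, 600, 4)
assert _rcl_demo.trange == [600, 1200, 1800, 2400, 3000]
assert _rcl_demo.space == [18, 12, 6, 0, 0]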
class PiecewisePDF:
# This class defines a piecewise function along with its pdf and cdf
def __init__(self, trange, space):
self.tchunk = np.ediff1d(trange)
space_array = np.array(space)
for tk in range(len(space_array)):
if space_array[tk] < 0.0:
space_array[tk] = 0.0
area_chunks = np.multiply(self.tchunk, space_array[:-1])
area_total = np.sum(area_chunks)
self.tk = np.array(trange) # time range for distribution
self.pk = space_array / float(area_total) # probability at tk
self.ck = np.cumsum(np.multiply(self.pk[:-1], self.tchunk)) # cumulative probability
self.ck = np.insert(self.ck, 0, 0.0)
def pdf(self, t):
# This function returns the probability at time t
if t < self.tk[0]:
return 0.0
listi = np.where(t < self.tk)
probt = self.pk[listi[0][0] - 1]
return probt
def cdf(self, t):
# This function returns the cumulative probability of quantile t
if t < self.tk[0]:
return 0.0
i = np.where(t == self.tk)[0]
if any(i):
return self.ck[i[0]]
else:
ilist = np.where(t < self.tk)[0]
i1 = ilist[0] - 1
i2 = ilist[0]
slope = (self.ck[i2] - self.ck[i1]) / (self.tk[i2] - self.tk[i1])
p_c = slope * (t - self.tk[i1]) + self.ck[i1]
return p_c
def ppf(self, p):
# This function returns the time associated with percentile p
# This is the inverse cumulative distribution function.
i = np.where(p == self.ck)[0]
if any(i):
return self.tk[i[0]]
else:
ilist = np.where(p < self.ck)[0]
# Linear function: t = (t_high - t_low)/(c_high - c_low)* (p - c_low) + t_low
i1 = ilist[0] - 1
i2 = ilist[0]
slope = (self.tk[i2] - self.tk[i1]) / (self.ck[i2] - self.ck[i1])
return slope * (p - self.ck[i1]) + self.tk[i1]
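# Illustrative round trip through PiecewisePDF with assumed rack data: over
# two 600 s chunks holding space 20 and 10, two thirds of the probability mass
# falls in the first chunk, and ppf inverts cdf back to the original time.
_pw_demo = PiecewisePDF([600.0, 1200.0, 1800.0], [20, 10, 10])
assert abs(_pw_demo.cdf(1200.0) - 2.0 / 3.0) < 1e-9
assert abs(_pw_demo.ppf(_pw_demo.cdf(900.0)) - 900.0) < 1e-6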
class PiecewiseLinearPDF:
# This class defines a piecewise function along with its pdf and cdf, with a
# linear increase in probability over each given time range
def __init__(self, trange, space):
self.tk = np.array(trange) # time range for distribution
self.space_array = np.array(space) # space available in each time range
for tk in range(len(self.space_array)):
if self.space_array[tk] < 0.0:
self.space_array[tk] = 0.0
self.tchunk = np.ediff1d(trange) # differences between time values
area_chunks = np.multiply(self.tchunk, self.space_array[:-1])
self.area_total = float(np.sum(area_chunks)) # total area under the space(t) curve
self.ck = np.cumsum(np.divide(area_chunks, self.area_total)) # cumulative probability
self.ck = np.insert(self.ck, 0, 0.0)
def pdf(self, t):
# This function returns the probability at time t
if t < self.tk[0]:
return 0.0
listi = np.where(t < self.tk)[0]
k = listi[0] - 1
# Linear function: probt = [(2 * space(tk) - 0) / (tk+1 - tk) * (t - tk)] / totalarea
slope = 2 * (self.space_array[k]/self.area_total)/self.tchunk[k]
probt = slope * (t - self.tk[k])
return probt
def cdf(self, t):
# This function returns the cumulative probability of quantile t
if t < self.tk[0]:
return 0.0
i = np.where(t == self.tk)[0]
if any(i):
return self.ck[i[0]]
else:
ilist = np.where(t < self.tk)[0]
k = ilist[0] - 1 # index for lower boundary of chunk
slope = 2 * (self.space_array[k] / self.area_total) / self.tchunk[k]
p_c = slope * (t - self.tk[k]) ** 2 / 2 + self.ck[k]
return p_c
def ppf(self, p):
# This function returns the time associated with percentile p
# This is the inverse cumulative distribution function.
i = np.where(p == self.ck)[0]
if any(i):
return self.tk[i[0]]
else:
ilist = np.where(p < self.ck)[0]
# Quad function: t = sqrt(2*(p-c_low)/slope) + t_low
k = ilist[0] - 1
slope = 2 * (self.space_array[k]/self.area_total)/self.tchunk[k]
x = sqrt(2 * (p - self.ck[k]) / slope)
return x + self.tk[k]
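# The two distribution classes above are easiest to see with a small worked
# example. The sketch below is illustrative only and not part of the original
# module: the time grid and space values are made-up numbers, and it simply
# checks that ppf() inverts cdf() for the piecewise-constant case.
def _demo_piecewise_distributions():
    trange = [0.0, 10.0, 20.0, 30.0]   # batch boundaries (same length as space)
    space = [5, 3, 1, 1]               # rack space available in each period
    flat = PiecewisePDF(trange, space)
    ramp = PiecewiseLinearPDF(trange, space)
    t_med = flat.ppf(0.5)              # median fill time under the flat density
    assert isclose(flat.cdf(t_med), 0.5)
    return t_med, ramp.cdf(t_med)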
def dom2(u, v):
# Determines if fitness vector u dominates fitness vector v
# This function assumes a minimization problem.
# For u to dominate v, every fitness value must be either
# equal to or less than the value in v AND one fitness value
# must be less than the one in v
equaltest = np.allclose(u, v)
    if equaltest:
        # If u == v then u does not dominate v
        return False
# less_equal returns boolean for each element u[i] <= v[i]
domtest = np.less_equal(u, v)
return np.all(domtest)
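# A minimal illustration of dom2 (not in the original source): for minimization,
# (1, 2) dominates (1, 3) because it is no worse in every objective and strictly
# better in one, while identical vectors and trade-off vectors do not dominate.
def _demo_dom2():
    assert dom2(np.array([1.0, 2.0]), np.array([1.0, 3.0]))
    assert not dom2(np.array([1.0, 2.0]), np.array([1.0, 2.0]))
    assert not dom2(np.array([1.0, 3.0]), np.array([2.0, 2.0]))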
def packable(ri, batch, tfilli):
    # This function checks whether a cookie from the given batch can fit inside the bin at time tfilli
# Capacity constraints
r1 = ri[0] - 1
r2 = ri[1] - 1
# Time constraint: tbatch = 10 min = 600 s
t_cook = batch * 600
return r1 >= 0 and r2 >= 0 and t_cook < tfilli
def checkformismatch(x, vlrep, out=sys.stdout):
# This function identifies if the given solution does not have an x-matrix
# and a variable length representation that match.
for i in range(len(vlrep)):
for j in vlrep[i]:
if x[i, j] != 1:
                out.write('Error: NewSolution is not coordinated on item {0}\n'.format(j))
def averageLen(lst):
# Calculates the average length of lists inside a list, returns integer value
lengths = [len(i) for i in lst]
return 0 if len(lengths) == 0 else (int(sum(lengths) / len(lengths)))
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
# This function determines if value a and value b are about equal
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
if __name__ == '__main__':
print('grasp.py needs to be combined with coolcookies.py')
|
[
"numpy.less_equal",
"math.sqrt",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"numpy.divide",
"numpy.multiply",
"numpy.where",
"numpy.subtract",
"numpy.max",
"random.random",
"numpy.setxor1d",
"numpy.argmin",
"random.randint",
"operator.attrgetter",
"random.sample",
"numpy.allclose",
"random.choice",
"random.uniform",
"numpy.ediff1d",
"numpy.argmax",
"numpy.nonzero",
"numpy.insert",
"numpy.unique",
"solutions_dynamic.CookieSol",
"numpy.sum",
"numpy.zeros",
"numpy.all",
"numpy.amax"
] |
[((62326, 62343), 'numpy.allclose', 'np.allclose', (['u', 'v'], {}), '(u, v)\n', (62337, 62343), True, 'import numpy as np\n'), ((62506, 62525), 'numpy.less_equal', 'np.less_equal', (['u', 'v'], {}), '(u, v)\n', (62519, 62525), True, 'import numpy as np\n'), ((62537, 62552), 'numpy.all', 'np.all', (['domtest'], {}), '(domtest)\n', (62543, 62552), True, 'import numpy as np\n'), ((1283, 1298), 'random.random', 'random.random', ([], {}), '()\n', (1296, 1298), False, 'import random\n'), ((2629, 2647), 'copy.deepcopy', 'deepcopy', (['solution'], {}), '(solution)\n', (2637, 2647), False, 'from copy import deepcopy\n'), ((7982, 8002), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (7990, 8002), False, 'from copy import deepcopy\n'), ((9889, 9905), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (9897, 9905), True, 'import numpy as np\n'), ((10995, 11015), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (11003, 11015), False, 'from copy import deepcopy\n'), ((11954, 11982), 'random.sample', 'random.sample', (['binkeys', 'nrcl'], {}), '(binkeys, nrcl)\n', (11967, 11982), False, 'import random\n'), ((12367, 12387), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (12375, 12387), False, 'from copy import deepcopy\n'), ((14301, 14321), 'random.choice', 'random.choice', (['ilist'], {}), '(ilist)\n', (14314, 14321), False, 'import random\n'), ((15284, 15312), 'numpy.argsort', 'np.argsort', (['r[:m, 0]'], {'axis': '(0)'}), '(r[:m, 0], axis=0)\n', (15294, 15312), True, 'import numpy as np\n'), ((17369, 17389), 'numpy.argsort', 'np.argsort', (['q0_bybin'], {}), '(q0_bybin)\n', (17379, 17389), True, 'import numpy as np\n'), ((19364, 19384), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (19372, 19384), False, 'from copy import deepcopy\n'), ((21250, 21270), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (21258, 21270), False, 'from copy import deepcopy\n'), ((23970, 23997), 'random.randint', 'random.randint', (['(1)', 'max_swap'], {}), '(1, max_swap)\n', (23984, 23997), False, 'import random\n'), ((24846, 24873), 'random.randint', 'random.randint', (['(1)', 'max_swap'], {}), '(1, max_swap)\n', (24860, 24873), False, 'import random\n'), ((24895, 24936), 'random.sample', 'random.sample', (['bini1_options', 'swap_number'], {}), '(bini1_options, swap_number)\n', (24908, 24936), False, 'import random\n'), ((24958, 24999), 'random.sample', 'random.sample', (['bini2_options', 'swap_number'], {}), '(bini2_options, swap_number)\n', (24971, 24999), False, 'import random\n'), ((27515, 27535), 'copy.deepcopy', 'deepcopy', (['searchfrom'], {}), '(searchfrom)\n', (27523, 27535), False, 'from copy import deepcopy\n'), ((28823, 28850), 'random.randint', 'random.randint', (['(1)', 'max_move'], {}), '(1, max_move)\n', (28837, 28850), False, 'import random\n'), ((29793, 29823), 'numpy.argsort', 'np.argsort', (['characteristic[:m]'], {}), '(characteristic[:m])\n', (29803, 29823), True, 'import numpy as np\n'), ((32275, 32294), 'random.choice', 'random.choice', (['bins'], {}), '(bins)\n', (32288, 32294), False, 'import random\n'), ((33072, 33094), 'numpy.argmax', 'np.argmax', (['bincapacity'], {}), '(bincapacity)\n', (33081, 33094), True, 'import numpy as np\n'), ((33350, 33372), 'numpy.argmin', 'np.argmin', (['bincapacity'], {}), '(bincapacity)\n', (33359, 33372), True, 'import numpy as np\n'), ((33801, 33836), 'numpy.zeros', 'np.zeros', (['(self.n, 2)'], {'dtype': 'np.int'}), '((self.n, 2), dtype=np.int)\n', 
(33809, 33836), True, 'import numpy as np\n'), ((35032, 35051), 'numpy.where', 'np.where', (['(tfill > t)'], {}), '(tfill > t)\n', (35040, 35051), True, 'import numpy as np\n'), ((36347, 36363), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (36355, 36363), True, 'import numpy as np\n'), ((36410, 36440), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'np.int'}), '((n, n), dtype=np.int)\n', (36418, 36440), True, 'import numpy as np\n'), ((36458, 36483), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int'}), '(n, dtype=np.int)\n', (36466, 36483), True, 'import numpy as np\n'), ((36529, 36556), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.float'}), '(n, dtype=np.float)\n', (36537, 36556), True, 'import numpy as np\n'), ((37180, 37245), 'solutions_dynamic.CookieSol', 'solmaker.CookieSol', (['index', 'self.x', 'self.y', 'self.vlrep', 'self.tfill'], {}), '(index, self.x, self.y, self.vlrep, self.tfill)\n', (37198, 37245), True, 'import solutions_dynamic as solmaker\n'), ((37576, 37591), 'random.random', 'random.random', ([], {}), '()\n', (37589, 37591), False, 'import random\n'), ((38538, 38553), 'random.random', 'random.random', ([], {}), '()\n', (38551, 38553), False, 'import random\n'), ((40891, 40913), 'numpy.unique', 'np.unique', (['tfill_maybe'], {}), '(tfill_maybe)\n', (40900, 40913), True, 'import numpy as np\n'), ((41403, 41423), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (41417, 41423), False, 'import random\n'), ((43709, 43747), 'numpy.argsort', 'np.argsort', (['self.r[:self.m, 0]'], {'axis': '(0)'}), '(self.r[:self.m, 0], axis=0)\n', (43719, 43747), True, 'import numpy as np\n'), ((44634, 44660), 'numpy.max', 'np.max', (['self.r[:self.m, 1]'], {}), '(self.r[:self.m, 1])\n', (44640, 44660), True, 'import numpy as np\n'), ((44679, 44695), 'numpy.zeros', 'np.zeros', (['self.m'], {}), '(self.m)\n', (44687, 44695), True, 'import numpy as np\n'), ((44834, 44874), 'numpy.multiply', 'np.multiply', (['self.r[:self.m, 0]', 'weights'], {}), '(self.r[:self.m, 0], weights)\n', (44845, 44874), True, 'import numpy as np\n'), ((44898, 44927), 'numpy.argsort', 'np.argsort', (['wresidual'], {'axis': '(0)'}), '(wresidual, axis=0)\n', (44908, 44927), True, 'import numpy as np\n'), ((45749, 45787), 'numpy.argsort', 'np.argsort', (['self.r[:self.m, 0]'], {'axis': '(0)'}), '(self.r[:self.m, 0], axis=0)\n', (45759, 45787), True, 'import numpy as np\n'), ((49306, 49331), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'np.int'}), '(m, dtype=np.int)\n', (49314, 49331), True, 'import numpy as np\n'), ((49656, 49670), 'numpy.amax', 'np.amax', (['tfill'], {}), '(tfill)\n', (49663, 49670), True, 'import numpy as np\n'), ((52532, 52562), 'random.uniform', 'random.uniform', (['(tmin + 1)', 'tmax'], {}), '(tmin + 1, tmax)\n', (52546, 52562), False, 'import random\n'), ((55149, 55178), 'numpy.setxor1d', 'np.setxor1d', (['tklist1', 'tklist2'], {}), '(tklist1, tklist2)\n', (55160, 55178), True, 'import numpy as np\n'), ((55953, 55982), 'numpy.setxor1d', 'np.setxor1d', (['tklist1', 'tklist2'], {}), '(tklist1, tklist2)\n', (55964, 55982), True, 'import numpy as np\n'), ((56734, 56759), 'numpy.zeros', 'np.zeros', (['m'], {'dtype': 'np.int'}), '(m, dtype=np.int)\n', (56742, 56759), True, 'import numpy as np\n'), ((57655, 57673), 'numpy.ediff1d', 'np.ediff1d', (['trange'], {}), '(trange)\n', (57665, 57673), True, 'import numpy as np\n'), ((57696, 57711), 'numpy.array', 'np.array', (['space'], {}), '(space)\n', (57704, 57711), True, 'import numpy as np\n'), ((57853, 57895), 
'numpy.multiply', 'np.multiply', (['self.tchunk', 'space_array[:-1]'], {}), '(self.tchunk, space_array[:-1])\n', (57864, 57895), True, 'import numpy as np\n'), ((57917, 57936), 'numpy.sum', 'np.sum', (['area_chunks'], {}), '(area_chunks)\n', (57923, 57936), True, 'import numpy as np\n'), ((57955, 57971), 'numpy.array', 'np.array', (['trange'], {}), '(trange)\n', (57963, 57971), True, 'import numpy as np\n'), ((58237, 58263), 'numpy.insert', 'np.insert', (['self.ck', '(0)', '(0.0)'], {}), '(self.ck, 0, 0.0)\n', (58246, 58263), True, 'import numpy as np\n'), ((58411, 58432), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (58419, 58432), True, 'import numpy as np\n'), ((59824, 59840), 'numpy.array', 'np.array', (['trange'], {}), '(trange)\n', (59832, 59840), True, 'import numpy as np\n'), ((59911, 59926), 'numpy.array', 'np.array', (['space'], {}), '(space)\n', (59919, 59926), True, 'import numpy as np\n'), ((60125, 60143), 'numpy.ediff1d', 'np.ediff1d', (['trange'], {}), '(trange)\n', (60135, 60143), True, 'import numpy as np\n'), ((60207, 60254), 'numpy.multiply', 'np.multiply', (['self.tchunk', 'self.space_array[:-1]'], {}), '(self.tchunk, self.space_array[:-1])\n', (60218, 60254), True, 'import numpy as np\n'), ((60461, 60487), 'numpy.insert', 'np.insert', (['self.ck', '(0)', '(0.0)'], {}), '(self.ck, 0, 0.0)\n', (60470, 60487), True, 'import numpy as np\n'), ((2712, 2757), 'numpy.argsort', 'np.argsort', (['tfill[:neighbor.openbins]'], {'axis': '(0)'}), '(tfill[:neighbor.openbins], axis=0)\n', (2722, 2757), True, 'import numpy as np\n'), ((8916, 8939), 'random.choice', 'random.choice', (['rcl_bins'], {}), '(rcl_bins)\n', (8929, 8939), False, 'import random\n'), ((13620, 13640), 'random.choice', 'random.choice', (['rcl_j'], {}), '(rcl_j)\n', (13633, 13640), False, 'import random\n'), ((16840, 16855), 'random.random', 'random.random', ([], {}), '()\n', (16853, 16855), False, 'import random\n'), ((17639, 17657), 'copy.deepcopy', 'deepcopy', (['solution'], {}), '(solution)\n', (17647, 17657), False, 'from copy import deepcopy\n'), ((19449, 19498), 'random.choice', 'random.choice', (["['random', 'moveheat', 'movelate']"], {}), "(['random', 'moveheat', 'movelate'])\n", (19462, 19498), False, 'import random\n'), ((20745, 20773), 'random.choice', 'random.choice', (['bini1_options'], {}), '(bini1_options)\n', (20758, 20773), False, 'import random\n'), ((20791, 20819), 'random.choice', 'random.choice', (['bini2_options'], {}), '(bini2_options)\n', (20804, 20819), False, 'import random\n'), ((21335, 21384), 'random.choice', 'random.choice', (["['random', 'moveheat', 'movelate']"], {}), "(['random', 'moveheat', 'movelate'])\n", (21348, 21384), False, 'import random\n'), ((27600, 27639), 'random.choice', 'random.choice', (["['moveheat', 'movelate']"], {}), "(['moveheat', 'movelate'])\n", (27613, 27639), False, 'import random\n'), ((31185, 31209), 'random.choice', 'random.choice', (['bin_pairs'], {}), '(bin_pairs)\n', (31198, 31209), False, 'import random\n'), ((32351, 32370), 'random.choice', 'random.choice', (['bins'], {}), '(bins)\n', (32364, 32370), False, 'import random\n'), ((37693, 37713), 'random.choice', 'random.choice', (['rcl_i'], {}), '(rcl_i)\n', (37706, 37713), False, 'import random\n'), ((38644, 38664), 'random.choice', 'random.choice', (['rcl_i'], {}), '(rcl_i)\n', (38657, 38664), False, 'import random\n'), ((49390, 49419), 'numpy.argsort', 'np.argsort', (['tfill[:m]'], {'axis': '(0)'}), '(tfill[:m], axis=0)\n', (49400, 49419), True, 'import numpy as np\n'), 
((51789, 51817), 'random.uniform', 'random.uniform', (['c_min', 'c_max'], {}), '(c_min, c_max)\n', (51803, 51817), False, 'import random\n'), ((52715, 52745), 'random.uniform', 'random.uniform', (['(tmin + 1)', 'tmax'], {}), '(tmin + 1, tmax)\n', (52729, 52745), False, 'import random\n'), ((58153, 58191), 'numpy.multiply', 'np.multiply', (['self.pk[:-1]', 'self.tchunk'], {}), '(self.pk[:-1], self.tchunk)\n', (58164, 58191), True, 'import numpy as np\n'), ((58653, 58675), 'numpy.where', 'np.where', (['(t == self.tk)'], {}), '(t == self.tk)\n', (58661, 58675), True, 'import numpy as np\n'), ((59174, 59196), 'numpy.where', 'np.where', (['(p == self.ck)'], {}), '(p == self.ck)\n', (59182, 59196), True, 'import numpy as np\n'), ((60287, 60306), 'numpy.sum', 'np.sum', (['area_chunks'], {}), '(area_chunks)\n', (60293, 60306), True, 'import numpy as np\n'), ((60376, 60415), 'numpy.divide', 'np.divide', (['area_chunks', 'self.area_total'], {}), '(area_chunks, self.area_total)\n', (60385, 60415), True, 'import numpy as np\n'), ((60635, 60656), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (60643, 60656), True, 'import numpy as np\n'), ((61072, 61094), 'numpy.where', 'np.where', (['(t == self.tk)'], {}), '(t == self.tk)\n', (61080, 61094), True, 'import numpy as np\n'), ((61619, 61641), 'numpy.where', 'np.where', (['(p == self.ck)'], {}), '(p == self.ck)\n', (61627, 61641), True, 'import numpy as np\n'), ((61943, 61977), 'math.sqrt', 'sqrt', (['(2 * (p - self.ck[k]) / slope)'], {}), '(2 * (p - self.ck[k]) / slope)\n', (61947, 61977), False, 'from math import ceil, sqrt\n'), ((8201, 8218), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (8209, 8218), True, 'import numpy as np\n'), ((58765, 58786), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (58773, 58786), True, 'import numpy as np\n'), ((59286, 59307), 'numpy.where', 'np.where', (['(p < self.ck)'], {}), '(p < self.ck)\n', (59294, 59307), True, 'import numpy as np\n'), ((61184, 61205), 'numpy.where', 'np.where', (['(t < self.tk)'], {}), '(t < self.tk)\n', (61192, 61205), True, 'import numpy as np\n'), ((61731, 61752), 'numpy.where', 'np.where', (['(p < self.ck)'], {}), '(p < self.ck)\n', (61739, 61752), True, 'import numpy as np\n'), ((4760, 4779), 'operator.attrgetter', 'attrgetter', (['"""batch"""'], {}), "('batch')\n", (4770, 4779), False, 'from operator import attrgetter\n'), ((5465, 5484), 'numpy.array', 'np.array', (['rcl_t.t_t'], {}), '(rcl_t.t_t)\n', (5473, 5484), True, 'import numpy as np\n'), ((10221, 10270), 'numpy.subtract', 'np.subtract', (['self.moop.coolrack', 'rcl_t.space[tk:]'], {}), '(self.moop.coolrack, rcl_t.space[tk:])\n', (10232, 10270), True, 'import numpy as np\n'), ((10442, 10461), 'numpy.nonzero', 'np.nonzero', (['dparray'], {}), '(dparray)\n', (10452, 10461), True, 'import numpy as np\n'), ((10506, 10526), 'numpy.argsort', 'np.argsort', (['(-dparray)'], {}), '(-dparray)\n', (10516, 10526), True, 'import numpy as np\n'), ((10608, 10627), 'numpy.nonzero', 'np.nonzero', (['dparray'], {}), '(dparray)\n', (10618, 10627), True, 'import numpy as np\n'), ((42529, 42553), 'numpy.array', 'np.array', (['self.rcl_t.t_t'], {}), '(self.rcl_t.t_t)\n', (42537, 42553), True, 'import numpy as np\n'), ((42798, 42825), 'numpy.array', 'np.array', (['self.rcl_t.trange'], {}), '(self.rcl_t.trange)\n', (42806, 42825), True, 'import numpy as np\n'), ((51091, 51109), 'numpy.array', 'np.array', (['self.t_t'], {}), '(self.t_t)\n', (51099, 51109), True, 'import numpy as np\n'), ((52394, 
52415), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (52402, 52415), True, 'import numpy as np\n'), ((53017, 53038), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (53025, 53038), True, 'import numpy as np\n'), ((54716, 54737), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (54724, 54737), True, 'import numpy as np\n'), ((55041, 55062), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55049, 55062), True, 'import numpy as np\n'), ((55100, 55121), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55108, 55121), True, 'import numpy as np\n'), ((55841, 55862), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55849, 55862), True, 'import numpy as np\n'), ((55902, 55923), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (55910, 55923), True, 'import numpy as np\n'), ((57004, 57025), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (57012, 57025), True, 'import numpy as np\n'), ((56843, 56864), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (56851, 56864), True, 'import numpy as np\n'), ((54068, 54089), 'numpy.array', 'np.array', (['self.trange'], {}), '(self.trange)\n', (54076, 54089), True, 'import numpy as np\n')]
|
import os.path
import numpy as np
import itertools
import Tools
# These patterns are used for tests and benchmarks.
# For the tests, saturation cases still need to be added.
def writeTests(config):
NBSAMPLES=128
inputsA=np.random.randn(NBSAMPLES)
inputsB=np.random.randn(NBSAMPLES)
inputsA = inputsA/max(inputsA)
inputsB = inputsB/max(inputsB)
config.writeInput(1, inputsA,"InputsA")
config.writeInput(1, inputsB,"InputsB")
PATTERNDIR = os.path.join("Patterns","DSP","Filtering","MISC","MISC")
PARAMDIR = os.path.join("Parameters","DSP","Filtering","MISC","MISC")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
configq7=Tools.Config(PATTERNDIR,PARAMDIR,"q7")
writeTests(configf32)
writeTests(configq31)
writeTests(configq15)
writeTests(configq7)
|
[
"Tools.Config",
"numpy.random.randn"
] |
[((624, 665), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""f32"""'], {}), "(PATTERNDIR, PARAMDIR, 'f32')\n", (636, 665), False, 'import Tools\n'), ((674, 715), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""q31"""'], {}), "(PATTERNDIR, PARAMDIR, 'q31')\n", (686, 715), False, 'import Tools\n'), ((724, 765), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""q15"""'], {}), "(PATTERNDIR, PARAMDIR, 'q15')\n", (736, 765), False, 'import Tools\n'), ((773, 813), 'Tools.Config', 'Tools.Config', (['PATTERNDIR', 'PARAMDIR', '"""q7"""'], {}), "(PATTERNDIR, PARAMDIR, 'q7')\n", (785, 813), False, 'import Tools\n'), ((235, 261), 'numpy.random.randn', 'np.random.randn', (['NBSAMPLES'], {}), '(NBSAMPLES)\n', (250, 261), True, 'import numpy as np\n'), ((274, 300), 'numpy.random.randn', 'np.random.randn', (['NBSAMPLES'], {}), '(NBSAMPLES)\n', (289, 300), True, 'import numpy as np\n')]
|
from genericpath import exists
import math
import numpy as np
import os
import re
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import cm
# append line to log file
def log(file, line, doPrint=True):
f = open(file, "a+")
    f.write(line + "\n")
f.close()
if doPrint:
print(line)
# reset log file
def resetLog(file):
f = open(file, "w")
f.close()
def plot_loss(history_L1, history_L1val):
l1train = np.asarray(history_L1)
l1vali = np.asarray(history_L1val)
plt.figure()
plt.plot(np.arange(l1train.shape[0]), l1train, "b", label="Training loss")
plt.plot(np.arange(l1vali.shape[0]), l1vali, "g", label="Validation loss")
plt.legend()
plt.show()
def computeLR(i, epochs, minLR, maxLR):
if i < epochs * 0.5:
return maxLR
e = (i / float(epochs) - 0.5) * 2.0
fmin = 0.0
fmax = 6.0
e = fmin + e * (fmax - fmin)
f = math.pow(0.5, e)
return minLR + (maxLR - minLR) * f
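# Rough sketch of the schedule computeLR produces (not part of the original
# file). With the illustrative values epochs=100, minLR=1e-4, maxLR=1e-3 the
# rate stays at maxLR for the first half of training and then decays smoothly,
# to roughly 2.1e-4 at epoch 75 and about 1.1e-4 at the final epoch.
def _demo_compute_lr(epochs=100, minLR=1e-4, maxLR=1e-3):
    return [computeLR(i, epochs, minLR, maxLR) for i in (0, 25, 50, 75, 100)]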
def makeDirs(directoryList):
for directory in directoryList:
if not os.path.exists(directory):
os.makedirs(directory)
def imageOut(filename, _outputs, _targets, saveTargets=False, normalize=False, saveMontage=True):
outputs = np.copy(_outputs)
targets = np.copy(_targets)
s = outputs.shape[1]
if saveMontage:
new_img = Image.new("RGB", ((s + 10) * 3, s * 2), color=(255, 255, 255))
BW_img = Image.new("RGB", ((s + 10) * 3, s * 3), color=(255, 255, 255))
for i in range(3):
outputs[i] = np.flipud(outputs[i].transpose())
targets[i] = np.flipud(targets[i].transpose())
min_value = min(np.min(outputs[i]), np.min(targets[i]))
max_value = max(np.max(outputs[i]), np.max(targets[i]))
if normalize:
outputs[i] -= min_value
targets[i] -= min_value
max_value -= min_value
outputs[i] /= max_value
targets[i] /= max_value
else:
outputs[i] -= -1.0
targets[i] -= -1.0
outputs[i] /= 2.0
targets[i] /= 2.0
if not saveMontage:
suffix = ""
if i == 0:
suffix = "_pressure"
elif i == 1:
suffix = "_velX"
else:
suffix = "_velY"
im = Image.fromarray(cm.magma(outputs[i], bytes=True))
im = im.resize((512, 512))
im.save(filename + suffix + "_pred.png")
im = Image.fromarray(cm.magma(targets[i], bytes=True))
if saveTargets:
im = im.resize((512, 512))
im.save(filename + suffix + "_target.png")
else:
im = Image.fromarray(cm.magma(targets[i], bytes=True))
new_img.paste(im, ((s + 10) * i, s * 0))
im = Image.fromarray(cm.magma(outputs[i], bytes=True))
new_img.paste(im, ((s + 10) * i, s * 1))
im = Image.fromarray(targets[i] * 256.0)
BW_img.paste(im, ((s + 10) * i, s * 0))
im = Image.fromarray(outputs[i] * 256.0)
BW_img.paste(im, ((s + 10) * i, s * 1))
im = Image.fromarray(np.abs(targets[i] - outputs[i]) * 10.0 * 256.0)
BW_img.paste(im, ((s + 10) * i, s * 2))
if saveMontage:
new_img.save(filename + ".png")
BW_img.save(filename + "_bw.png")
def imageOutSimple(filename, _outputs, saveTargets=True, normalize=False):
    # Single-array variant of imageOut: writes only the prediction channels to disk.
outputs = np.copy(_outputs)
for i in range(3):
outputs[i] = np.flipud(outputs[i].transpose())
min_value = np.min(outputs[i])
max_value = np.max(outputs[i])
if normalize:
outputs[i] -= min_value
max_value -= min_value
outputs[i] /= max_value
else: # from -1,1 to 0,1
outputs[i] -= -1.0
outputs[i] /= 2.0
suffix = ""
if i == 0:
suffix = "_pressure"
elif i == 1:
suffix = "_velX"
else:
suffix = "_velY"
im = Image.fromarray(cm.magma(outputs[i], bytes=True))
im = im.resize((128, 128))
im.save(filename + suffix + "_pred.png")
def saveOutput(output_arr, target_arr):
if target_arr is None:
imageOut("./results/result", output_arr)
else:
imageOut(
"./results/result", output_arr, target_arr, normalize=False, saveMontage=True
) # write normalized with error
class InputData:
def __init__(self, npz_arr, removePOffset=True, makeDimLess=True):
self.input = None
self.target = None
self.max_inputs_0 = 100.0
self.max_inputs_1 = 38.12
self.max_inputs_2 = 1.0
self.max_targets_0 = 4.65
self.max_targets_1 = 2.04
self.max_targets_2 = 2.37
if npz_arr.shape[0] >= 3:
self.input = npz_arr[0:3]
if npz_arr.shape[0] == 6:
self.target = npz_arr[3:6]
self.removePOffset = removePOffset
self.makeDimLess = makeDimLess
self.normalize()
def normalize(self):
if self.target is not None:
if self.removePOffset:
self.target[0, :, :] -= np.mean(self.target[0, :, :]) # remove offset
self.target[0, :, :] -= self.target[0, :, :] * self.input[2, :, :] # pressure * mask
if self.makeDimLess:
v_norm = (np.max(np.abs(self.input[0, :, :])) ** 2 + np.max(np.abs(self.input[1, :, :])) ** 2) ** 0.5
self.target[0, :, :] /= v_norm ** 2
self.target[1, :, :] /= v_norm
self.target[2, :, :] /= v_norm
self.target[0, :, :] *= 1.0 / self.max_targets_0
self.target[1, :, :] *= 1.0 / self.max_targets_1
self.target[2, :, :] *= 1.0 / self.max_targets_2
if self.input is not None:
self.input[0, :, :] *= 1 / self.max_inputs_0
self.input[1, :, :] *= 1 / self.max_inputs_1
def denormalize(self, data, v_norm):
a = data.copy()
a[0, :, :] /= 1.0 / self.max_targets_0
a[1, :, :] /= 1.0 / self.max_targets_1
a[2, :, :] /= 1.0 / self.max_targets_2
if self.makeDimLess:
a[0, :, :] *= v_norm ** 2
a[1, :, :] *= v_norm
a[2, :, :] *= v_norm
return a
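# Hedged usage sketch (not from the original file): feeds a random 6-channel
# array through InputData, then undoes the target normalization with the same
# freestream magnitude v_norm that normalize() derives from channels 0 and 1.
# The shape and values are placeholders for a real flow sample.
def _example_input_data():
    npz_arr = np.random.rand(6, 128, 128).astype(np.float32)
    v_norm = (np.max(np.abs(npz_arr[0])) ** 2 + np.max(np.abs(npz_arr[1])) ** 2) ** 0.5
    sample = InputData(npz_arr, removePOffset=True, makeDimLess=True)
    restored = sample.denormalize(sample.target, v_norm)
    return restored.shape  # (3, 128, 128)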
|
[
"numpy.copy",
"os.path.exists",
"PIL.Image.fromarray",
"numpy.mean",
"numpy.abs",
"os.makedirs",
"numpy.arange",
"math.pow",
"PIL.Image.new",
"matplotlib.cm.magma",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((460, 482), 'numpy.asarray', 'np.asarray', (['history_L1'], {}), '(history_L1)\n', (470, 482), True, 'import numpy as np\n'), ((496, 521), 'numpy.asarray', 'np.asarray', (['history_L1val'], {}), '(history_L1val)\n', (506, 521), True, 'import numpy as np\n'), ((527, 539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (537, 539), True, 'import matplotlib.pyplot as plt\n'), ((702, 714), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (712, 714), True, 'import matplotlib.pyplot as plt\n'), ((719, 729), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((930, 946), 'math.pow', 'math.pow', (['(0.5)', 'e'], {}), '(0.5, e)\n', (938, 946), False, 'import math\n'), ((1244, 1261), 'numpy.copy', 'np.copy', (['_outputs'], {}), '(_outputs)\n', (1251, 1261), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.copy', 'np.copy', (['_targets'], {}), '(_targets)\n', (1283, 1293), True, 'import numpy as np\n'), ((3467, 3484), 'numpy.copy', 'np.copy', (['_outputs'], {}), '(_outputs)\n', (3474, 3484), True, 'import numpy as np\n'), ((553, 580), 'numpy.arange', 'np.arange', (['l1train.shape[0]'], {}), '(l1train.shape[0])\n', (562, 580), True, 'import numpy as np\n'), ((632, 658), 'numpy.arange', 'np.arange', (['l1vali.shape[0]'], {}), '(l1vali.shape[0])\n', (641, 658), True, 'import numpy as np\n'), ((1358, 1420), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '((s + 10) * 3, s * 2)'], {'color': '(255, 255, 255)'}), "('RGB', ((s + 10) * 3, s * 2), color=(255, 255, 255))\n", (1367, 1420), False, 'from PIL import Image\n'), ((1438, 1500), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '((s + 10) * 3, s * 3)'], {'color': '(255, 255, 255)'}), "('RGB', ((s + 10) * 3, s * 3), color=(255, 255, 255))\n", (1447, 1500), False, 'from PIL import Image\n'), ((3583, 3601), 'numpy.min', 'np.min', (['outputs[i]'], {}), '(outputs[i])\n', (3589, 3601), True, 'import numpy as np\n'), ((3622, 3640), 'numpy.max', 'np.max', (['outputs[i]'], {}), '(outputs[i])\n', (3628, 3640), True, 'import numpy as np\n'), ((1068, 1093), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1082, 1093), False, 'import os\n'), ((1107, 1129), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1118, 1129), False, 'import os\n'), ((1659, 1677), 'numpy.min', 'np.min', (['outputs[i]'], {}), '(outputs[i])\n', (1665, 1677), True, 'import numpy as np\n'), ((1679, 1697), 'numpy.min', 'np.min', (['targets[i]'], {}), '(targets[i])\n', (1685, 1697), True, 'import numpy as np\n'), ((1723, 1741), 'numpy.max', 'np.max', (['outputs[i]'], {}), '(outputs[i])\n', (1729, 1741), True, 'import numpy as np\n'), ((1743, 1761), 'numpy.max', 'np.max', (['targets[i]'], {}), '(targets[i])\n', (1749, 1761), True, 'import numpy as np\n'), ((2953, 2988), 'PIL.Image.fromarray', 'Image.fromarray', (['(targets[i] * 256.0)'], {}), '(targets[i] * 256.0)\n', (2968, 2988), False, 'from PIL import Image\n'), ((3058, 3093), 'PIL.Image.fromarray', 'Image.fromarray', (['(outputs[i] * 256.0)'], {}), '(outputs[i] * 256.0)\n', (3073, 3093), False, 'from PIL import Image\n'), ((4061, 4093), 'matplotlib.cm.magma', 'cm.magma', (['outputs[i]'], {'bytes': '(True)'}), '(outputs[i], bytes=True)\n', (4069, 4093), False, 'from matplotlib import cm\n'), ((2356, 2388), 'matplotlib.cm.magma', 'cm.magma', (['outputs[i]'], {'bytes': '(True)'}), '(outputs[i], bytes=True)\n', (2364, 2388), False, 'from matplotlib import cm\n'), ((2516, 2548), 'matplotlib.cm.magma', 'cm.magma', 
(['targets[i]'], {'bytes': '(True)'}), '(targets[i], bytes=True)\n', (2524, 2548), False, 'from matplotlib import cm\n'), ((2728, 2760), 'matplotlib.cm.magma', 'cm.magma', (['targets[i]'], {'bytes': '(True)'}), '(targets[i], bytes=True)\n', (2736, 2760), False, 'from matplotlib import cm\n'), ((2848, 2880), 'matplotlib.cm.magma', 'cm.magma', (['outputs[i]'], {'bytes': '(True)'}), '(outputs[i], bytes=True)\n', (2856, 2880), False, 'from matplotlib import cm\n'), ((5195, 5224), 'numpy.mean', 'np.mean', (['self.target[0, :, :]'], {}), '(self.target[0, :, :])\n', (5202, 5224), True, 'import numpy as np\n'), ((3179, 3210), 'numpy.abs', 'np.abs', (['(targets[i] - outputs[i])'], {}), '(targets[i] - outputs[i])\n', (3185, 3210), True, 'import numpy as np\n'), ((5411, 5438), 'numpy.abs', 'np.abs', (['self.input[0, :, :]'], {}), '(self.input[0, :, :])\n', (5417, 5438), True, 'import numpy as np\n'), ((5454, 5481), 'numpy.abs', 'np.abs', (['self.input[1, :, :]'], {}), '(self.input[1, :, :])\n', (5460, 5481), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(total_risk_factors).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
if neutralized_risk:
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_y = target_df.copy()
risk_exp = train_x[neutralized_risk].values.astype(float)
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
else:
risk_exp = None
train_x = factor_df.copy()
train_y = target_df.copy()
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
codes = train_x['code'].values
date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
dates = np.unique(date_label)
return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes
def prepare_data(engine: SqlEngine,
factors: Union[Transformer, Iterable[object]],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None):
if warm_start > 0:
p = Period(frequency)
p = Period(length=-warm_start * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')
dates = makeSchedule(start_date,
end_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Forward)
dates = [d.strftime('%Y-%m-%d') for d in dates]
horizon = map_freq(frequency)
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
factor_df = engine.fetch_factor_range(universe,
factors=transformer,
dates=dates).sort_values(['trade_date', 'code'])
alpha_logger.info("factor data loading finished")
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
alpha_logger.info("fit target data loading finished")
industry_df = engine.fetch_industry_range(universe, dates=dates)
alpha_logger.info("industry data loading finished")
benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
alpha_logger.info("benchmark data loading finished")
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
df = pd.merge(df, industry_df, on=['trade_date', 'code'])
df['weight'] = df['weight'].fillna(0.)
df.dropna(inplace=True)
return dates, df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code', 'weight', 'industry_code', 'industry'] + transformer.names]
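# Illustrative call of prepare_data (not in the original module). The factor
# names, date range, frequency and benchmark code below are placeholders, and
# `engine` / `universe` are assumed to be already-constructed SqlEngine and
# Universe instances.
def example_prepare_data(engine: SqlEngine, universe: Universe):
    dates, target_df, factor_df = prepare_data(engine,
                                                factors=['EPS', 'ROE'],
                                                start_date='2020-01-01',
                                                end_date='2020-06-30',
                                                frequency='1w',
                                                universe=universe,
                                                benchmark=905)
    return dates, target_df, factor_df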
def batch_processing(names,
x_values,
y_values,
groups,
group_label,
batch,
risk_exp,
pre_process,
post_process,
codes):
train_x_buckets = {}
train_y_buckets = {}
train_risk_buckets = {}
predict_x_buckets = {}
predict_y_buckets = {}
predict_risk_buckets = {}
predict_codes_bucket = {}
for i, start in enumerate(groups[:-batch]):
end = groups[i + batch]
left_index = bisect.bisect_left(group_label, start)
right_index = bisect.bisect_left(group_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
train_x_buckets[end] = pd.DataFrame(factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process),
columns=names)
train_y_buckets[end] = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
train_risk_buckets[end] = this_risk_exp
left_index = bisect.bisect_right(group_label, start)
right_index = bisect.bisect_right(group_label, end)
sub_dates = group_label[left_index:right_index]
this_raw_x = x_values[left_index:right_index]
this_codes = codes[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
predict_x_buckets[end] = pd.DataFrame(ne_x[inner_left_index:inner_right_index],
columns=names)
if risk_exp is not None:
predict_risk_buckets[end] = this_risk_exp[inner_left_index:inner_right_index]
else:
predict_risk_buckets = None
predict_codes_bucket[end] = this_codes[inner_left_index:inner_right_index]
this_raw_y = y_values[left_index:right_index]
if len(this_raw_y) > 0:
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
predict_y_buckets[end] = ne_y[inner_left_index:inner_right_index]
return train_x_buckets, \
train_y_buckets, \
train_risk_buckets, \
predict_x_buckets, \
predict_y_buckets, \
predict_risk_buckets, \
predict_codes_bucket
def fetch_data_package(engine: SqlEngine,
alpha_factors: Iterable[object],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
batch: int = 1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
fit_target: Union[Transformer, object] = None) -> dict:
alpha_logger.info("Starting data package fetching ...")
transformer = Transformer(alpha_factors)
names = transformer.names
dates, target_df, factor_df = prepare_data(engine,
transformer,
start_date,
end_date,
frequency,
universe,
benchmark,
warm_start + batch,
fit_target=fit_target)
target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes = \
_merge_df(engine, names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
alpha_logger.info("data merging finished")
target_df['weight'] = train_x['weight']
target_df['industry'] = train_x['industry']
target_df['industry_code'] = train_x['industry_code']
if neutralized_risk:
for i, name in enumerate(neutralized_risk):
target_df.loc[:, name] = risk_exp[:, i]
alpha_logger.info("Loading data is finished")
train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, predict_y_buckets, predict_risk_buckets, predict_codes_bucket \
= batch_processing(names,
x_values,
y_values,
dates,
date_label,
batch,
risk_exp,
pre_process,
post_process,
codes)
alpha_logger.info("Data processing is finished")
ret = dict()
ret['x_names'] = names
ret['settlement'] = target_df[target_df.trade_date >= start_date]
train_x_buckets = {k: train_x_buckets[k] for k in train_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_y_buckets = {k: train_y_buckets[k] for k in train_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_risk_buckets = {k: train_risk_buckets[k] for k in train_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_x_buckets = {k: predict_x_buckets[k] for k in predict_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_y_buckets = {k: predict_y_buckets[k] for k in predict_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
if neutralized_risk:
predict_risk_buckets = {k: predict_risk_buckets[k] for k in predict_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
else:
predict_risk_buckets = None
predict_codes_bucket = {k: predict_codes_bucket[k] for k in predict_codes_bucket if
k.strftime('%Y-%m-%d') >= start_date}
ret['train'] = {'x': train_x_buckets, 'y': train_y_buckets, 'risk': train_risk_buckets}
ret['predict'] = {'x': predict_x_buckets, 'y': predict_y_buckets, 'risk': predict_risk_buckets,
'code': predict_codes_bucket}
return ret
def fetch_train_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None) -> dict:
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
target_df, factor_df = df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code'] + transformer.names]
target_df, dates, date_label, risk_exp, x_values, y_values, _, _, codes = \
_merge_df(engine, transformer.names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
require(len(dates) >= 2, ValueError,
"No previous data for training for the date {0}".format(ref_date))
end = dates[-2]
start = dates[-batch - 1] if batch <= len(dates) - 1 else dates[0]
else:
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
index = (date_label >= start) & (date_label <= end)
this_raw_x = x_values[index]
this_raw_y = y_values[index]
this_code = codes[index]
if risk_exp is not None:
this_risk_exp = risk_exp[index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ret = dict()
ret['x_names'] = transformer.names
ret['train'] = {'x': pd.DataFrame(ne_x, columns=transformer.names), 'y': ne_y,
'code': this_code}
return ret
def fetch_predict_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fillna: str = None,
fit_target: Union[Transformer, object] = None):
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch - 1) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fillna:
factor_df = factor_df.groupby('trade_date').apply(
lambda x: x.fillna(x.median())).reset_index(
drop=True).dropna()
else:
factor_df = factor_df.dropna()
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
names = transformer.names
if neutralized_risk:
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(neutralized_risk).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_x = pd.merge(train_x, target_df, on=['trade_date', 'code'], how='left')
risk_exp = train_x[neutralized_risk].values.astype(float)
else:
train_x = pd.merge(factor_df, target_df, on=['trade_date', 'code'], how='left')
risk_exp = None
train_x.dropna(inplace=True, subset=train_x.columns[:-1])
x_values = train_x[names].values.astype(float)
y_values = train_x[['dx']].values.astype(float)
date_label = pd.DatetimeIndex(train_x.trade_date).to_pydatetime()
dates = np.unique(date_label)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
left_index = bisect.bisect_left(date_label, start)
right_index = bisect.bisect_right(date_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
sub_dates = date_label[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
ne_x = ne_x[inner_left_index:inner_right_index]
ne_y = ne_y[inner_left_index:inner_right_index]
left_index = bisect.bisect_left(date_label, end)
right_index = bisect.bisect_right(date_label, end)
codes = train_x.code.values[left_index:right_index]
else:
ne_x = None
ne_y = None
codes = None
ret = dict()
ret['x_names'] = transformer.names
ret['predict'] = {'x': pd.DataFrame(ne_x, columns=transformer.names, index=codes), 'code': codes,
'y': ne_y.flatten()}
return ret
|
[
"alphamind.data.processing.factor_processing",
"numpy.unique",
"pandas.DataFrame",
"datetime.datetime.strptime",
"pandas.DatetimeIndex",
"pandas.merge",
"PyFin.api.makeSchedule",
"PyFin.DateUtilities.Period",
"bisect.bisect_right",
"alphamind.utilities.alpha_logger.info",
"PyFin.api.advanceDateByCalendar",
"alphamind.utilities.map_freq",
"bisect.bisect_left",
"alphamind.data.transformer.Transformer"
] |
[((1853, 1874), 'numpy.unique', 'np.unique', (['date_label'], {}), '(date_label)\n', (1862, 1874), True, 'import numpy as np\n'), ((2571, 2728), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'end_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationRule': 'DateGeneration.Forward'}), "(start_date, end_date, frequency, calendar='china.sse',\n dateRule=BizDayConventions.Following, dateGenerationRule=DateGeneration\n .Forward)\n", (2583, 2728), False, 'from PyFin.api import makeSchedule\n'), ((2913, 2932), 'alphamind.utilities.map_freq', 'map_freq', (['frequency'], {}), '(frequency)\n', (2921, 2932), False, 'from alphamind.utilities import map_freq\n'), ((3269, 3318), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""factor data loading finished"""'], {}), "('factor data loading finished')\n", (3286, 3318), False, 'from alphamind.utilities import alpha_logger\n'), ((3850, 3903), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""fit target data loading finished"""'], {}), "('fit target data loading finished')\n", (3867, 3903), False, 'from alphamind.utilities import alpha_logger\n'), ((3978, 4029), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""industry data loading finished"""'], {}), "('industry data loading finished')\n", (3995, 4029), False, 'from alphamind.utilities import alpha_logger\n'), ((4106, 4158), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""benchmark data loading finished"""'], {}), "('benchmark data loading finished')\n", (4123, 4158), False, 'from alphamind.utilities import alpha_logger\n'), ((4245, 4310), 'pandas.merge', 'pd.merge', (['df', 'benchmark_df'], {'on': "['trade_date', 'code']", 'how': '"""left"""'}), "(df, benchmark_df, on=['trade_date', 'code'], how='left')\n", (4253, 4310), True, 'import pandas as pd\n'), ((4320, 4372), 'pandas.merge', 'pd.merge', (['df', 'industry_df'], {'on': "['trade_date', 'code']"}), "(df, industry_df, on=['trade_date', 'code'])\n", (4328, 4372), True, 'import pandas as pd\n'), ((8805, 8860), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""Starting data package fetching ..."""'], {}), "('Starting data package fetching ...')\n", (8822, 8860), False, 'from alphamind.utilities import alpha_logger\n'), ((8879, 8905), 'alphamind.data.transformer.Transformer', 'Transformer', (['alpha_factors'], {}), '(alpha_factors)\n', (8890, 8905), False, 'from alphamind.data.transformer import Transformer\n'), ((9695, 9737), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""data merging finished"""'], {}), "('data merging finished')\n", (9712, 9737), False, 'from alphamind.utilities import alpha_logger\n'), ((10024, 10069), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""Loading data is finished"""'], {}), "('Loading data is finished')\n", (10041, 10069), False, 'from alphamind.utilities import alpha_logger\n'), ((10584, 10632), 'alphamind.utilities.alpha_logger.info', 'alpha_logger.info', (['"""Data processing is finished"""'], {}), "('Data processing is finished')\n", (10601, 10632), False, 'from alphamind.utilities import alpha_logger\n'), ((12834, 12851), 'PyFin.DateUtilities.Period', 'Period', (['frequency'], {}), '(frequency)\n', (12840, 12851), False, 'from PyFin.DateUtilities import Period\n'), ((12945, 13021), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'ref_date', 'p', 'BizDayConventions.Following'], {}), 
"('china.sse', ref_date, p, BizDayConventions.Following)\n", (12966, 13021), False, 'from PyFin.api import advanceDateByCalendar\n'), ((13034, 13192), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'ref_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationRule': 'DateGeneration.Backward'}), "(start_date, ref_date, frequency, calendar='china.sse',\n dateRule=BizDayConventions.Following, dateGenerationRule=DateGeneration\n .Backward)\n", (13046, 13192), False, 'from PyFin.api import makeSchedule\n'), ((13324, 13343), 'alphamind.utilities.map_freq', 'map_freq', (['frequency'], {}), '(frequency)\n', (13332, 13343), False, 'from alphamind.utilities import map_freq\n'), ((15030, 15144), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (15047, 15144), False, 'from alphamind.data.processing import factor_processing\n'), ((15239, 15353), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (15256, 15353), False, 'from alphamind.data.processing import factor_processing\n'), ((16430, 16447), 'PyFin.DateUtilities.Period', 'Period', (['frequency'], {}), '(frequency)\n', (16436, 16447), False, 'from PyFin.DateUtilities import Period\n'), ((16545, 16621), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'ref_date', 'p', 'BizDayConventions.Following'], {}), "('china.sse', ref_date, p, BizDayConventions.Following)\n", (16566, 16621), False, 'from PyFin.api import advanceDateByCalendar\n'), ((16634, 16792), 'PyFin.api.makeSchedule', 'makeSchedule', (['start_date', 'ref_date', 'frequency'], {'calendar': '"""china.sse"""', 'dateRule': 'BizDayConventions.Following', 'dateGenerationRule': 'DateGeneration.Backward'}), "(start_date, ref_date, frequency, calendar='china.sse',\n dateRule=BizDayConventions.Following, dateGenerationRule=DateGeneration\n .Backward)\n", (16646, 16792), False, 'from PyFin.api import makeSchedule\n'), ((16924, 16943), 'alphamind.utilities.map_freq', 'map_freq', (['frequency'], {}), '(frequency)\n', (16932, 16943), False, 'from alphamind.utilities import map_freq\n'), ((18683, 18704), 'numpy.unique', 'np.unique', (['date_label'], {}), '(date_label)\n', (18692, 18704), True, 'import numpy as np\n'), ((1277, 1332), 'pandas.merge', 'pd.merge', (['factor_df', 'risk_df'], {'on': "['trade_date', 'code']"}), "(factor_df, risk_df, on=['trade_date', 'code'])\n", (1285, 1332), True, 'import pandas as pd\n'), ((2379, 2396), 'PyFin.DateUtilities.Period', 'Period', (['frequency'], {}), '(frequency)\n', (2385, 2396), False, 'from PyFin.DateUtilities import Period\n'), ((3037, 3057), 'alphamind.data.transformer.Transformer', 'Transformer', (['factors'], {}), '(factors)\n', (3048, 3057), False, 'from alphamind.data.transformer import Transformer\n'), ((3470, 3526), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'dates[-1]', 'frequency'], {}), "('china.sse', dates[-1], frequency)\n", (3491, 3526), False, 'from PyFin.api import advanceDateByCalendar\n'), ((5199, 5237), 'bisect.bisect_left', 'bisect.bisect_left', 
(['group_label', 'start'], {}), '(group_label, start)\n', (5217, 5237), False, 'import bisect\n'), ((5260, 5296), 'bisect.bisect_left', 'bisect.bisect_left', (['group_label', 'end'], {}), '(group_label, end)\n', (5278, 5296), False, 'import bisect\n'), ((5981, 6095), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (5998, 6095), False, 'from alphamind.data.processing import factor_processing\n'), ((6309, 6348), 'bisect.bisect_right', 'bisect.bisect_right', (['group_label', 'start'], {}), '(group_label, start)\n', (6328, 6348), False, 'import bisect\n'), ((6371, 6408), 'bisect.bisect_right', 'bisect.bisect_right', (['group_label', 'end'], {}), '(group_label, end)\n', (6390, 6408), False, 'import bisect\n'), ((6729, 6843), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (6746, 6843), False, 'from alphamind.data.processing import factor_processing\n'), ((6966, 7000), 'bisect.bisect_left', 'bisect.bisect_left', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (6984, 7000), False, 'import bisect\n'), ((7029, 7064), 'bisect.bisect_right', 'bisect.bisect_right', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (7048, 7064), False, 'import bisect\n'), ((7098, 7167), 'pandas.DataFrame', 'pd.DataFrame', (['ne_x[inner_left_index:inner_right_index]'], {'columns': 'names'}), '(ne_x[inner_left_index:inner_right_index], columns=names)\n', (7110, 7167), True, 'import pandas as pd\n'), ((12798, 12824), 'alphamind.data.transformer.Transformer', 'Transformer', (['alpha_factors'], {}), '(alpha_factors)\n', (12809, 12824), False, 'from alphamind.data.transformer import Transformer\n'), ((13581, 13637), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'dates[-1]', 'frequency'], {}), "('china.sse', dates[-1], frequency)\n", (13602, 13637), False, 'from PyFin.api import advanceDateByCalendar\n'), ((14386, 14428), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['ref_date', '"""%Y-%m-%d"""'], {}), "(ref_date, '%Y-%m-%d')\n", (14406, 14428), True, 'import datetime as dt\n'), ((15518, 15563), 'pandas.DataFrame', 'pd.DataFrame', (['ne_x'], {'columns': 'transformer.names'}), '(ne_x, columns=transformer.names)\n', (15530, 15563), True, 'import pandas as pd\n'), ((16394, 16420), 'alphamind.data.transformer.Transformer', 'Transformer', (['alpha_factors'], {}), '(alpha_factors)\n', (16405, 16420), False, 'from alphamind.data.transformer import Transformer\n'), ((17395, 17451), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'dates[-1]', 'frequency'], {}), "('china.sse', dates[-1], frequency)\n", (17416, 17451), False, 'from PyFin.api import advanceDateByCalendar\n'), ((18104, 18159), 'pandas.merge', 'pd.merge', (['factor_df', 'risk_df'], {'on': "['trade_date', 'code']"}), "(factor_df, risk_df, on=['trade_date', 'code'])\n", (18112, 18159), True, 'import pandas as pd\n'), ((18178, 18245), 'pandas.merge', 'pd.merge', (['train_x', 'target_df'], {'on': "['trade_date', 'code']", 'how': '"""left"""'}), "(train_x, target_df, on=['trade_date', 'code'], how='left')\n", (18186, 18245), True, 
'import pandas as pd\n'), ((18340, 18409), 'pandas.merge', 'pd.merge', (['factor_df', 'target_df'], {'on': "['trade_date', 'code']", 'how': '"""left"""'}), "(factor_df, target_df, on=['trade_date', 'code'], how='left')\n", (18348, 18409), True, 'import pandas as pd\n'), ((18726, 18768), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['ref_date', '"""%Y-%m-%d"""'], {}), "(ref_date, '%Y-%m-%d')\n", (18746, 18768), True, 'import datetime as dt\n'), ((18883, 18920), 'bisect.bisect_left', 'bisect.bisect_left', (['date_label', 'start'], {}), '(date_label, start)\n', (18901, 18920), False, 'import bisect\n'), ((18943, 18979), 'bisect.bisect_right', 'bisect.bisect_right', (['date_label', 'end'], {}), '(date_label, end)\n', (18962, 18979), False, 'import bisect\n'), ((19301, 19415), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (19318, 19415), False, 'from alphamind.data.processing import factor_processing\n'), ((19526, 19640), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (19543, 19640), False, 'from alphamind.data.processing import factor_processing\n'), ((19763, 19797), 'bisect.bisect_left', 'bisect.bisect_left', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (19781, 19797), False, 'import bisect\n'), ((19826, 19861), 'bisect.bisect_right', 'bisect.bisect_right', (['sub_dates', 'end'], {}), '(sub_dates, end)\n', (19845, 19861), False, 'import bisect\n'), ((19997, 20032), 'bisect.bisect_left', 'bisect.bisect_left', (['date_label', 'end'], {}), '(date_label, end)\n', (20015, 20032), False, 'import bisect\n'), ((20055, 20091), 'bisect.bisect_right', 'bisect.bisect_right', (['date_label', 'end'], {}), '(date_label, end)\n', (20074, 20091), False, 'import bisect\n'), ((20308, 20366), 'pandas.DataFrame', 'pd.DataFrame', (['ne_x'], {'columns': 'transformer.names', 'index': 'codes'}), '(ne_x, columns=transformer.names, index=codes)\n', (20320, 20366), True, 'import pandas as pd\n'), ((1168, 1223), 'pandas.merge', 'pd.merge', (['target_df', 'risk_df'], {'on': "['trade_date', 'code']"}), "(target_df, risk_df, on=['trade_date', 'code'])\n", (1176, 1223), True, 'import pandas as pd\n'), ((1786, 1824), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['factor_df.trade_date'], {}), '(factor_df.trade_date)\n', (1802, 1824), True, 'import pandas as pd\n'), ((4169, 4226), 'pandas.merge', 'pd.merge', (['factor_df', 'target_df'], {'on': "['trade_date', 'code']"}), "(factor_df, target_df, on=['trade_date', 'code'])\n", (4177, 4226), True, 'import pandas as pd\n'), ((5593, 5707), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_x'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), '(this_raw_x, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (5610, 5707), False, 'from alphamind.data.processing import factor_processing\n'), ((7580, 7694), 'alphamind.data.processing.factor_processing', 'factor_processing', (['this_raw_y'], {'pre_process': 'pre_process', 'risk_factors': 'this_risk_exp', 'post_process': 'post_process'}), 
'(this_raw_y, pre_process=pre_process, risk_factors=\n this_risk_exp, post_process=post_process)\n', (7597, 7694), False, 'from alphamind.data.processing import factor_processing\n'), ((13967, 14024), 'pandas.merge', 'pd.merge', (['factor_df', 'target_df'], {'on': "['trade_date', 'code']"}), "(factor_df, target_df, on=['trade_date', 'code'])\n", (13975, 14024), True, 'import pandas as pd\n'), ((18618, 18654), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['train_x.trade_date'], {}), '(train_x.trade_date)\n', (18634, 18654), True, 'import pandas as pd\n'), ((2487, 2536), 'PyFin.api.advanceDateByCalendar', 'advanceDateByCalendar', (['"""china.sse"""', 'start_date', 'p'], {}), "('china.sse', start_date, p)\n", (2508, 2536), False, 'from PyFin.api import advanceDateByCalendar\n')]
|
#TODO: use only one (RGB) channel
import numpy as np
import pandas as pd
import os
from torch.utils import data
from torch.utils.data.dataloader import DataLoader as DataLoader
import torch
from torchvision import transforms
from natsort import natsorted, ns
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
dataset_path = "C:\\Users\\User\\Documents\\GitHub\\Csgo-NeuralNetwork\\output\\"
#train_split and test_split must each be between 0.1 and 0.9 and add up to 1
train_split = 0.7
test_split = 0.3
num_epochs = 10
batch_size = 100
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("Running on: %s"%(torch.cuda.get_device_name(device)))
else:
device = torch.device("cpu")
print('running on: CPU')
class CsgoPersonNoPersonDataset(data.Dataset):
"""pretty description."""
length = -1
def __init__(self, root_dir, transform=None):
"""
Args:
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
                on a sample.
"""
self.root_dir = root_dir
self.transform = transform
self.length = 0
# dictionary that marks what the last frame of each folder is
# ie. number of examples in specific folder
self.folder_system = {2426: 'CSGOraw2'}
for folder_index in self.folder_system:
self.length += folder_index
# returns name of folder that contains specific frame
def find_folder(self, idx):
for num_frames in self.folder_system:
if num_frames >= idx:
return str(self.folder_system[num_frames])
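    # e.g. with folder_system == {2426: 'CSGOraw2'}, find_folder(100) -> 'CSGOraw2';
    # an index beyond the largest key falls through the loop and returns None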
def __len__(self):
return self.length
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# sets path and gets txt/jpg files
img_path = self.find_folder(idx)
img_name = "%sframe#%s" % (img_path, idx)
img_path = os.path.join(self.root_dir,
img_path, img_name)
img_path_ext = img_path + '.jpg'
        img = Image.open(img_path_ext)
# img = np.array(img)
label_path = str(img_path) + '.txt'
label = 0
# loads label from disk, converts csv to tensor
label = torch.as_tensor(os.stat(label_path).st_size != 0, dtype=torch.float).reshape((1,))
sample = {'image': img, 'label': label}
# apply transforms
        # TODO: this is a messy hack
if self.transform:
img = self.transform(sample['image'])
# img = img.reshape(172800)
sample['image'] = img
return sample
#defining NN layeres
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 61 * 33, 120)
self.fc2 = nn.Linear(120, 60)
self.fc3 = nn.Linear(60, 1)
self.fc4 = nn.Linear(30, 15)
self.fc5 = nn.Linear(15, 7)
self.fc6 = nn.Linear(7, 1)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 61 * 33)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
#x = F.relu(self.fc4(x))
#x = F.relu(self.fc5(x))
#x = F.relu(self.fc6(x))
return x
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1 or classname.find('Linear') != -1:
torch.nn.init.xavier_uniform_(m.weight.data)
#runs NN in training mode
def train_run(train_loader, criterion, optimizer, device):
losses = []
print(len(train_loader.dataset))
for epoch in range(num_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data['image'], data['label']
#if labels[0].item() == -1:
# continue
#sends batch to gpu
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
#print(f"{epoch}, {i}")
outputs = net(inputs)
#print(f"Labels: {labels.shape}, {labels.dtype}")
#print(f"Outputs: {outputs.shape}, {outputs.dtype}")
loss = criterion(outputs, labels)
losses.append(loss.item())
running_loss += loss.item()
if (i + 1) % 10 == 0: # print every 10 mini-batches
print(f"Labels: {torch.transpose(labels, 0, 1)}")
print(f"Outputs: {torch.transpose(outputs, 0, 1)}")
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 10))
running_loss = 0.0
print("-------------------------------------")
loss.backward()
optimizer.step()
print('Finished Training')
return losses
net = Net().to(device)
net.apply(weights_init)
transform = transforms.Compose([
transforms.Resize([256, 144]),
# transforms.Resize([57600, 1]),
transforms.ToTensor(),
])
dataset = CsgoPersonNoPersonDataset(dataset_path, transform)
dataset_len = len(dataset)
train_split = int(np.floor(dataset_len * train_split))
test_split = int(np.floor(dataset_len * test_split))
while train_split + test_split != dataset_len:
train_split += 1
train_set, test_set = torch.utils.data.random_split(\
dataset, [train_split, test_split])
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=False, drop_last=True)
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True, drop_last=True)
def my_binary_loss(output, target):
    # simple element-wise product mean; unused, since criterion is reassigned below
    return (output * target).mean()
criterion = nn.MSELoss()
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(net.parameters())
# for i in range(500):
# image, label = dataset[i]['image'], dataset[i]['label']
# print(label)
losses = train_run(train_loader, criterion, optimizer, device)
print("------------------------------------------------------------")
print("Losses")
for loss in losses:
print(loss)
print("------------------------------------------------------------")
|
[
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.init.xavier_uniform_",
"torch.utils.data.dataloader.DataLoader",
"torchvision.transforms.ToTensor",
"torch.utils.data.random_split",
"numpy.floor",
"torch.transpose",
"torch.is_tensor",
"torchvision.transforms.Resize",
"torch.nn.BCEWithLogitsLoss",
"torch.device",
"torch.cuda.get_device_name",
"PIL.Image.open",
"os.path.join",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"os.stat"
] |
[((647, 672), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (670, 672), False, 'import torch\n'), ((5829, 5894), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset', '[train_split, test_split]'], {}), '(dataset, [train_split, test_split])\n', (5858, 5894), False, 'import torch\n'), ((5921, 6008), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'batch_size', 'shuffle': '(False)', 'drop_last': '(True)'}), '(dataset=train_set, batch_size=batch_size, shuffle=False,\n drop_last=True)\n', (5931, 6008), True, 'from torch.utils.data.dataloader import DataLoader as DataLoader\n'), ((6019, 6105), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset=test_set, batch_size=batch_size, shuffle=True, drop_last\n =True)\n', (6029, 6105), True, 'from torch.utils.data.dataloader import DataLoader as DataLoader\n'), ((6187, 6199), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6197, 6199), True, 'import torch.nn as nn\n'), ((6212, 6234), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (6232, 6234), True, 'import torch.nn as nn\n'), ((687, 709), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (699, 709), False, 'import torch\n'), ((794, 813), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (806, 813), False, 'import torch\n'), ((5649, 5684), 'numpy.floor', 'np.floor', (['(dataset_len * train_split)'], {}), '(dataset_len * train_split)\n', (5657, 5684), True, 'import numpy as np\n'), ((5703, 5737), 'numpy.floor', 'np.floor', (['(dataset_len * test_split)'], {}), '(dataset_len * test_split)\n', (5711, 5737), True, 'import numpy as np\n'), ((1874, 1894), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1889, 1894), False, 'import torch\n'), ((2081, 2128), 'os.path.join', 'os.path.join', (['self.root_dir', 'img_path', 'img_name'], {}), '(self.root_dir, img_path, img_name)\n', (2093, 2128), False, 'import os\n'), ((2216, 2240), 'PIL.Image.open', 'Image.open', (['img_path_ext'], {}), '(img_path_ext)\n', (2226, 2240), False, 'from PIL import Image\n'), ((2900, 2918), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(6)', '(5)'], {}), '(3, 6, 5)\n', (2909, 2918), True, 'import torch.nn as nn\n'), ((2940, 2958), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (2952, 2958), True, 'import torch.nn as nn\n'), ((2980, 2999), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (2989, 2999), True, 'import torch.nn as nn\n'), ((3021, 3039), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (3033, 3039), True, 'import torch.nn as nn\n'), ((3059, 3087), 'torch.nn.Linear', 'nn.Linear', (['(16 * 61 * 33)', '(120)'], {}), '(16 * 61 * 33, 120)\n', (3068, 3087), True, 'import torch.nn as nn\n'), ((3107, 3125), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(60)'], {}), '(120, 60)\n', (3116, 3125), True, 'import torch.nn as nn\n'), ((3145, 3161), 'torch.nn.Linear', 'nn.Linear', (['(60)', '(1)'], {}), '(60, 1)\n', (3154, 3161), True, 'import torch.nn as nn\n'), ((3181, 3198), 'torch.nn.Linear', 'nn.Linear', (['(30)', '(15)'], {}), '(30, 15)\n', (3190, 3198), True, 'import torch.nn as nn\n'), ((3218, 3234), 'torch.nn.Linear', 'nn.Linear', (['(15)', '(7)'], {}), '(15, 7)\n', (3227, 3234), True, 'import torch.nn as nn\n'), ((3254, 3269), 'torch.nn.Linear', 'nn.Linear', 
(['(7)', '(1)'], {}), '(7, 1)\n', (3263, 3269), True, 'import torch.nn as nn\n'), ((3776, 3820), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (3805, 3820), False, 'import torch\n'), ((5442, 5471), 'torchvision.transforms.Resize', 'transforms.Resize', (['[256, 144]'], {}), '([256, 144])\n', (5459, 5471), False, 'from torchvision import transforms\n'), ((5514, 5535), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5533, 5535), False, 'from torchvision import transforms\n'), ((738, 772), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['device'], {}), '(device)\n', (764, 772), False, 'import torch\n'), ((2424, 2443), 'os.stat', 'os.stat', (['label_path'], {}), '(label_path)\n', (2431, 2443), False, 'import os\n'), ((4941, 4970), 'torch.transpose', 'torch.transpose', (['labels', '(0)', '(1)'], {}), '(labels, 0, 1)\n', (4956, 4970), False, 'import torch\n'), ((5008, 5038), 'torch.transpose', 'torch.transpose', (['outputs', '(0)', '(1)'], {}), '(outputs, 0, 1)\n', (5023, 5038), False, 'import torch\n')]
|
import re
import numpy as np
import pandas as pd
import scipy.stats as stats
R_REGEX = re.compile('(.*):(.*)-(.*)')
R_REGEX_STRAND = re.compile('(.*):(.*)-(.*):(.*)')
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
for i in range(0, len(l), n):
yield l[i:i + n]
def estimate_allele_frequency(ac, an, a=1, b=100):
"""
    Estimate allele frequencies from allele counts using a beta(a, b) prior.
Parameters:
-----------
ac : array-like
Array-like object with the observed allele counts for each variant. If
ac is a pandas Series, the output dataframe will have the same index as
ac.
an : array-like
Array-like object with the number of haplotypes that were genotyped.
a : float
Parameter for prior distribution beta(a, b).
b : float
Parameter for prior distribution beta(a, b).
Returns
-------
out : pandas.DataFrame
Pandas dataframe with allele frequency estimate
"""
    # 95% central (equal-tailed) credible interval from the beta posterior
td = dict(zip(['ci_lower', 'ci_upper'],
stats.beta(a + ac, b + an - ac).interval(0.95)))
td['af'] = (a + ac) / (a + b + an)
td['af_mle'] = np.array(ac).astype(float) / np.array(an)
out = pd.DataFrame(td)[['af_mle', 'af', 'ci_lower', 'ci_upper']]
if type(ac) == pd.Series:
out.index = ac.index
return(out)
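# Hedged worked example (not part of the original module): with the default
# beta(1, 100) prior, ac = 5 alternate alleles out of an = 100 haplotypes gives
#   af_mle = 5 / 100 = 0.05
#   af     = (1 + 5) / (1 + 100 + 100) = 6 / 201 ~= 0.030  (posterior mean, shrunk toward 0)
# e.g. estimate_allele_frequency(pd.Series([5]), pd.Series([100]))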
def transform_standard_normal(df):
"""Transform a series or the rows of a dataframe to the values of a standard
normal based on rank."""
import pandas as pd
import scipy.stats as stats
if type(df) == pd.core.frame.DataFrame:
gc_ranks = df.rank(axis=1)
gc_ranks = gc_ranks / (gc_ranks.shape[1] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.DataFrame(std_norm, index=gc_ranks.index,
columns=gc_ranks.columns)
elif type(df) == pd.core.series.Series:
gc_ranks = df.rank()
gc_ranks = gc_ranks / (gc_ranks.shape[0] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.Series(std_norm, index=df.index)
return std_norm
def read_gzipped_text_url(url):
"""Read a gzipped text file from a URL and return
contents as a string."""
import urllib2
import zlib
from StringIO import StringIO
opener = urllib2.build_opener()
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
respond = opener.open(request)
compressedData = respond.read()
respond.close()
opener.close()
compressedDataBuf = StringIO(compressedData)
d = zlib.decompressobj(16+zlib.MAX_WBITS)
buffer = compressedDataBuf.read(1024)
#saveFile = open('/tmp/test.txt', "wb")
s = []
while buffer:
s.append(d.decompress(buffer))
buffer = compressedDataBuf.read(1024)
s = ''.join(s)
return s
def parse_region(region):
"""
Parse region of type chr1:10-20 or chr1:10-20:+
Parameters:
-----------
region : str
Region of type chr1:10-20 or chr1:10-20:+.
Returns
-------
groups : tuple
Tuple of groups from regex e.g. (chr1, 10, 20) or (chr1, 10, 20, +).
"""
m = R_REGEX_STRAND.search(region)
if not m:
m = R_REGEX.search(region)
if m:
groups = m.groups()
return groups
else:
return None
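# Hedged examples of the expected output (derived from the docstring above):
#   parse_region('chr1:10-20')    -> ('chr1', '10', '20')
#   parse_region('chr1:10-20:+')  -> ('chr1', '10', '20', '+')
#   parse_region('not a region')  -> None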
def _sample_names(files, kwargs):
"""
Make sample (or other) names.
Parameters:
-----------
files : list of string
Typically a list of file paths although could be any list of strings
that you want to make names for. If neither names nor define_sample_name
are provided, then files is returned as is.
kwargs : dict
kwargs from another function. Can include the following keys with
appropriate arguments.
names : list of strings
Names to use. Overrides define_sample_name if provided.
define_sample_name : function that takes string as input
Function mapping string to name. For instance, you may have a sample
name in a file path and use a regex to extract it.
"""
if 'define_sample_name' not in kwargs.keys():
define_sample_name = lambda x: x
else:
define_sample_name = kwargs['define_sample_name']
if 'names' in kwargs.keys():
names = kwargs['names']
else:
names = [define_sample_name(f) for f in files]
assert len(names) == len(files)
return names
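# Hedged example (hypothetical paths, following the docstring above):
#   _sample_names(['data/s1.bam', 'data/s2.bam'],
#                 {'define_sample_name': lambda p: p.split('/')[-1].split('.')[0]})
#   -> ['s1', 's2']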
|
[
"StringIO.StringIO",
"pandas.Series",
"zlib.decompressobj",
"re.compile",
"scipy.stats.norm.ppf",
"urllib2.Request",
"numpy.array",
"scipy.stats.beta",
"pandas.DataFrame",
"urllib2.build_opener"
] |
[((89, 117), 're.compile', 're.compile', (['"""(.*):(.*)-(.*)"""'], {}), "('(.*):(.*)-(.*)')\n", (99, 117), False, 'import re\n'), ((135, 168), 're.compile', 're.compile', (['"""(.*):(.*)-(.*):(.*)"""'], {}), "('(.*):(.*)-(.*):(.*)')\n", (145, 168), False, 'import re\n'), ((2426, 2448), 'urllib2.build_opener', 'urllib2.build_opener', ([], {}), '()\n', (2446, 2448), False, 'import urllib2\n'), ((2464, 2484), 'urllib2.Request', 'urllib2.Request', (['url'], {}), '(url)\n', (2479, 2484), False, 'import urllib2\n'), ((2669, 2693), 'StringIO.StringIO', 'StringIO', (['compressedData'], {}), '(compressedData)\n', (2677, 2693), False, 'from StringIO import StringIO\n'), ((2702, 2741), 'zlib.decompressobj', 'zlib.decompressobj', (['(16 + zlib.MAX_WBITS)'], {}), '(16 + zlib.MAX_WBITS)\n', (2720, 2741), False, 'import zlib\n'), ((1321, 1333), 'numpy.array', 'np.array', (['an'], {}), '(an)\n', (1329, 1333), True, 'import numpy as np\n'), ((1344, 1360), 'pandas.DataFrame', 'pd.DataFrame', (['td'], {}), '(td)\n', (1356, 1360), True, 'import pandas as pd\n'), ((1832, 1856), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['gc_ranks'], {}), '(gc_ranks)\n', (1846, 1856), True, 'import scipy.stats as stats\n'), ((1876, 1946), 'pandas.DataFrame', 'pd.DataFrame', (['std_norm'], {'index': 'gc_ranks.index', 'columns': 'gc_ranks.columns'}), '(std_norm, index=gc_ranks.index, columns=gc_ranks.columns)\n', (1888, 1946), True, 'import pandas as pd\n'), ((2126, 2150), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['gc_ranks'], {}), '(gc_ranks)\n', (2140, 2150), True, 'import scipy.stats as stats\n'), ((2170, 2205), 'pandas.Series', 'pd.Series', (['std_norm'], {'index': 'df.index'}), '(std_norm, index=df.index)\n', (2179, 2205), True, 'import pandas as pd\n'), ((1292, 1304), 'numpy.array', 'np.array', (['ac'], {}), '(ac)\n', (1300, 1304), True, 'import numpy as np\n'), ((1185, 1216), 'scipy.stats.beta', 'stats.beta', (['(a + ac)', '(b + an - ac)'], {}), '(a + ac, b + an - ac)\n', (1195, 1216), True, 'import scipy.stats as stats\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Mon Aug 17 11:31:32 2020
Distance-Controlled Boundaries Coefficient (DCBC) evaluation
for a functional parcellation of brain cortex
INPUTS:
    sn: The subject numbers to evaluate
hems: Hemisphere to test. 'L' - left hemisphere; 'R' - right hemisphere; 'all' - both hemispheres
binWidth: The spatial binning width in mm, default 1 mm
maxDist: The maximum distance for vertices pairs
parcels: The cortical parcellation labels (integer value) to be evaluated, shape is (N,)
N is the number of vertices, 0 - medial wall
condType: The condition type for evaluating
'unique' - evaluation will be done by using unique task conditions of the task set
'all' - evaluation will be done by all task conditions of the task set
taskSet: The task set of MDTB to use for evaluating. 1 - taskset A; 2 - taskset B; [1,2] - both
resolution: The resolution of surface space, either 32k or 164k, 32k as default
distType: The distance metric of vertices pairs, for example Dijkstra's distance, GOD distance
Euclidean distance. Dijkstra's distance as default
icoRes: Icosahedron resolution, 42, 162, 362, 642, 1002, ... default to use 2562
mwallFile: The medial wall to be excluded from the evaluation
OUTPUT:
M: Gifti object- can be saved as a *.func.gii or *.label.gii file
Author: <NAME>
'''
import os
import numpy as np
import pandas as pd
import scipy.io as spio
from scipy.sparse import find
import nibabel as nb
def eval_DCBC(sn=[2],subj_name=['s02'], hems='L', maxDist=35, binWidth=1, parcels='',
condType='unique', taskSet=[1],resolution='32k', distType='Dijkstra',
icoRes=162, mWallFile='icos_162'):
taskConds = pd.read_table('DCBC/sc1_sc2_taskConds.txt', delim_whitespace=True)
numBins = int(np.floor(maxDist / binWidth))
    if distType == 'Dijkstra':
        dist = spio.loadmat("DCBC/distAvrg_sp.mat")['avrgDs']
    elif distType == 'Sphere':
        dist = spio.loadmat("DCBC/distSphere_sp.mat")['avrgDs']
else:
raise TypeError("Distance type cannot be recognized!")
# Determine which hemisphere shall be evaluated
    if hems == 'all':
        hems = ['L', 'R']
    elif hems in ('L', 'R'):
        hems = [hems]
else:
raise TypeError("Hemisphere type cannot be recognized!")
# Initialization of the result buffers
studyNum, SN, hem = [], [], []
N, bwParcel, distmin, distmax, meanCorr, weightedCorr = [], [], [], [], [], []
for h in hems:
mWall = np.where(parcels == 0)[0]
parcels = np.delete(parcels, mWall) # remove medial wall
parcels = np.abs(parcels - parcels[:, np.newaxis])
dist=dist.todense()
dist = np.delete(dist, mWall, 0)
dist = np.delete(dist, mWall, 1)
row, col, dist = find(dist)
sameRegion = np.zeros((dist.shape[0],), dtype=int)
for i in range(len(row)):
if parcels[row[i]][col[i]] == 0:
sameRegion[i] = 1 # within-parcel
else:
sameRegion[i] = 2 # between-parcel
del parcels
for ts in taskSet:
            this_conds = taskConds[taskConds['StudyNum'] == ts]  # restrict to this task set without overwriting taskConds
            if condType == 'unique': # unique conditions in taskset ts
                condIdx = this_conds['condNum'][this_conds['overlap']==0]
            elif condType == 'all': # all conditions in taskset ts
                condIdx = this_conds['condNum']
            else:
                raise TypeError("Invalid condition type input!")
for s in sn:
this_wcon = nb.load("DCBC/%s/%s.%s.sc%s.con.%s.func.gii" %
(subj_name[s-1],subj_name[s-1], h, ts, resolution))
this_wcon = [x.data for x in this_wcon.darrays]
this_wcon = np.reshape(this_wcon, (len(this_wcon), len(this_wcon[0]))).transpose()
res = np.sqrt(this_wcon[:,-1])
this_wcon = np.delete(this_wcon, [0, this_wcon.shape[1] - 1], axis=1) # remove instruction
this_wcon = np.concatenate((this_wcon, np.zeros((this_wcon.shape[0], 1))), axis=1) # add rest
for i in range(this_wcon.shape[0]): # noise normalize
this_wcon[i, :] = this_wcon[i, :] / res[i]
this_wcon = np.delete(this_wcon, mWall, axis=0)
this_wcon = this_wcon[:,condIdx-1] # take the right subset
mean_wcon = this_wcon.mean(1)
for i in range(this_wcon.shape[0]):
this_wcon[i, :] = this_wcon[i, :] - mean_wcon[i]
this_wcon = this_wcon.astype('float32').transpose()
K=this_wcon.shape[0]
del res, mean_wcon
SD = np.sqrt(np.sum(np.square(this_wcon), axis=0)/K) # standard deviation
SD = np.reshape(SD, (SD.shape[0], 1))
VAR = np.matmul(SD, SD.transpose())
COV = np.matmul(this_wcon.transpose(), this_wcon) / K
VAR = VAR[row,col]
COV = COV[row,col]
del SD, this_wcon
print("\n")
for bw in range(1,3):
for i in range(numBins):
print(".")
inBin = np.zeros((dist.shape[0],), dtype=int)
for j in range(len(inBin)):
if (dist[j] > i*binWidth) & (dist[j] <= (i+1)*binWidth) & (sameRegion[j] == bw):
inBin[j] = 1
# inBin = np.where(dist>i*binWidth) & (dist<=(i+1)*binWidth) & (sameRegion==bw)
# inBin = np.reshape(inBin, (inBin.shape[1],))
N = np.append(N, np.count_nonzero(inBin == 1))
studyNum = np.append(studyNum, ts)
SN = np.append(SN, s)
hem = np.append(hem, h)
bwParcel = np.append(bwParcel, bw - 1)
distmin = np.append(distmin, i * binWidth)
distmax = np.append(distmax, (i + 1) * binWidth)
meanCorr = np.append(meanCorr, np.nanmean(COV[inBin == 1]) / np.nanmean(VAR[inBin == 1]))
del inBin
del VAR, COV
num_w = N[bwParcel == 0]
num_b = N[bwParcel == 1]
weight = 1/(1/num_w + 1/num_b)
weight = weight / np.sum(weight)
            weightedCorr = np.append(weightedCorr, meanCorr * weight)
print("\n")
struct = {
"SN": SN,
"hem": hem,
"studyNum": studyNum,
"N": N,
"bwParcel": bwParcel,
"distmin": distmin,
"distmax":distmax,
"meanCorr": meanCorr,
"weightedCorr": weightedCorr
}
return struct
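# Hedged usage sketch (not part of the original script; assumes the DCBC/ data
# files are in place and `parcels` is an (N,) integer label vector with
# 0 marking the medial wall):
# parcels = nb.load('my_parcellation.L.label.gii').darrays[0].data  # hypothetical path
# res = eval_DCBC(sn=[2], subj_name=['s02'], hems='L', parcels=parcels)
# res['meanCorr'] holds the per-bin mean correlations, split by res['bwParcel']
# (0 = within-parcel vertex pairs, 1 = between-parcel vertex pairs).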
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.reshape",
"nibabel.load",
"numpy.where",
"numpy.delete",
"scipy.io.loadmat",
"numpy.floor",
"numpy.square",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.count_nonzero",
"numpy.nanmean",
"pandas.read_table",
"scipy.sparse.find"
] |
[((1943, 2009), 'pandas.read_table', 'pd.read_table', (['"""DCBC/sc1_sc2_taskConds.txt"""'], {'delim_whitespace': '(True)'}), "('DCBC/sc1_sc2_taskConds.txt', delim_whitespace=True)\n", (1956, 2009), True, 'import pandas as pd\n'), ((2028, 2056), 'numpy.floor', 'np.floor', (['(maxDist / binWidth)'], {}), '(maxDist / binWidth)\n', (2036, 2056), True, 'import numpy as np\n'), ((2788, 2813), 'numpy.delete', 'np.delete', (['parcels', 'mWall'], {}), '(parcels, mWall)\n', (2797, 2813), True, 'import numpy as np\n'), ((2853, 2893), 'numpy.abs', 'np.abs', (['(parcels - parcels[:, np.newaxis])'], {}), '(parcels - parcels[:, np.newaxis])\n', (2859, 2893), True, 'import numpy as np\n'), ((2938, 2963), 'numpy.delete', 'np.delete', (['dist', 'mWall', '(0)'], {}), '(dist, mWall, 0)\n', (2947, 2963), True, 'import numpy as np\n'), ((2979, 3004), 'numpy.delete', 'np.delete', (['dist', 'mWall', '(1)'], {}), '(dist, mWall, 1)\n', (2988, 3004), True, 'import numpy as np\n'), ((3030, 3040), 'scipy.sparse.find', 'find', (['dist'], {}), '(dist)\n', (3034, 3040), False, 'from scipy.sparse import find\n'), ((3062, 3099), 'numpy.zeros', 'np.zeros', (['(dist.shape[0],)'], {'dtype': 'int'}), '((dist.shape[0],), dtype=int)\n', (3070, 3099), True, 'import numpy as np\n'), ((2105, 2141), 'scipy.io.loadmat', 'spio.loadmat', (['"""DCBC/distAvrg_sp.mat"""'], {}), "('DCBC/distAvrg_sp.mat')\n", (2117, 2141), True, 'import scipy.io as spio\n'), ((2744, 2766), 'numpy.where', 'np.where', (['(parcels == 0)'], {}), '(parcels == 0)\n', (2752, 2766), True, 'import numpy as np\n'), ((2198, 2236), 'scipy.io.loadmat', 'spio.loadmat', (['"""DCBC/distSphere_sp.mat"""'], {}), "('DCBC/distSphere_sp.mat')\n", (2210, 2236), True, 'import scipy.io as spio\n'), ((3807, 3915), 'nibabel.load', 'nb.load', (["('DCBC/%s/%s.%s.sc%s.con.%s.func.gii' % (subj_name[s - 1], subj_name[s - 1],\n h, ts, resolution))"], {}), "('DCBC/%s/%s.%s.sc%s.con.%s.func.gii' % (subj_name[s - 1], subj_name\n [s - 1], h, ts, resolution))\n", (3814, 3915), True, 'import nibabel as nb\n'), ((4127, 4152), 'numpy.sqrt', 'np.sqrt', (['this_wcon[:, -1]'], {}), '(this_wcon[:, -1])\n', (4134, 4152), True, 'import numpy as np\n'), ((4180, 4237), 'numpy.delete', 'np.delete', (['this_wcon', '[0, this_wcon.shape[1] - 1]'], {'axis': '(1)'}), '(this_wcon, [0, this_wcon.shape[1] - 1], axis=1)\n', (4189, 4237), True, 'import numpy as np\n'), ((4532, 4567), 'numpy.delete', 'np.delete', (['this_wcon', 'mWall'], {'axis': '(0)'}), '(this_wcon, mWall, axis=0)\n', (4541, 4567), True, 'import numpy as np\n'), ((5064, 5096), 'numpy.reshape', 'np.reshape', (['SD', '(SD.shape[0], 1)'], {}), '(SD, (SD.shape[0], 1))\n', (5074, 5096), True, 'import numpy as np\n'), ((6737, 6765), 'numpy.append', 'np.append', (['(meanCorr * weight)'], {}), '(meanCorr * weight)\n', (6746, 6765), True, 'import numpy as np\n'), ((6691, 6705), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (6697, 6705), True, 'import numpy as np\n'), ((4314, 4347), 'numpy.zeros', 'np.zeros', (['(this_wcon.shape[0], 1)'], {}), '((this_wcon.shape[0], 1))\n', (4322, 4347), True, 'import numpy as np\n'), ((5502, 5539), 'numpy.zeros', 'np.zeros', (['(dist.shape[0],)'], {'dtype': 'int'}), '((dist.shape[0],), dtype=int)\n', (5510, 5539), True, 'import numpy as np\n'), ((6029, 6052), 'numpy.append', 'np.append', (['studyNum', 'ts'], {}), '(studyNum, ts)\n', (6038, 6052), True, 'import numpy as np\n'), ((6082, 6098), 'numpy.append', 'np.append', (['SN', 's'], {}), '(SN, s)\n', (6091, 6098), True, 'import numpy as np\n'), ((6129, 6146), 
'numpy.append', 'np.append', (['hem', 'h'], {}), '(hem, h)\n', (6138, 6146), True, 'import numpy as np\n'), ((6182, 6209), 'numpy.append', 'np.append', (['bwParcel', '(bw - 1)'], {}), '(bwParcel, bw - 1)\n', (6191, 6209), True, 'import numpy as np\n'), ((6244, 6276), 'numpy.append', 'np.append', (['distmin', '(i * binWidth)'], {}), '(distmin, i * binWidth)\n', (6253, 6276), True, 'import numpy as np\n'), ((6311, 6349), 'numpy.append', 'np.append', (['distmax', '((i + 1) * binWidth)'], {}), '(distmax, (i + 1) * binWidth)\n', (6320, 6349), True, 'import numpy as np\n'), ((4989, 5009), 'numpy.square', 'np.square', (['this_wcon'], {}), '(this_wcon)\n', (4998, 5009), True, 'import numpy as np\n'), ((5964, 5992), 'numpy.count_nonzero', 'np.count_nonzero', (['(inBin == 1)'], {}), '(inBin == 1)\n', (5980, 5992), True, 'import numpy as np\n'), ((6405, 6432), 'numpy.nanmean', 'np.nanmean', (['COV[inBin == 1]'], {}), '(COV[inBin == 1])\n', (6415, 6432), True, 'import numpy as np\n'), ((6435, 6462), 'numpy.nanmean', 'np.nanmean', (['VAR[inBin == 1]'], {}), '(VAR[inBin == 1])\n', (6445, 6462), True, 'import numpy as np\n')]
|
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional
import numpy as np
class Rotate(nn.Module):
"""
Rotate the image by random angle between -degrees and degrees.
"""
def __init__(self, degrees, interpolation_method='nearest'):
super(Rotate, self).__init__()
self.degrees = degrees
self.interpolation_method = interpolation_method
def forward(self, noised_and_cover):
rotation_angle = np.random.uniform(-self.degrees, self.degrees)
noised_image = noised_and_cover[0]
noised_and_cover[0] = functional.rotate(noised_image, rotation_angle)
return noised_and_cover
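# Hedged usage sketch (not part of the original module): the noiser expects a
# [noised_image, cover_image] list and rotates only the first element.
# import torch
# noised = torch.rand(1, 3, 64, 64)      # hypothetical batch
# cover = noised.clone()
# rotated, _ = Rotate(degrees=10)([noised, cover])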
|
[
"torchvision.transforms.functional.rotate",
"numpy.random.uniform"
] |
[((488, 534), 'numpy.random.uniform', 'np.random.uniform', (['(-self.degrees)', 'self.degrees'], {}), '(-self.degrees, self.degrees)\n', (505, 534), True, 'import numpy as np\n'), ((608, 655), 'torchvision.transforms.functional.rotate', 'functional.rotate', (['noised_image', 'rotation_angle'], {}), '(noised_image, rotation_angle)\n', (625, 655), False, 'from torchvision.transforms import functional\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import copy
# In[22]:
# adapted from: https://www.geeksforgeeks.org/merge-sort/
def RecursiveMergeSort(input_array, is_first = True):
time_start = time.time()
compare_time = 0
if is_first:
        sort_array = copy.deepcopy(input_array) # sort a copy so the original array is not changed
else:
sort_array = input_array
if len(sort_array) > 1: # stop base
mid = len(sort_array)//2
left_array = sort_array[:mid]
right_array = sort_array[mid:]
# recursive
left_temp = RecursiveMergeSort(left_array, is_first = False)
if left_temp != None:
compare_time += left_temp[0]
right_temp = RecursiveMergeSort(right_array, is_first = False)
if right_temp != None:
compare_time += right_temp[0]
# merge part
i = j = k = 0
while i < len(left_array) and j < len(right_array):
compare_time += 1
if left_array[i] < right_array[j]:
sort_array[k] = left_array[i]
i += 1
else:
sort_array[k] = right_array[j]
j += 1
k += 1
while i < len(left_array):
sort_array[k] = left_array[i]
k += 1
i += 1
while j < len(right_array):
sort_array[k] = right_array[j]
k += 1
j += 1
time_finish = time.time()
time_run = time_finish - time_start
# check if sort_array is sorted, of course
#if is_first:
# print(sort_array)
return compare_time, time_run
# iterative merge sort
# adapted from: https://www.geeksforgeeks.org/iterative-merge-sort/
def IterativeMergeSort(input_array):
time_start = time.time()
compare_time = 0
current_size = 1
    sort_array = copy.deepcopy(input_array) # sort a copy so the original array is not changed
while current_size < len(sort_array) - 1:
left = 0
while left < len(sort_array)-1:
mid = left + current_size - 1
            right = min(2 * current_size + left - 1, len(sort_array) - 1)
# Merge each subarray
compare_time += merge(sort_array, left, mid, right)
left = left + current_size*2
# have new sixe for subarray
current_size = 2 * current_size
time_finish = time.time()
time_run = time_finish - time_start
return compare_time, time_run
def merge(input_array, left, mid, right):
compare_time = 0
# length for each subarray to be merged
n1 = mid - left + 1
n2 = right - mid
    # create zero-filled subarrays
L = [0] * n1
R = [0] * n2
for i in range(0, n1):
L[i] = input_array[left + i]
for i in range(0, n2):
R[i] = input_array[mid + i + 1]
# merge
i, j, k = 0, 0, left
while i < n1 and j < n2:
compare_time += 1
if L[i] > R[j]:
input_array[k] = R[j]
j += 1
else:
input_array[k] = L[i]
i += 1
k += 1
while i < n1:
input_array[k] = L[i]
i += 1
k += 1
while j < n2:
input_array[k] = R[j]
j += 1
k += 1
return compare_time
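# In[ ]:
# Hedged sanity check (not part of the original notebook): both variants return a
# (comparison_count, runtime) tuple; neither mutates its input nor returns the
# sorted array, because each one sorts a deep copy internally.
_demo = np.random.randint(0, 100, 16)
print('recursive comparisons:', RecursiveMergeSort(_demo)[0])
print('iterative comparisons:', IterativeMergeSort(_demo)[0])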
# In[23]:
input_1024_0 = np.loadtxt('./data/data0.1024', int)
input_2048_0 = np.loadtxt('./data/data0.2048', int)
input_4096_0 = np.loadtxt('./data/data0.4096', int)
input_8192_0 = np.loadtxt('./data/data0.8192', int)
input_16384_0 = np.loadtxt('./data/data0.16384', int)
input_32768_0 = np.loadtxt('./data/data0.32768', int)
input_1024_1 = np.loadtxt('./data/data1.1024', int)
input_2048_1 = np.loadtxt('./data/data1.2048', int)
input_4096_1 = np.loadtxt('./data/data1.4096', int)
input_8192_1 = np.loadtxt('./data/data1.8192', int)
input_16384_1 = np.loadtxt('./data/data1.16384', int)
input_32768_1 = np.loadtxt('./data/data1.32768', int)
input_data = [input_1024_0, input_1024_1, input_2048_0, input_2048_1, input_4096_0, input_4096_1, input_8192_0, input_8192_1, input_16384_0, input_16384_1, input_32768_0, input_32768_1]
# In[24]:
result = []
for i in input_data:
result.append(RecursiveMergeSort(i))
print(result)
# In[8]:
recursive_merge_compare_0 = []
recursive_merge_compare_1 = []
recursive_merge_runtime_0 = []
recursive_merge_runtime_1 = []
for i in range(0, len(result), 2):
recursive_merge_compare_0.append(result[i][0])
recursive_merge_runtime_0.append(result[i][1])
recursive_merge_compare_1.append(result[i+1][0])
recursive_merge_runtime_1.append(result[i+1][1])
print(recursive_merge_compare_1)
# In[9]:
result = []
for i in input_data:
result.append(IterativeMergeSort(i))
print(result)
# In[10]:
iterative_merge_compare_0 = []
iterative_merge_compare_1 = []
iterative_merge_runtime_0 = []
iterative_merge_runtime_1 = []
for i in range(0, len(result), 2):
iterative_merge_compare_0.append(result[i][0])
iterative_merge_runtime_0.append(result[i][1])
iterative_merge_compare_1.append(result[i+1][0])
iterative_merge_runtime_1.append(result[i+1][1])
print(iterative_merge_compare_1)
# In[11]:
np.savetxt('./result/recursice compare 0.txt', recursive_merge_compare_0, fmt='%f')
np.savetxt('./result/recursice compare 1.txt', recursive_merge_compare_1, fmt='%f')
np.savetxt('./result/recursice runtime 0.txt', recursive_merge_runtime_0, fmt='%f')
np.savetxt('./result/recursice runtime 1.txt', recursive_merge_runtime_1, fmt='%f')
np.savetxt('./result/iterative compare 0.txt', iterative_merge_compare_0, fmt='%f')
np.savetxt('./result/iterative compare 1.txt', iterative_merge_compare_1, fmt='%f')
np.savetxt('./result/iterative runtime 0.txt', iterative_merge_runtime_0, fmt='%f')
np.savetxt('./result/iterative runtime 1.txt', iterative_merge_runtime_1, fmt='%f')
# In[12]:
input_size = [1024, 2048, 4096, 8192, 16384, 32768]
plt.figure()
plt.plot(input_size, recursive_merge_compare_0, label = 'recursive compare times with sorted data')
plt.plot(input_size, recursive_merge_compare_1, label = 'recursive compare times with random data')
plt.plot(input_size, iterative_merge_compare_0, label = 'iterative compare times with sorted data')
plt.plot(input_size, iterative_merge_compare_1, label = 'iterative compare times with random data')
plt.legend(loc='upper left')
plt.title('Compare times as function of input size')
plt.xlabel('input size')
plt.ylabel('compare times')
#ax = plt.gca()
#ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.savefig('./result/compare times vs input.jpg')
plt.show()
# In[13]:
plt.figure()
plt.plot(input_size, recursive_merge_runtime_0, label = 'recursive runtime with sorted data')
plt.plot(input_size, recursive_merge_runtime_1, label = 'recursive runtime with random data')
plt.plot(input_size, iterative_merge_runtime_0, label = 'iterative runtime with sorted data')
plt.plot(input_size, iterative_merge_runtime_1, label = 'iterative runtime with random data')
plt.legend(loc='upper left')
plt.title('Runtime as function of input size')
plt.xlabel('input size')
plt.ylabel('runtime(s)')
#ax = plt.gca()
#ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.savefig('./result/runtime vs input.jpg')
plt.show()
# In[ ]:
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.savetxt",
"copy.deepcopy",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((3620, 3656), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.1024"""', 'int'], {}), "('./data/data0.1024', int)\n", (3630, 3656), True, 'import numpy as np\n'), ((3672, 3708), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.2048"""', 'int'], {}), "('./data/data0.2048', int)\n", (3682, 3708), True, 'import numpy as np\n'), ((3724, 3760), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.4096"""', 'int'], {}), "('./data/data0.4096', int)\n", (3734, 3760), True, 'import numpy as np\n'), ((3776, 3812), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.8192"""', 'int'], {}), "('./data/data0.8192', int)\n", (3786, 3812), True, 'import numpy as np\n'), ((3829, 3866), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.16384"""', 'int'], {}), "('./data/data0.16384', int)\n", (3839, 3866), True, 'import numpy as np\n'), ((3883, 3920), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data0.32768"""', 'int'], {}), "('./data/data0.32768', int)\n", (3893, 3920), True, 'import numpy as np\n'), ((3937, 3973), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.1024"""', 'int'], {}), "('./data/data1.1024', int)\n", (3947, 3973), True, 'import numpy as np\n'), ((3989, 4025), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.2048"""', 'int'], {}), "('./data/data1.2048', int)\n", (3999, 4025), True, 'import numpy as np\n'), ((4041, 4077), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.4096"""', 'int'], {}), "('./data/data1.4096', int)\n", (4051, 4077), True, 'import numpy as np\n'), ((4093, 4129), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.8192"""', 'int'], {}), "('./data/data1.8192', int)\n", (4103, 4129), True, 'import numpy as np\n'), ((4146, 4183), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.16384"""', 'int'], {}), "('./data/data1.16384', int)\n", (4156, 4183), True, 'import numpy as np\n'), ((4200, 4237), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/data1.32768"""', 'int'], {}), "('./data/data1.32768', int)\n", (4210, 4237), True, 'import numpy as np\n'), ((5482, 5569), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice compare 0.txt"""', 'recursive_merge_compare_0'], {'fmt': '"""%f"""'}), "('./result/recursice compare 0.txt', recursive_merge_compare_0,\n fmt='%f')\n", (5492, 5569), True, 'import numpy as np\n'), ((5566, 5653), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice compare 1.txt"""', 'recursive_merge_compare_1'], {'fmt': '"""%f"""'}), "('./result/recursice compare 1.txt', recursive_merge_compare_1,\n fmt='%f')\n", (5576, 5653), True, 'import numpy as np\n'), ((5650, 5737), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice runtime 0.txt"""', 'recursive_merge_runtime_0'], {'fmt': '"""%f"""'}), "('./result/recursice runtime 0.txt', recursive_merge_runtime_0,\n fmt='%f')\n", (5660, 5737), True, 'import numpy as np\n'), ((5734, 5821), 'numpy.savetxt', 'np.savetxt', (['"""./result/recursice runtime 1.txt"""', 'recursive_merge_runtime_1'], {'fmt': '"""%f"""'}), "('./result/recursice runtime 1.txt', recursive_merge_runtime_1,\n fmt='%f')\n", (5744, 5821), True, 'import numpy as np\n'), ((5818, 5905), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative compare 0.txt"""', 'iterative_merge_compare_0'], {'fmt': '"""%f"""'}), "('./result/iterative compare 0.txt', iterative_merge_compare_0,\n fmt='%f')\n", (5828, 5905), True, 'import numpy as np\n'), ((5902, 5989), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative compare 1.txt"""', 'iterative_merge_compare_1'], {'fmt': '"""%f"""'}), "('./result/iterative compare 1.txt', iterative_merge_compare_1,\n fmt='%f')\n", 
(5912, 5989), True, 'import numpy as np\n'), ((5986, 6073), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative runtime 0.txt"""', 'iterative_merge_runtime_0'], {'fmt': '"""%f"""'}), "('./result/iterative runtime 0.txt', iterative_merge_runtime_0,\n fmt='%f')\n", (5996, 6073), True, 'import numpy as np\n'), ((6070, 6157), 'numpy.savetxt', 'np.savetxt', (['"""./result/iterative runtime 1.txt"""', 'iterative_merge_runtime_1'], {'fmt': '"""%f"""'}), "('./result/iterative runtime 1.txt', iterative_merge_runtime_1,\n fmt='%f')\n", (6080, 6157), True, 'import numpy as np\n'), ((6220, 6232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6230, 6232), True, 'import matplotlib.pyplot as plt\n'), ((6233, 6335), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_compare_0'], {'label': '"""recursive compare times with sorted data"""'}), "(input_size, recursive_merge_compare_0, label=\n 'recursive compare times with sorted data')\n", (6241, 6335), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6435), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_compare_1'], {'label': '"""recursive compare times with random data"""'}), "(input_size, recursive_merge_compare_1, label=\n 'recursive compare times with random data')\n", (6341, 6435), True, 'import matplotlib.pyplot as plt\n'), ((6433, 6535), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_compare_0'], {'label': '"""iterative compare times with sorted data"""'}), "(input_size, iterative_merge_compare_0, label=\n 'iterative compare times with sorted data')\n", (6441, 6535), True, 'import matplotlib.pyplot as plt\n'), ((6533, 6635), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_compare_1'], {'label': '"""iterative compare times with random data"""'}), "(input_size, iterative_merge_compare_1, label=\n 'iterative compare times with random data')\n", (6541, 6635), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6661), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (6643, 6661), True, 'import matplotlib.pyplot as plt\n'), ((6662, 6714), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare times as function of input size"""'], {}), "('Compare times as function of input size')\n", (6671, 6714), True, 'import matplotlib.pyplot as plt\n'), ((6715, 6739), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""input size"""'], {}), "('input size')\n", (6725, 6739), True, 'import matplotlib.pyplot as plt\n'), ((6740, 6767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""compare times"""'], {}), "('compare times')\n", (6750, 6767), True, 'import matplotlib.pyplot as plt\n'), ((6844, 6894), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result/compare times vs input.jpg"""'], {}), "('./result/compare times vs input.jpg')\n", (6855, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6895, 6905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6903, 6905), True, 'import matplotlib.pyplot as plt\n'), ((6920, 6932), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6930, 6932), True, 'import matplotlib.pyplot as plt\n'), ((6933, 7029), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'recursive_merge_runtime_0'], {'label': '"""recursive runtime with sorted data"""'}), "(input_size, recursive_merge_runtime_0, label=\n 'recursive runtime with sorted data')\n", (6941, 7029), True, 'import matplotlib.pyplot as plt\n'), ((7027, 7123), 'matplotlib.pyplot.plot', 'plt.plot', 
(['input_size', 'recursive_merge_runtime_1'], {'label': '"""recursive runtime with random data"""'}), "(input_size, recursive_merge_runtime_1, label=\n 'recursive runtime with random data')\n", (7035, 7123), True, 'import matplotlib.pyplot as plt\n'), ((7121, 7217), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_runtime_0'], {'label': '"""iterative runtime with sorted data"""'}), "(input_size, iterative_merge_runtime_0, label=\n 'iterative runtime with sorted data')\n", (7129, 7217), True, 'import matplotlib.pyplot as plt\n'), ((7215, 7311), 'matplotlib.pyplot.plot', 'plt.plot', (['input_size', 'iterative_merge_runtime_1'], {'label': '"""iterative runtime with random data"""'}), "(input_size, iterative_merge_runtime_1, label=\n 'iterative runtime with random data')\n", (7223, 7311), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7337), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (7319, 7337), True, 'import matplotlib.pyplot as plt\n'), ((7338, 7384), 'matplotlib.pyplot.title', 'plt.title', (['"""Runtime as function of input size"""'], {}), "('Runtime as function of input size')\n", (7347, 7384), True, 'import matplotlib.pyplot as plt\n'), ((7385, 7409), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""input size"""'], {}), "('input size')\n", (7395, 7409), True, 'import matplotlib.pyplot as plt\n'), ((7410, 7434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""runtime(s)"""'], {}), "('runtime(s)')\n", (7420, 7434), True, 'import matplotlib.pyplot as plt\n'), ((7511, 7555), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result/runtime vs input.jpg"""'], {}), "('./result/runtime vs input.jpg')\n", (7522, 7555), True, 'import matplotlib.pyplot as plt\n'), ((7556, 7566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7564, 7566), True, 'import matplotlib.pyplot as plt\n'), ((310, 321), 'time.time', 'time.time', ([], {}), '()\n', (319, 321), False, 'import time\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((2019, 2045), 'copy.deepcopy', 'copy.deepcopy', (['input_array'], {}), '(input_array)\n', (2032, 2045), False, 'import copy\n'), ((2683, 2694), 'time.time', 'time.time', ([], {}), '()\n', (2692, 2694), False, 'import time\n'), ((381, 407), 'copy.deepcopy', 'copy.deepcopy', (['input_array'], {}), '(input_array)\n', (394, 407), False, 'import copy\n'), ((1601, 1612), 'time.time', 'time.time', ([], {}), '()\n', (1610, 1612), False, 'import time\n')]
|
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import Order
from ibapi.scanner import ScannerSubscription
from ibapi.ticktype import TickTypeEnum
from ibapi.common import *
from ibapi.tag_value import TagValue
from ibapi.execution import ExecutionFilter
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from datetime import datetime
from time import sleep, strftime, localtime, time
sleeptime = 5
class AccountManagement:
def read_nextvalidid(self):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.nextValidOrderId = []
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def nextValidId(self, orderId):
super().nextValidId(orderId)
self.nextValidOrderId.append(orderId)
print("NextValidId:", orderId)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqIds(-1)
nid = app.nextValidOrderId
app.run()
return nid[0]
    def placing_orders(self, order_id, symbol, sec_type, exch, prim_exch, curr, order_type, quantity, action):
contract = Contract()
contract.symbol = symbol
contract.secType = sec_type
contract.exchange = exch
contract.primaryExchange = prim_exch
contract.currency = curr
order = Order()
order.orderType = order_type
order.totalQuantity = quantity
order.action = action
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
        app.placeOrder(orderId=order_id, contract=contract, order=order)
        print('order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
        sleep(sleeptime)
        app.disconnect()
        return order, contract
def read_positions(self, subscribe, acctCode):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.up = pd.DataFrame([], columns=['Position', 'marketPrice', 'marketValue', 'averageCost',
'unrealizedPNL', 'realizedPNL'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL,
realizedPNL, accountName):
self.up.index.name = 'Symbol'
self.up.loc[
contract.symbol] = position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL
def positionEnd(self):
super().positionEnd()
print("PositionEnd")
self.cancelPositions()
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqAccountUpdates(subscribe=subscribe, acctCode=acctCode)
app.reqPositions()
update = app.up
app.run()
print('Reading Portfolio')
rows = update[update['Position'] == 0].index
update.drop(rows, axis=0, inplace=True)
return update
def read_account(self, subscribe, acctCode):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.up = pd.DataFrame([], columns=['Values'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def updateAccountValue(self, key, value, currency, accountName):
self.up.index.name = 'Keys'
self.up.loc[key] = value
def accountDownloadEnd(self, account):
print("AccountDownloadEnd. Account:", account)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqAccountUpdates(subscribe=subscribe, acctCode=acctCode)
update = app.up
app.reqAccountUpdates(False, acctCode)
app.run()
print('Reading Account')
return update
def cancel_openorders(self):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.open_orders = pd.DataFrame(columns=['action', 'quantity',
'type', 'algoStrategy',
'algoParams', 'pre_status'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def cancelOrder(self, orderId):
super().cancelOrder(orderId)
print('cancel order ended')
def openOrder(self, orderId, Contract, Order, OrderState):
super().openOrder(orderId, Contract, Order, OrderState)
self.open_orders.loc[Contract.symbol, :] = [Order.action,
Order.totalQuantity,
Order.orderType,
Order.algoStrategy,
Order.algoParams[0],
OrderState.status]
def openOrderEnd(self):
super().openOrderEnd()
print('open order ended')
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqIds(-1)
app.reqAllOpenOrders()
open_orders = app.open_orders
app.reqGlobalCancel()
app.run()
return open_orders
def get_openorders(self):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.open_orders = pd.DataFrame(columns=['action', 'open orders',
'type', 'algoStrategy',
'algoParams', 'status'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def openOrder(self, orderId, Contract, Order, OrderState):
super().openOrder(orderId, Contract, Order, OrderState)
self.open_orders.loc[Contract.symbol, :] = [Order.action,
Order.totalQuantity,
Order.orderType,
Order.algoStrategy,
Order.algoParams[0],
OrderState.status]
def openOrderEnd(self):
super().openOrderEnd()
print('open order ended')
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
app.reqIds(-1)
app.reqAllOpenOrders()
sleep(sleeptime)
open_orders = app.open_orders
app.run()
return open_orders
def closing_positions(self, portfolio, order_id, ordersPriority, transmit):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
if app.isConnected():
print('app is running ...')
print('closing {} positions which are not present in action'.format(len(stock_to_close)))
# Closing Position
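            # NOTE: stock_to_close is assumed to be defined at module level
            # (tickers currently held in `portfolio` but absent from the target action list)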
for i in stock_to_close:
contract = Contract()
contract.symbol = i
contract.secType = 'STK'
contract.exchange = 'SMART'
# contract.primaryExchange = 'ISLAND'
contract.currency = 'USD'
order = Order()
order.orderType = 'MKT'
order.totalQuantity = int(np.abs(portfolio.loc[i, 'Position']))
order.transmit = transmit
if portfolio.loc[i, 'Position'] > 0:
order.action = 'SELL'
# order.cashQty = weigth * 1.5 * net_liq
order.algoStrategy = 'Adaptive'
order.algoParams = []
order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
app.placeOrder(orderId=order_id, contract=contract, order=order)
sleep(sleeptime)
order_id = order_id + 1
print('closing position for {} is: {} '.format(contract.symbol, order.totalQuantity))
elif portfolio.loc[i, 'Position'] < 0:
order.action = 'BUY'
# order.cashQty = weigth * 1.5 * net_liq
order.algoStrategy = 'Adaptive'
order.algoParams = []
order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
app.placeOrder(orderId=order_id, contract=contract, order=order)
sleep(sleeptime)
order_id = order_id + 1
print('closing position for {} is: {} '.format(contract.symbol, order.totalQuantity))
else:
print('app not connected')
app.disconnect()
return order_id + 1
def rebalancing_to_leverage(self, order_id, ordersPriority, transmit):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
if app.isConnected():
print('app is running ...')
print('balancing {} positions'.format(len(action_balance.index)))
# Closing Position
for i in action_balance.index:
contract = Contract()
contract.symbol = i
contract.secType = 'STK'
contract.exchange = 'SMART'
contract.currency = 'USD'
order = Order()
order.orderType = 'MKT'
order.totalQuantity = np.abs(action_balance.loc[i, 'shares'])
order.transmit = transmit
if action_balance.loc[i, 'shares'] > 0:
order.action = 'BUY'
order.algoStrategy = 'Adaptive'
order.algoParams = []
order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
app.placeOrder(orderId=order_id, contract=contract, order=order)
sleep(sleeptime)
order_id = order_id + 1
print(' buy order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
elif action_balance.loc[i, 'shares'] < 0:
order.action = 'SELL'
order.algoStrategy = 'Adaptive'
order.algoParams = []
order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
app.placeOrder(orderId=order_id, contract=contract, order=order)
sleep(sleeptime)
order_id = order_id + 1
print(' sell order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
else:
print('app not connected')
app.disconnect()
def placing_final_orders(self, order_id, ordersPriority, transmit):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
for ticker in action_final.index:
contract = Contract()
contract.symbol = ticker
contract.secType = 'STK'
contract.exchange = 'SMART'
# contract.primaryExchange = 'ISLAND'
contract.currency = 'USD'
order = Order()
order.orderType = 'MKT'
order.transmit = transmit
order.totalQuantity = np.abs(action_final.loc[ticker])[0]
if action_final.loc[ticker][0] > 0:
order.action = 'BUY'
order.algoStrategy = 'Adaptive'
order.algoParams = []
order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
app.placeOrder(orderId=order_id, contract=contract, order=order)
sleep(sleeptime)
order_id = order_id + 1
print('buy order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
elif action_final.loc[ticker][0] < 0:
order.action = 'SELL'
order.algoStrategy = 'Adaptive'
order.algoParams = []
order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
app.placeOrder(orderId=order_id, contract=contract, order=order)
sleep(sleeptime)
order_id = order_id + 1
print('sell order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
app.disconnect()
def commission_report(self, time):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.executed_orders = pd.DataFrame(columns=['ticker',
'time', 'shares', 'action',
'price', 'marketValue',
'RealizedPNL', 'commission'])
self.val = 0
self.val2 = 0
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def execDetails(self, reqId, contract, execution):
super().execDetails(reqId, contract, execution)
self.executed_orders.loc[self.val, ['ticker',
'time',
'shares',
'action',
'price',
'marketValue']] = [contract.symbol,
pd.to_datetime(execution.time),
execution.shares, execution.side,
execution.price,
execution.shares * execution.price]
self.val = self.val + 1
def commissionReport(self, commissionReport):
super().commissionReport(commissionReport)
self.executed_orders.loc[self.val2, ['RealizedPNL', 'commission']] = [
float(commissionReport.realizedPNL),
float(commissionReport.commission)]
self.val2 = self.val2 + 1
def execDetailsEnd(self, reqId):
super().execDetailsEnd(reqId)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
execution_filter = ExecutionFilter()
execution_filter.acctCode = acctCode
execution_filter.time = time
app.reqExecutions(0, execution_filter)
sleep(sleeptime)
df = app.executed_orders
app.run()
sleep(sleeptime)
df.set_index('time', inplace=True)
df.sort_index(inplace=True)
  # use .loc instead of chained indexing; values above 1e6 are IB placeholder PNLs for fills that did not close a position
  df.loc[df['RealizedPNL'] > 1000000, 'RealizedPNL'] = 'OPEN'
return df
|
[
"numpy.abs",
"ibapi.client.EClient.__init__",
"time.sleep",
"ibapi.tag_value.TagValue",
"ibapi.contract.Contract",
"ibapi.order.Order",
"pandas.DataFrame",
"ibapi.execution.ExecutionFilter",
"pandas.to_datetime"
] |
[((1277, 1293), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (1282, 1293), False, 'from time import sleep, strftime, localtime, time\n'), ((1517, 1527), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (1525, 1527), False, 'from ibapi.contract import Contract\n'), ((1725, 1732), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (1730, 1732), False, 'from ibapi.order import Order\n'), ((2413, 2429), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (2418, 2429), False, 'from time import sleep, strftime, localtime, time\n'), ((3699, 3715), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (3704, 3715), False, 'from time import sleep, strftime, localtime, time\n'), ((4844, 4860), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (4849, 4860), False, 'from time import sleep, strftime, localtime, time\n'), ((6664, 6680), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (6669, 6680), False, 'from time import sleep, strftime, localtime, time\n'), ((8353, 8369), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (8358, 8369), False, 'from time import sleep, strftime, localtime, time\n'), ((17579, 17596), 'ibapi.execution.ExecutionFilter', 'ExecutionFilter', ([], {}), '()\n', (17594, 17596), False, 'from ibapi.execution import ExecutionFilter\n'), ((17735, 17751), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (17740, 17751), False, 'from time import sleep, strftime, localtime, time\n'), ((17812, 17828), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (17817, 17828), False, 'from time import sleep, strftime, localtime, time\n'), ((13800, 13810), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (13808, 13810), False, 'from ibapi.contract import Contract\n'), ((14034, 14041), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (14039, 14041), False, 'from ibapi.order import Order\n'), ((699, 727), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (715, 727), False, 'from ibapi.client import EClient\n'), ((1931, 1959), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (1947, 1959), False, 'from ibapi.client import EClient\n'), ((2650, 2678), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (2666, 2678), False, 'from ibapi.client import EClient\n'), ((2705, 2824), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['Position', 'marketPrice', 'marketValue', 'averageCost', 'unrealizedPNL',\n 'realizedPNL']"}), "([], columns=['Position', 'marketPrice', 'marketValue',\n 'averageCost', 'unrealizedPNL', 'realizedPNL'])\n", (2717, 2824), True, 'import pandas as pd\n'), ((4160, 4188), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (4176, 4188), False, 'from ibapi.client import EClient\n'), ((4215, 4251), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['Values']"}), "([], columns=['Values'])\n", (4227, 4251), True, 'import pandas as pd\n'), ((5206, 5234), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (5222, 5234), False, 'from ibapi.client import EClient\n'), ((5270, 5370), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['action', 'quantity', 'type', 'algoStrategy', 'algoParams', 'pre_status']"}), "(columns=['action', 'quantity', 'type', 'algoStrategy',\n 'algoParams', 'pre_status'])\n", (5282, 5370), True, 'import pandas as pd\n'), ((6975, 7003), 
'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (6991, 7003), False, 'from ibapi.client import EClient\n'), ((7039, 7138), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['action', 'open orders', 'type', 'algoStrategy', 'algoParams', 'status']"}), "(columns=['action', 'open orders', 'type', 'algoStrategy',\n 'algoParams', 'status'])\n", (7051, 7138), True, 'import pandas as pd\n'), ((8629, 8657), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (8645, 8657), False, 'from ibapi.client import EClient\n'), ((9200, 9210), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (9208, 9210), False, 'from ibapi.contract import Contract\n'), ((9453, 9460), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (9458, 9460), False, 'from ibapi.order import Order\n'), ((11132, 11160), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (11148, 11160), False, 'from ibapi.client import EClient\n'), ((11685, 11695), 'ibapi.contract.Contract', 'Contract', ([], {}), '()\n', (11693, 11695), False, 'from ibapi.contract import Contract\n'), ((11884, 11891), 'ibapi.order.Order', 'Order', ([], {}), '()\n', (11889, 11891), False, 'from ibapi.order import Order\n'), ((11970, 12009), 'numpy.abs', 'np.abs', (["action_balance.loc[i, 'shares']"], {}), "(action_balance.loc[i, 'shares'])\n", (11976, 12009), True, 'import numpy as np\n'), ((13432, 13460), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (13448, 13460), False, 'from ibapi.client import EClient\n'), ((14151, 14183), 'numpy.abs', 'np.abs', (['action_final.loc[ticker]'], {}), '(action_final.loc[ticker])\n', (14157, 14183), True, 'import numpy as np\n'), ((14544, 14560), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (14549, 14560), False, 'from time import sleep, strftime, localtime, time\n'), ((15399, 15427), 'ibapi.client.EClient.__init__', 'EClient.__init__', (['self', 'self'], {}), '(self, self)\n', (15415, 15427), False, 'from ibapi.client import EClient\n'), ((15468, 15585), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ticker', 'time', 'shares', 'action', 'price', 'marketValue',\n 'RealizedPNL', 'commission']"}), "(columns=['ticker', 'time', 'shares', 'action', 'price',\n 'marketValue', 'RealizedPNL', 'commission'])\n", (15480, 15585), True, 'import pandas as pd\n'), ((9543, 9579), 'numpy.abs', 'np.abs', (["portfolio.loc[i, 'Position']"], {}), "(portfolio.loc[i, 'Position'])\n", (9549, 9579), True, 'import numpy as np\n'), ((10071, 10087), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (10076, 10087), False, 'from time import sleep, strftime, localtime, time\n'), ((12440, 12456), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (12445, 12456), False, 'from time import sleep, strftime, localtime, time\n'), ((14400, 14444), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (14408, 14444), False, 'from ibapi.tag_value import TagValue\n'), ((15072, 15088), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (15077, 15088), False, 'from time import sleep, strftime, localtime, time\n'), ((16624, 16654), 'pandas.to_datetime', 'pd.to_datetime', (['execution.time'], {}), '(execution.time)\n', (16638, 16654), True, 'import pandas as pd\n'), ((9919, 9963), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 
'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (9927, 9963), False, 'from ibapi.tag_value import TagValue\n'), ((10688, 10704), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (10693, 10704), False, 'from time import sleep, strftime, localtime, time\n'), ((12289, 12333), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (12297, 12333), False, 'from ibapi.tag_value import TagValue\n'), ((13009, 13025), 'time.sleep', 'sleep', (['sleeptime'], {}), '(sleeptime)\n', (13014, 13025), False, 'from time import sleep, strftime, localtime, time\n'), ((14928, 14972), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (14936, 14972), False, 'from ibapi.tag_value import TagValue\n'), ((10536, 10580), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (10544, 10580), False, 'from ibapi.tag_value import TagValue\n'), ((12858, 12902), 'ibapi.tag_value.TagValue', 'TagValue', (['"""adaptivePriority"""', 'ordersPriority'], {}), "('adaptivePriority', ordersPriority)\n", (12866, 12902), False, 'from ibapi.tag_value import TagValue\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 21:25:24 2015
@author: Konrad
"""
import copy
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sc_p
def gen_clusters(means, num_each):
tup = ();
for m in means:
tup = tup + (np.random.multivariate_normal(m, np.diag(np.ones(2)), num_each),)
data = np.concatenate(tup);
np.random.shuffle(data);
return data;
def make_pts(data):
pts = [];
for pos in data:
pts.append(Point(pos));
return pts;
def euclid(obj1, obj2):
if (isinstance(obj1, Point) and isinstance(obj2, Point)):
return np.sqrt(sum(( obj1.pos - obj2.pos )**2));
elif (isinstance(obj1, np.ndarray) and isinstance(obj2, np.ndarray)):
return np.sqrt(sum(( obj1 - obj2 )**2))
else:
return None;
class Point:
def __init__(self, pos):
self.pos = copy.deepcopy(pos);
self.processed = False;
self.core_dist = None;
self.reach_dist = None;
self.in_seed = False;
class OPTICS:
def __init__(self, min_pts, data, max_eps = None):
self.max_eps = max_eps;
self.min_pts = min_pts;
self.data = copy.deepcopy(data);
self.dim = self.data[0].pos.size;
self.main_list = [];
if (self.max_eps == None):
self.get_max_eps();
self.main_loop();
def __call__(self, main_idx):
return self.data[self.main_list[main_idx]].reach_dist;
def main_loop(self):
for idx, obj in enumerate(self.data):
if (not obj.processed):
self.expand_point(idx);
for idx, obj in enumerate(self.data):
if (not obj.processed):
self.append_main(idx);
def get_max_eps(self):
extr_x = self.get_extr_x();
extr_y = self.get_extr_y();
area = (extr_x[1] - extr_x[0])*(extr_y[1] - extr_y[0]);
self.max_eps = ((area*self.min_pts*sc_p.gamma(2))/(len(self.data)*np.sqrt(np.pi**2)))**0.5
def get_extr_x(self):
min_x = float("inf");
max_x = -float("inf");
for obj in self.data:
if obj.pos[0] < min_x:
min_x = obj.pos[0];
if obj.pos[0] > max_x:
max_x = obj.pos[0];
return (min_x, max_x);
def get_extr_y(self):
min_y = float("inf");
max_y = -float("inf");
for obj in self.data:
if obj.pos[1] < min_y:
min_y = obj.pos[1];
if obj.pos[1] > max_y:
max_y = obj.pos[1];
return (min_y, max_y);
def append_main(self, idx):
self.data[idx].processed = True;
if (self.data[idx].reach_dist == None):
self.data[idx].reach_dist = self.max_eps;
self.main_list.append(idx);
def expand_point(self, idx):
self.get_neighbours(idx);
self.get_core_dist(idx);
if (self.data[idx].core_dist == -1):
return;
else:
self.data[idx].processed = True;
self.append_main(idx);
seed_list = [];
self.append_seed(seed_list, self.data[idx].neighbours, idx)
while (len(seed_list) > 0):
curr_idx = seed_list[0];
self.get_neighbours(curr_idx);
self.get_core_dist(curr_idx);
self.data[curr_idx].processed = True;
self.append_main(curr_idx);
self.remove_seed(seed_list);
if (not (self.data[curr_idx].core_dist == -1)):
self.append_seed(seed_list, self.data[curr_idx].neighbours, curr_idx);
def get_core_dist(self, idx):
if (len(self.data[idx].neighbours) >= self.min_pts):
self.data[idx].core_dist = self.data[idx].neighbours[self.min_pts - 1][1];
else:
self.data[idx].core_dist = -1;
def get_reach_dist(self, center_idx, idx, dist):
r_dist = max(dist, self.data[center_idx].core_dist);
if (self.data[idx].reach_dist == None):
self.data[idx].reach_dist = r_dist;
return True;
elif (self.data[idx].reach_dist > r_dist):
self.data[idx].reach_dist = r_dist;
return True;
else:
return False;
def get_neighbours(self, idx):
self.data[idx].neighbours = [];
for n_idx, obj in enumerate(self.data):
dist = euclid(obj, self.data[idx])
if (dist <= self.max_eps):
self.data[idx].neighbours.append([n_idx, dist]);
self.data[idx].neighbours.sort(key = lambda x : x[1]);
def append_seed(self, seed_list, neighbours, center_idx):
for n_tup in neighbours:
changed = self.get_reach_dist(center_idx, n_tup[0], n_tup[1]);
if (self.data[n_tup[0]].in_seed and changed):
del seed_list[seed_list.index(n_tup[0])];
self.data[n_tup[0]].in_seed = False;
elif (self.data[n_tup[0]].processed or self.data[n_tup[0]].in_seed):
continue;
for idx, obj in enumerate(seed_list):
if ( self.data[n_tup[0]].reach_dist < self.data[obj].reach_dist ):
seed_list.insert(idx, n_tup[0]);
self.data[n_tup[0]].in_seed = True;
break;
if (not self.data[n_tup[0]].in_seed):
seed_list.append(n_tup[0]);
self.data[n_tup[0]].in_seed = True;
def remove_seed(self, seed_list):
self.data[seed_list[0]].in_seed = False;
del seed_list[0];
def reach_plot(self):
x = list(range(len(self.main_list)));
y = [];
for idx in self.main_list:
y.append(self.data[idx].reach_dist);
f, ax = plt.subplots();
ax.bar(x, y);
def print_reach_dist(self):
for idx in self.main_list:
print (idx)
print (self.data[idx].reach_dist)
def plot_data(self):
x = [];
y = [];
for obj in self.data:
x.append(obj.pos[0]);
y.append(obj.pos[1]);
f, ax = plt.subplots();
ax.scatter(x, y);
def get_num_clusters(self):
clusters = [];
up = True;
top, bottom = -1, -1;
for i, idx in enumerate(self.main_list[:-1]):
            if (up and (self.data[idx].reach_dist > self.data[self.main_list[i + 1]].reach_dist)):
up = not up;
if (not bottom == -1):
clusters.append(top - bottom);
top = self.data[idx].reach_dist;
continue;
if (not up) and (self.data[idx].reach_dist < self.data[self.main_list[i + 1]].reach_dist):
up = not up;
                bottom = self.data[idx].reach_dist;
        return len(clusters);
class Clusters:
    def __init__(self, optics_obj, eps):
self.optics_obj = optics_obj;
self.main_list = optics_obj.main_list;
self.eps = eps;
self.min_pts = optics_obj.min_pts;
def find(self):
idx = 0;
#down, up = False, False;
downs = [];
clusters = [];
        while idx < len(self.main_list) - 1:
            diff = self.optics_obj(idx) - self.optics_obj(idx + 1);
if (diff >= self.optics_obj(idx)*self.eps):
new_down, idx = self.proc_down(idx);
downs.append([new_down, -float("inf")]);
#glob_mib = self.optics_obj(downs[-1][0][0]]);
#self.filter_downs(glob_mib, downs);
            elif (-diff >= self.optics_obj(idx)*self.eps):
                glob_mib = self.get_glob_mib(downs[-1], idx);
                self.filter_downs(glob_mib, downs);
                up, idx = self.proc_up(idx);
                for down in downs:
                    if (self.optics_obj(up[1])*(1 - self.eps) >= down[1]):
                        clusters.append((down[0][0], up[1]));
            else:
                idx += 1;
        return clusters;
def get_glob_mib(self, last_down, curr_idx):
begin_idx, end_idx = last_down[0][1], curr_idx;
glob_mib = -float("inf");
for i in range(begin_idx, end_idx + 1):
if (self.optics_obj(i) > glob_mib):
glob_mib = self.optics_obj(i);
return glob_mib;
def proc_down(self, idx):
bad_inrow = 0;
begin_idx = idx;
while (idx < len(self.main_list)):
idx += 1;
            diff = self.optics_obj(idx) - self.optics_obj(idx + 1);
if (diff < 0):
                return (begin_idx, idx - 1), idx;
if (diff > 0):
                if (diff >= self.eps*self.optics_obj(idx)):
bad_inrow = 0;
else:
if (bad_inrow == 0):
last_good = idx - 1;
bad_inrow += 1;
if bad_inrow > self.min_pts:
# include a check that ensures region does not have
# length zero?
return (begin_idx, last_good), idx;
def proc_up(self, idx):
bad_inrow = 0;
begin_idx = idx;
while (idx < len(self.main_list)):
idx += 1;
            diff = self.optics_obj(idx + 1) - self.optics_obj(idx);
if (diff < 0):
                return (begin_idx, idx - 1), idx;
if (diff > 0):
                if (diff >= self.eps*self.optics_obj(idx + 1)):
bad_inrow = 0;
else:
if (bad_inrow == 0):
last_good = idx - 1;
bad_inrow += 1;
if (bad_inrow > self.min_pts):
return (begin_idx, last_good), idx;
def filter_downs(self, glob_mib, downs):
del_idx = [];
for idx, obj in enumerate(downs[:-1]):
            if self.optics_obj(obj[0][0])*(1 - self.eps) < glob_mib:
del_idx.append(idx);
elif (obj[1] < glob_mib):
downs[idx][1] = glob_mib;
del_idx.reverse();
for i in del_idx:
del downs[i];
dat = gen_clusters([[1, 1], [6, 7], [10, 15], [15, 15]], 200);
data = make_pts(dat);
optics = OPTICS(15, data);
optics.reach_plot();
optics.plot_data();
plt.show();
#optics.print_reach_dist();
print ("Done")
|
[
"numpy.sqrt",
"numpy.ones",
"matplotlib.pyplot.show",
"scipy.special.gamma",
"numpy.concatenate",
"copy.deepcopy",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] |
[((10939, 10949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10947, 10949), True, 'import matplotlib.pyplot as plt\n'), ((362, 381), 'numpy.concatenate', 'np.concatenate', (['tup'], {}), '(tup)\n', (376, 381), True, 'import numpy as np\n'), ((388, 411), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (405, 411), True, 'import numpy as np\n'), ((912, 930), 'copy.deepcopy', 'copy.deepcopy', (['pos'], {}), '(pos)\n', (925, 930), False, 'import copy\n'), ((1231, 1250), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (1244, 1250), False, 'import copy\n'), ((6147, 6161), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6159, 6161), True, 'import matplotlib.pyplot as plt\n'), ((6553, 6567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6565, 6567), True, 'import matplotlib.pyplot as plt\n'), ((2040, 2053), 'scipy.special.gamma', 'sc_p.gamma', (['(2)'], {}), '(2)\n', (2050, 2053), True, 'import scipy.special as sc_p\n'), ((2071, 2090), 'numpy.sqrt', 'np.sqrt', (['(np.pi ** 2)'], {}), '(np.pi ** 2)\n', (2078, 2090), True, 'import numpy as np\n'), ((325, 335), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (332, 335), True, 'import numpy as np\n')]
|
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import random
import numpy as np
class Searchspace(object):
"""Create an instance of `Searchspace` from keyword arguments.
A searchspace is essentially a set of key value pairs, defining the
hyperparameters with a name, type and a feasible interval. The keyword
arguments specify name-values pairs for the hyperparameters,
where values are tuples of the form (type, list). Type is a string with
one of the following values:
- DOUBLE
- INTEGER
- DISCRETE
- CATEGORICAL
And the list in the tuple specifies either two values only, the start
    and end point of the feasible interval for DOUBLE and INTEGER,
or the discrete possible values for the types DISCRETE and CATEGORICAL.
Sample usage:
>>> # Define Searchspace
>>> from maggy import Searchspace
>>> # The searchspace can be instantiated with parameters
>>> sp = Searchspace(kernel=('INTEGER', [2, 8]), pool=('INTEGER', [2, 8]))
>>> # Or additional parameters can be added one by one
>>> sp.add('dropout', ('DOUBLE', [0.01, 0.99]))
The `Searchspace` object can also be initialized from a python dictionary:
>>> sp_dict = sp.to_dict()
>>> sp_new = Searchspace(**sp_dict)
The parameter names are added as attributes of `Searchspace` object,
so they can be accessed directly with the dot notation
`searchspace._name_`.
"""
DOUBLE = "DOUBLE"
INTEGER = "INTEGER"
DISCRETE = "DISCRETE"
CATEGORICAL = "CATEGORICAL"
def __init__(self, **kwargs):
self._hparam_types = {}
self._names = []
for name, value in kwargs.items():
self.add(name, value)
def add(self, name, value):
"""Adds {name, value} pair to hyperparameters.
:param name: Name of the hyperparameter
:type name: str
:param value: A tuple of the parameter type and its feasible region
:type value: tuple
:raises ValueError: Hyperparameter name is reserved
:raises ValueError: Hyperparameter feasible region in wrong format
"""
if getattr(self, name, None) is not None:
raise ValueError("Hyperparameter name is reserved: {}".format(name))
if isinstance(value, tuple) or isinstance(value, list):
if len(value) != 2:
raise ValueError(
"Hyperparameter tuple has to be of length "
"two and format (type, list): {0}, {1}".format(name, value)
)
param_type = value[0].upper()
param_values = value[1]
if param_type in [
Searchspace.DOUBLE,
Searchspace.INTEGER,
Searchspace.DISCRETE,
Searchspace.CATEGORICAL,
]:
if len(param_values) == 0:
raise ValueError(
"Hyperparameter feasible region list "
"cannot be empty: {0}, {1}".format(name, param_values)
)
if param_type in [Searchspace.DOUBLE, Searchspace.INTEGER]:
assert len(param_values) == 2, (
"For DOUBLE or INTEGER type parameters, list "
"can only contain upper and lower bounds: {0}, {1}".format(
name, param_values
)
)
if param_type == Searchspace.DOUBLE:
if type(param_values[0]) not in [int, float] or type(
param_values[1]
) not in [int, float]:
raise ValueError(
"Hyperparameter boundaries for type DOUBLE need to be integer "
"or float: {}".format(name)
)
elif param_type == Searchspace.INTEGER:
if type(param_values[0]) != int or type(param_values[1]) != int:
raise ValueError(
"Hyperparameter boundaries for type INTEGER need to be integer: "
"{}".format(name)
)
assert param_values[0] < param_values[1], (
"Lower bound {0} must be "
"less than upper bound {1}: {2}".format(
param_values[0], param_values[1], name
)
)
self._hparam_types[name] = param_type
setattr(self, name, value[1])
self._names.append(name)
else:
raise ValueError(
"Hyperparameter type is not of type DOUBLE, "
"INTEGER, DISCRETE or CATEGORICAL: {}".format(name)
)
else:
raise ValueError("Value is not an appropriate tuple: {}".format(name))
print("Hyperparameter added: {}".format(name))
def to_dict(self):
"""Return the hyperparameters as a Python dictionary.
:return: A dictionary with hyperparameter names as keys. The values are
the hyperparameter values.
:rtype: dict
"""
return {
n: (self._hparam_types[n], getattr(self, n))
for n in self._hparam_types.keys()
}
def names(self):
"""Returns the dictionary with the names and types of all
hyperparameters.
:return: Dictionary of hyperparameter names, with types as value
:rtype: dict
"""
return self._hparam_types
def get(self, name, default=None):
"""Returns the value of `name` if it exists, else `default`."""
if name in self._hparam_types:
return getattr(self, name)
return default
def get_random_parameter_values(self, num):
"""Generate random parameter dictionaries, e.g. to be used for initializing an optimizer.
:param num: number of random parameter dictionaries to be generated.
:type num: int
:raises ValueError: `num` is not an int.
:return: a list containing parameter dictionaries
:rtype: list
"""
return_list = []
for _ in range(num):
params = {}
for name, value in self.names().items():
feasible_region = self.get(name)
if value == Searchspace.DOUBLE:
params[name] = random.uniform(
feasible_region[0], feasible_region[1]
)
elif value == Searchspace.INTEGER:
params[name] = random.randint(
feasible_region[0], feasible_region[1]
)
elif value == Searchspace.DISCRETE:
params[name] = random.choice(feasible_region)
elif value == Searchspace.CATEGORICAL:
params[name] = random.choice(feasible_region)
return_list.append(params)
return return_list
def __iter__(self):
self._returned = self._names.copy()
return self
def __next__(self):
# if list not empty
if self._returned:
# pop from left and get parameter tuple
name = self._returned.pop(0)
return {
"name": name,
"type": self._hparam_types[name],
"values": self.get(name),
}
else:
raise StopIteration
def items(self):
"""Returns a sorted iterable over all hyperparameters in the searchspace.
Allows to iterate over the hyperparameters in a searchspace. The parameters
are sorted in the order of which they were added to the searchspace by the user.
:return: an iterable of the searchspace
:type: Searchspace
"""
# for consistency and serves mainly as syntactic sugar
return self
def keys(self):
"""Returns a sorted iterable list over the names of hyperparameters in
the searchspace.
:return: names of hyperparameters as a list of strings
:type: list
"""
return self._names
def values(self):
"""Returns a sorted iterable list over the types and feasible intervals of
hyperparameters in the searchspace.
:return: types and feasible interval of hyperparameters as tuple
:type: tuple
"""
return [(self._hparam_types[name], self.get(name)) for name in self._names]
def __contains__(self, name):
return name in self._hparam_types
def __str__(self):
return json.dumps(self.to_dict(), sort_keys=True)
def json(self):
return json.dumps(self.to_dict(), sort_keys=True)
def transform(self, hparams, normalize_categorical=False):
"""Transforms array of hypeparameters for one trial.
+--------------+-----------------------------------------------------+
| Hparam Type | Transformation |
+==============+=====================================================+
| DOUBLE | Max-Min Normalization |
+--------------+-----------------------------------------------------+
| INTEGER | Max-Min Normalization |
+--------------+-----------------------------------------------------+
| CATEGORICAL | Encoding: index in list + opt. Max-Min Normalization|
+--------------+-----------------------------------------------------+
:param hparams: hparams in original representation for one trial
:type hparams: 1D np.ndarray
:param normalize_categorical: If True, the encoded categorical hparam is also max-min normalized between 0 and 1
`inverse_transform()` must use the same value for this parameter
:type normalize_categorical: bool
:return: transformed hparams
:rtype: np.ndarray[np.float]
"""
transformed_hparams = []
# loop through hparams
for hparam, hparam_spec in zip(hparams, self.items()):
if hparam_spec["type"] == "DOUBLE":
normalized_hparam = Searchspace._normalize_scalar(
hparam_spec["values"], hparam
)
transformed_hparams.append(normalized_hparam)
elif hparam_spec["type"] == "INTEGER":
normalized_hparam = Searchspace._normalize_integer(
hparam_spec["values"], hparam
)
transformed_hparams.append(normalized_hparam)
elif hparam_spec["type"] == "CATEGORICAL":
encoded_hparam = Searchspace._encode_categorical(
hparam_spec["values"], hparam
)
if normalize_categorical:
encoded_hparam = Searchspace._normalize_integer(
[0, len(hparam_spec["values"]) - 1], encoded_hparam
)
transformed_hparams.append(encoded_hparam)
else:
raise NotImplementedError("Not Implemented other types yet")
return transformed_hparams
def inverse_transform(self, transformed_hparams, normalize_categorical=False):
"""Returns array of hparams in same representation as specified when instantiated
:param transformed_hparams: hparams in transformed representation for one trial
:type transformed_hparams: 1D np.ndarray
:param normalize_categorical: If True, the encoded categorical hparam was also max-min normalized between 0 and 1
`transform()` must use the same value for this parameter
:type normalize_categorical: bool
:return: transformed hparams
:rtype: np.ndarray
"""
hparams = []
for hparam, hparam_spec in zip(transformed_hparams, self.items()):
if hparam_spec["type"] == "DOUBLE":
value = Searchspace._inverse_normalize_scalar(
hparam_spec["values"], hparam
)
hparams.append(value)
elif hparam_spec["type"] == "INTEGER":
value = Searchspace._inverse_normalize_integer(
hparam_spec["values"], hparam
)
hparams.append(value)
elif hparam_spec["type"] == "CATEGORICAL":
if normalize_categorical:
value = Searchspace._inverse_normalize_integer(
[0, len(hparam_spec["values"]) - 1], hparam
)
value = Searchspace._decode_categorical(
hparam_spec["values"], value
)
else:
value = Searchspace._decode_categorical(
hparam_spec["values"], hparam
)
hparams.append(value)
else:
raise NotImplementedError("Not Implemented other types yet")
return hparams
@staticmethod
def _encode_categorical(choices, value):
"""Encodes category to integer. The encoding is the list index of the category
:param choices: possible values of the categorical hparam
:type choices: list
:param value: category to encode
:type value: str
:return: encoded category
:rtype: int
"""
return choices.index(value)
@staticmethod
def _decode_categorical(choices, encoded_value):
"""Decodes integer to corresponding category value
:param choices: possible values of the categorical hparam
:type choices: list
:param encoded_value: encoding of category
:type encoded_value: int
:return: category value
:rtype: str
"""
encoded_value = int(
encoded_value
) # it is possible that value gets casted to np.float by numpy
return choices[encoded_value]
@staticmethod
def _normalize_scalar(bounds, scalar):
"""Returns max-min normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param scalar: scalar value to be normalized
:type scalar: float
:return: normalized scalar
:rtype: float
"""
scalar = float(scalar)
scalar = (scalar - bounds[0]) / (bounds[1] - bounds[0])
scalar = np.minimum(1.0, scalar)
scalar = np.maximum(0.0, scalar)
return scalar
@staticmethod
def _inverse_normalize_scalar(bounds, normalized_scalar):
"""Returns inverse normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param normalized_scalar: normalized scalar value
:type normalized_scalar: float
:return: original scalar
:rtype: float
"""
normalized_scalar = float(normalized_scalar)
normalized_scalar = normalized_scalar * (bounds[1] - bounds[0]) + bounds[0]
return normalized_scalar
@staticmethod
def _normalize_integer(bounds, integer):
"""
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param integer: value to be normalized
        :type integer: int
:return: normalized value between 0 and 1
:rtype: float
"""
integer = int(integer)
return Searchspace._normalize_scalar(bounds, integer)
@staticmethod
def _inverse_normalize_integer(bounds, scalar):
"""Returns inverse normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
        :param scalar: normalized scalar value
        :type scalar: float
:return: original integer
:rtype: int
"""
x = Searchspace._inverse_normalize_scalar(bounds, scalar)
return int(np.round(x))
@staticmethod
def dict_to_list(hparams):
"""Transforms dict of hparams to list representation ( for one hparam config )
example:
{'x': -3.0, 'y': 3.0, 'z': 'green'} to [-3.0, 3.0, 'green']
:param hparams: hparams in dict representation
:type hparams: dict
:return: hparams in list representation
:rtype: list
"""
return list(hparams.values())
def list_to_dict(self, hparams):
"""Transforms list of hparams to dict representation ( for one hparam config )
example:
[-3.0, 3.0, 'green'] to {'x': -3.0, 'y': 3.0, 'z': 'green'}
:param hparams: hparams in list representation
:type hparams: list
:return: hparams in dict representation
:rtype: dict
"""
hparam_names = self.keys()
if len(hparam_names) != len(hparams):
raise ValueError(
"hparam_names and hparams have to have same length (and order!)"
)
hparam_dict = {
hparam_name: hparam for hparam_name, hparam in zip(hparam_names, hparams)
}
return hparam_dict
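# --- Hedged usage sketch (not part of the original maggy module) -------------
# It only exercises methods defined above; the hyperparameter names and
# bounds below are made up for illustration.
if __name__ == "__main__":
    sp = Searchspace(kernel=("INTEGER", [2, 8]), dropout=("DOUBLE", [0.01, 0.99]))
    sp.add("activation", ("CATEGORICAL", ["relu", "tanh"]))
    sample = sp.get_random_parameter_values(1)[0]      # e.g. {'kernel': 5, ...}
    as_list = Searchspace.dict_to_list(sample)           # dict -> ordered list
    encoded = sp.transform(as_list, normalize_categorical=True)
    decoded = sp.inverse_transform(encoded, normalize_categorical=True)
    print(sample, encoded, decoded)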
|
[
"random.uniform",
"random.choice",
"numpy.minimum",
"numpy.maximum",
"random.randint",
"numpy.round"
] |
[((15258, 15281), 'numpy.minimum', 'np.minimum', (['(1.0)', 'scalar'], {}), '(1.0, scalar)\n', (15268, 15281), True, 'import numpy as np\n'), ((15299, 15322), 'numpy.maximum', 'np.maximum', (['(0.0)', 'scalar'], {}), '(0.0, scalar)\n', (15309, 15322), True, 'import numpy as np\n'), ((16816, 16827), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (16824, 16827), True, 'import numpy as np\n'), ((7161, 7215), 'random.uniform', 'random.uniform', (['feasible_region[0]', 'feasible_region[1]'], {}), '(feasible_region[0], feasible_region[1])\n', (7175, 7215), False, 'import random\n'), ((7348, 7402), 'random.randint', 'random.randint', (['feasible_region[0]', 'feasible_region[1]'], {}), '(feasible_region[0], feasible_region[1])\n', (7362, 7402), False, 'import random\n'), ((7536, 7566), 'random.choice', 'random.choice', (['feasible_region'], {}), '(feasible_region)\n', (7549, 7566), False, 'import random\n'), ((7657, 7687), 'random.choice', 'random.choice', (['feasible_region'], {}), '(feasible_region)\n', (7670, 7687), False, 'import random\n')]
|
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
import sklearn.gaussian_process.kernels as Kernels
from scipy.optimize import minimize
from numpy.linalg import norm
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected as tf_layer
class Kernel_Optimization():
def __init__(self, dict_mat=None, kernel_type='RBF', CV=5, X=np.array([[1,2],[2,3],[3,4]]) , y=np.array([[1],[2],[3]]),
All_material = ['K+','P-']):
self._kernel_type = kernel_type
self.All_material = All_material
kernel = getattr(Kernels,kernel_type)
self.dict_mat = dict_mat
if kernel_type =='ExpSineSquared':
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [kernel(length_scale=l,periodicity=p)
for l in np.logspace(-2, 2, 500)
for p in np.logspace(-2, 2, 500)]}
elif kernel_type =='RBF':
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [kernel(length_scale=l)
for l in np.logspace(-2, 2, 100)]}
self._CV = CV
self.kr= GridSearchCV(KernelRidge(), cv=self._CV, param_grid=param_grid)
self.X , self.y = X, y
self.kr.fit(self.X, self.y)
def kr_func(self, x):
return self.kr.predict(x)
def constraint(self, x):
''' Create Constraints for physically-consistent solvent decomposition
sum_cat x_i = 1.0 & sum_an x_i = 1.0 , x_i > 0 for both cation and anaion
'''
n_cations = 0
n_anions = 0
for k in self.All_material:
if k[-1] =='+':
n_cations += 1
else:
n_anions += 1
n_constraints = len(self.All_material)+ 2
for cnt, m in enumerate(self.All_material):
if m[:-1] in self.dict_mat.keys():
n_constraints -= 1
if x[cnt] <0 or x[cnt] > 1:
n_constraints += 1
val_constraints = np.zeros((n_constraints))
cat_list = []
an_list = []
# active (user selected) materials constraints
for k, v in self.dict_mat.items():
if v =='+':
cat_list.append(k)
if v =='-':
an_list.append(k)
cnt = 2
for i in range(len(self.All_material)):
if self.All_material[i][:-1] in cat_list:
val_constraints[0] += x[i]
elif self.All_material[i][:-1] in an_list:
val_constraints[1] += x[i]
else:
val_constraints[cnt] += x[i]
cnt += 1
if x[i] < 0 or x[i] > 1:
val_constraints[cnt] += x[i]
cnt += 1
val_constraints[0] -= 1.0
val_constraints[1] -= 1.0
return val_constraints
def minimize_func(self, optimal, sig,i=0):
if i==0:
optimal = self.X[np.random.randint(self.X.shape[0])]
def funct(x):
const = self.constraint(x)
f = 0
for i in range(len(const)):
f += sig*max(0.0, const[i]**2)
return self.kr_func(x) + f
res = minimize(funct, optimal, method='nelder-mead', options={'xtol': 1e-16, 'disp': False, 'maxiter': 1000})
optimal = res.x
return optimal
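# --- Hedged sketch (not part of the original class) --------------------------
# minimize_func above folds the constraint violations into the objective with a
# quadratic penalty. The standalone helper below restates that construction so
# the idea can be tried without fitting the kernel-ridge model; the objective,
# constraint and sig values are made up for illustration.
def quadratic_penalty(objective, constraints, sig):
    def funct(x):
        violation = sum(c ** 2 for c in constraints(x))
        return objective(x) + sig * violation
    return funct
# Example: minimize x0**2 + x1**2 subject to x0 + x1 = 1 (one equality constraint).
_res = minimize(quadratic_penalty(lambda x: x[0] ** 2 + x[1] ** 2,
                           lambda x: [x[0] + x[1] - 1.0],
                           1e3),
               np.array([0.0, 0.0]), method='nelder-mead')
# _res.x is approximately [0.5, 0.5]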
|
[
"scipy.optimize.minimize",
"numpy.array",
"numpy.random.randint",
"numpy.zeros",
"sklearn.kernel_ridge.KernelRidge",
"numpy.logspace"
] |
[((479, 513), 'numpy.array', 'np.array', (['[[1, 2], [2, 3], [3, 4]]'], {}), '([[1, 2], [2, 3], [3, 4]])\n', (487, 513), True, 'import numpy as np\n'), ((513, 538), 'numpy.array', 'np.array', (['[[1], [2], [3]]'], {}), '([[1], [2], [3]])\n', (521, 538), True, 'import numpy as np\n'), ((2240, 2263), 'numpy.zeros', 'np.zeros', (['n_constraints'], {}), '(n_constraints)\n', (2248, 2263), True, 'import numpy as np\n'), ((3503, 3610), 'scipy.optimize.minimize', 'minimize', (['funct', 'optimal'], {'method': '"""nelder-mead"""', 'options': "{'xtol': 1e-16, 'disp': False, 'maxiter': 1000}"}), "(funct, optimal, method='nelder-mead', options={'xtol': 1e-16,\n 'disp': False, 'maxiter': 1000})\n", (3511, 3610), False, 'from scipy.optimize import minimize\n'), ((1324, 1337), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {}), '()\n', (1335, 1337), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((3233, 3267), 'numpy.random.randint', 'np.random.randint', (['self.X.shape[0]'], {}), '(self.X.shape[0])\n', (3250, 3267), True, 'import numpy as np\n'), ((960, 983), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(500)'], {}), '(-2, 2, 500)\n', (971, 983), True, 'import numpy as np\n'), ((1018, 1041), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(500)'], {}), '(-2, 2, 500)\n', (1029, 1041), True, 'import numpy as np\n'), ((1233, 1256), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(100)'], {}), '(-2, 2, 100)\n', (1244, 1256), True, 'import numpy as np\n')]
|
import numpy as np
from gym import spaces
from agents import SimpleAgentClass
# Create agents for the CMA-ES, NEAT and WANN agents
# defined in the weight-agnostic paper repo:
# https://github.com/google/brain-tokyo-workshop/tree/master/WANNRelease/
# -------------------------------------------------------------------
# Here begins copy/paste from WANNRelease code linked above
def weightedRandom(weights):
"""Returns random index, with each choices chance weighted
Args:
weights - (np_array) - weighting of each choice
[N X 1]
Returns:
i - (int) - chosen index
"""
minVal = np.min(weights)
weights = weights - minVal # handle negative vals
cumVal = np.cumsum(weights)
pick = np.random.uniform(0, cumVal[-1])
for i in range(len(weights)):
if cumVal[i] >= pick:
return i
def selectAct(action, actSelect):
"""Selects action based on vector of actions
Single Action:
- Hard: a single action is chosen based on the highest index
    - Prob: a single action is chosen probabilistically with higher values
more likely to be chosen
  When we aren't selecting a single action:
- Softmax: a softmax normalized distribution of values is returned
- Default: all actions are returned
Args:
action - (np_array) - vector weighting each possible action
[N X 1]
Returns:
i - (int) or (np_array) - chosen index
[N X 1]
"""
if actSelect == 'softmax':
action = softmax(action)
elif actSelect == 'prob':
action = weightedRandom(np.sum(action,axis=0))
else:
action = action.flatten()
return action
def act(weights, aVec, nInput, nOutput, inPattern):
"""Returns FFANN output given a single input pattern
If the variable weights is a vector it is turned into a square weight matrix.
Allows the network to return the result of several samples at once if given a matrix instead of a vector of inputs:
Dim 0 : individual samples
Dim 1 : dimensionality of pattern (# of inputs)
Args:
weights - (np_array) - ordered weight matrix or vector
[N X N] or [N**2]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
nInput - (int) - number of input nodes
nOutput - (int) - number of output nodes
inPattern - (np_array) - input activation
[1 X nInput] or [nSamples X nInput]
Returns:
output - (np_array) - output activation
[1 X nOutput] or [nSamples X nOutput]
"""
# Turn weight vector into weight matrix
if np.ndim(weights) < 2:
nNodes = int(np.sqrt(np.shape(weights)[0]))
wMat = np.reshape(weights, (nNodes, nNodes))
else:
nNodes = np.shape(weights)[0]
wMat = weights
wMat[np.isnan(wMat)]=0
# Vectorize input
if np.ndim(inPattern) > 1:
nSamples = np.shape(inPattern)[0]
else:
nSamples = 1
# Run input pattern through ANN
nodeAct = np.zeros((nSamples,nNodes))
nodeAct[:,0] = 1 # Bias activation
nodeAct[:,1:nInput+1] = inPattern
# Propagate signal through hidden to output nodes
iNode = nInput+1
for iNode in range(nInput+1,nNodes):
rawAct = np.dot(nodeAct, wMat[:,iNode]).squeeze()
nodeAct[:,iNode] = applyAct(aVec[iNode], rawAct)
#print(nodeAct)
output = nodeAct[:,-nOutput:]
return output
def applyAct(actId, x):
"""Returns value after an activation function is applied
Lookup table to allow activations to be stored in numpy arrays
case 1 -- Linear
case 2 -- Unsigned Step Function
case 3 -- Sin
  case 4 -- Gaussian with mean 0 and sigma 1
case 5 -- Hyperbolic Tangent [tanh] (signed)
case 6 -- Sigmoid unsigned [1 / (1 + exp(-x))]
case 7 -- Inverse
case 8 -- Absolute Value
case 9 -- Relu
case 10 -- Cosine
case 11 -- Squared
Args:
actId - (int) - key to look up table
x - (???) - value to be input into activation
[? X ?] - any type or dimensionality
Returns:
output - (float) - value after activation is applied
[? X ?] - same dimensionality as input
"""
if actId == 1: # Linear
value = x
if actId == 2: # Unsigned Step Function
value = 1.0*(x>0.0)
#value = (np.tanh(50*x/2.0) + 1.0)/2.0
elif actId == 3: # Sin
value = np.sin(np.pi*x)
elif actId == 4: # Gaussian with mean 0 and sigma 1
value = np.exp(-np.multiply(x, x) / 2.0)
elif actId == 5: # Hyperbolic Tangent (signed)
value = np.tanh(x)
elif actId == 6: # Sigmoid (unsigned)
value = (np.tanh(x/2.0) + 1.0)/2.0
elif actId == 7: # Inverse
value = -x
elif actId == 8: # Absolute Value
value = abs(x)
elif actId == 9: # Relu
value = np.maximum(0, x)
elif actId == 10: # Cosine
value = np.cos(np.pi*x)
elif actId == 11: # Squared
value = x**2
else:
value = x
return value
# End of copypaste
# -------------------------------------------------------------------
# This action is original to this repository
def create_wann_agent(agent_path, agent_type, env):
"""
Load and return a WANN agent.
The agent has a function `get_action` that takes in
    an observation and returns an appropriate action.
"""
np_data = np.load(agent_path)
wMat = np_data["wMat"]
aVec = np_data["aVec"]
# TODO support for other input spaces?
nInput = env.observation_space.shape[0]
nOutput = 0
action_type = "all"
if isinstance(env.action_space, spaces.Box):
nOutput = env.action_space.shape[0]
elif isinstance(env.action_space, spaces.Discrete):
nOutput = env.action_space.n
action_type = "prob"
else:
raise ValueError("Unsupported action space")
def get_action(obs):
# Includes batch-size
output = act(wMat, aVec, nInput, nOutput, obs)
action = selectAct(output, action_type)
return action
agent = SimpleAgentClass(lambda obs: get_action(obs))
return agent
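# --- Hedged illustration (not part of the original WANN code) ----------------
# A tiny hand-built network run through act() above: node 0 is the bias,
# node 1 the single input, node 2 the single output with a tanh activation
# (activation id 5 in applyAct). All numbers are made up for illustration.
_nNodes = 3
_wMat = np.zeros((_nNodes, _nNodes))
_wMat[0, 2] = 0.5          # bias  -> output
_wMat[1, 2] = 1.0          # input -> output
_aVec = np.array([1, 1, 5])  # linear, linear, tanh
_out = act(_wMat, _aVec, nInput=1, nOutput=1, inPattern=np.array([[0.25]]))
# _out has shape [1 x 1] and equals tanh(0.5*1 + 1.0*0.25)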
|
[
"numpy.multiply",
"numpy.reshape",
"numpy.maximum",
"numpy.min",
"numpy.ndim",
"numpy.tanh",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.isnan",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.cumsum",
"numpy.shape",
"numpy.load"
] |
[((644, 659), 'numpy.min', 'np.min', (['weights'], {}), '(weights)\n', (650, 659), True, 'import numpy as np\n'), ((727, 745), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (736, 745), True, 'import numpy as np\n'), ((757, 789), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'cumVal[-1]'], {}), '(0, cumVal[-1])\n', (774, 789), True, 'import numpy as np\n'), ((3155, 3183), 'numpy.zeros', 'np.zeros', (['(nSamples, nNodes)'], {}), '((nSamples, nNodes))\n', (3163, 3183), True, 'import numpy as np\n'), ((5582, 5601), 'numpy.load', 'np.load', (['agent_path'], {}), '(agent_path)\n', (5589, 5601), True, 'import numpy as np\n'), ((2747, 2763), 'numpy.ndim', 'np.ndim', (['weights'], {}), '(weights)\n', (2754, 2763), True, 'import numpy as np\n'), ((2836, 2873), 'numpy.reshape', 'np.reshape', (['weights', '(nNodes, nNodes)'], {}), '(weights, (nNodes, nNodes))\n', (2846, 2873), True, 'import numpy as np\n'), ((2954, 2968), 'numpy.isnan', 'np.isnan', (['wMat'], {}), '(wMat)\n', (2962, 2968), True, 'import numpy as np\n'), ((3002, 3020), 'numpy.ndim', 'np.ndim', (['inPattern'], {}), '(inPattern)\n', (3009, 3020), True, 'import numpy as np\n'), ((2901, 2918), 'numpy.shape', 'np.shape', (['weights'], {}), '(weights)\n', (2909, 2918), True, 'import numpy as np\n'), ((3045, 3064), 'numpy.shape', 'np.shape', (['inPattern'], {}), '(inPattern)\n', (3053, 3064), True, 'import numpy as np\n'), ((4581, 4598), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (4587, 4598), True, 'import numpy as np\n'), ((1649, 1671), 'numpy.sum', 'np.sum', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (1655, 1671), True, 'import numpy as np\n'), ((3394, 3425), 'numpy.dot', 'np.dot', (['nodeAct', 'wMat[:, iNode]'], {}), '(nodeAct, wMat[:, iNode])\n', (3400, 3425), True, 'import numpy as np\n'), ((2798, 2815), 'numpy.shape', 'np.shape', (['weights'], {}), '(weights)\n', (2806, 2815), True, 'import numpy as np\n'), ((4772, 4782), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (4779, 4782), True, 'import numpy as np\n'), ((4679, 4696), 'numpy.multiply', 'np.multiply', (['x', 'x'], {}), '(x, x)\n', (4690, 4696), True, 'import numpy as np\n'), ((4848, 4864), 'numpy.tanh', 'np.tanh', (['(x / 2.0)'], {}), '(x / 2.0)\n', (4855, 4864), True, 'import numpy as np\n'), ((5035, 5051), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (5045, 5051), True, 'import numpy as np\n'), ((5103, 5120), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5109, 5120), True, 'import numpy as np\n')]
|
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
class Anime_Dataset(Dataset):
def __init__(self, config, transform):
self.config = config
self.transform = transform
self.lines = open(config.label_path, 'r').readlines()
self.num_data = len(self.lines)
self.image_ids = []
self.labels = []
self.tag_dict = {'orange_hair': 0, 'white_hair': 1, 'aqua_hair': 2, 'gray_hair': 3, 'green_hair': 4,
'red_hair': 5, 'purple_hair': 6, 'pink_hair': 7, 'blue_hair': 8, 'black_hair': 9,
'brown_hair': 10, 'blonde_hair': 11, 'gray_eyes': 12, 'black_eyes': 13, 'orange_eyes': 14,
'pink_eyes': 15, 'yellow_eyes': 16, 'aqua_eyes': 17, 'purple_eyes': 18, 'green_eyes': 19,
'brown_eyes': 20, 'red_eyes': 21, 'blue_eyes': 22, 'bicolored_eyes': 23}
print('preprocessing...')
print('number of images: ', self.num_data)
self.preprocess()
def __len__(self):
return self.num_data
def __getitem__(self, index):
correct_image = Image.open(os.path.join(self.config.image_dir, self.image_ids[index] + '.jpg'))
correct_text = self.labels[index]
# wrong_text = self.labels[np.random.randint(low=0, high=self.num_data)]
random_index = np.random.randint(low=0, high=self.num_data)
wrong_image = Image.open(os.path.join(self.config.image_dir, self.image_ids[random_index] + '.jpg'))
return self.transform(correct_image), torch.Tensor(correct_text), self.transform(wrong_image)
def preprocess(self):
for i, line in enumerate(self.lines):
splits = line.split()
image_id = splits[0]
attr_values = splits[1:]
one_hot = np.zeros(len(self.tag_dict))
for value in attr_values:
index = self.tag_dict[value]
one_hot[index] = 1
self.labels += [one_hot]
self.image_ids += [image_id]
def generate_embedding(self):
test_str = ['blue_hair, red_eyes', 'brown_hair, brown_eyes', 'black_hair, blue_eyes', 'red_hair, green_eyes']
embeddings = {}
for str in test_str:
split = str.split(', ')
one_hot = np.zeros(len(self.tag_dict))
for tag in split:
one_hot[self.tag_dict[tag]] = 1
embeddings[str] = one_hot
return embeddings
def get_loader(config):
transform = transforms.Compose([
# transforms.CenterCrop(config.crop_size),
transforms.Scale(config.image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), # 3 for RGB channels
std=(0.5, 0.5, 0.5))
])
dataset = Anime_Dataset(config, transform)
print('generating test embeddings...')
embeddings = dataset.generate_embedding()
data_loader = DataLoader(dataset,
config.batch_size,
shuffle=True,
num_workers=4,
drop_last=True)
return data_loader, embeddings
|
[
"torchvision.transforms.Scale",
"os.path.join",
"torch.Tensor",
"torchvision.transforms.RandomHorizontalFlip",
"numpy.random.randint",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor"
] |
[((3071, 3158), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'config.batch_size'], {'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(dataset, config.batch_size, shuffle=True, num_workers=4,\n drop_last=True)\n', (3081, 3158), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1436, 1480), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.num_data'}), '(low=0, high=self.num_data)\n', (1453, 1480), True, 'import numpy as np\n'), ((1221, 1288), 'os.path.join', 'os.path.join', (['self.config.image_dir', "(self.image_ids[index] + '.jpg')"], {}), "(self.config.image_dir, self.image_ids[index] + '.jpg')\n", (1233, 1288), False, 'import os\n'), ((1514, 1588), 'os.path.join', 'os.path.join', (['self.config.image_dir', "(self.image_ids[random_index] + '.jpg')"], {}), "(self.config.image_dir, self.image_ids[random_index] + '.jpg')\n", (1526, 1588), False, 'import os\n'), ((1636, 1662), 'torch.Tensor', 'torch.Tensor', (['correct_text'], {}), '(correct_text)\n', (1648, 1662), False, 'import torch\n'), ((2673, 2708), 'torchvision.transforms.Scale', 'transforms.Scale', (['config.image_size'], {}), '(config.image_size)\n', (2689, 2708), False, 'from torchvision import transforms\n'), ((2718, 2751), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2749, 2751), False, 'from torchvision import transforms\n'), ((2761, 2782), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2780, 2782), False, 'from torchvision import transforms\n'), ((2792, 2855), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.5, 0.5, 0.5)', 'std': '(0.5, 0.5, 0.5)'}), '(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n', (2812, 2855), False, 'from torchvision import transforms\n')]
|
import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
def plot_slice(img, slice=80):
# Show some slice in the middle
plt.imshow(img[slice])
plt.show()
def plot_3d(image, threshold=-100):
# Position the scan upright,
# so the head of the patient would be at the top facing the camera
# p = image.transpose(2,1,0)
p = image
results = measure.marching_cubes(p, threshold)
verts = results[0]
faces = results[1]
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.70)
face_color = [0.45, 0.45, 0.75]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.savefig('plot3d.png')
def save(arr, pth):
with open(pth, 'wb+') as fh:
np.savez_compressed(fh, data=arr)
def load(pth):
return np.load(pth)['data']
def read_mapping_file(pth):
return pd.read_csv(pth)
def shuffle_weights(model, weights=None):
"""Randomly permute the weights in `model`, or the given `weights`.
This is a fast approximation of re-initializing the weights of a model.
Assumes weights are distributed independently of the dimensions of the weight tensors
(i.e., the weights have the same distribution along each dimension).
:param Model model: Modify the weights of the given model.
:param list(ndarray) weights: The model's weights will be replaced by a random permutation of these weights.
If `None`, permute the model's current weights.
"""
if weights is None:
weights = model.get_weights()
weights = [np.random.permutation(w.flat).reshape(w.shape) for w in weights]
# Faster, but less random: only permutes along the first dimension
# weights = [np.random.permutation(w) for w in weights]
model.set_weights(weights)
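# --- Hedged usage sketch (not part of the original module) -------------------
# shuffle_weights works on any Keras-style model exposing get_weights()/
# set_weights(); tf.keras is not imported above, so the import here is an
# assumption of this example.
def _shuffle_weights_demo():
    from tensorflow.keras import layers, models  # assumed dependency
    model = models.Sequential([layers.Dense(8, input_shape=(4,)), layers.Dense(1)])
    before = [w.copy() for w in model.get_weights()]
    shuffle_weights(model)   # same values, randomly permuted within each tensor
    after = model.get_weights()
    assert all(b.shape == a.shape for b, a in zip(before, after))
    return before, after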
|
[
"matplotlib.pyplot.imshow",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.random.permutation",
"matplotlib.pyplot.figure",
"skimage.measure.marching_cubes",
"numpy.savez_compressed",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((239, 261), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img[slice]'], {}), '(img[slice])\n', (249, 261), True, 'import matplotlib.pyplot as plt\n'), ((266, 276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (274, 276), True, 'import matplotlib.pyplot as plt\n'), ((481, 517), 'skimage.measure.marching_cubes', 'measure.marching_cubes', (['p', 'threshold'], {}), '(p, threshold)\n', (503, 517), False, 'from skimage import measure, morphology\n'), ((575, 603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (585, 603), True, 'import matplotlib.pyplot as plt\n'), ((738, 779), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'Poly3DCollection', (['verts[faces]'], {'alpha': '(0.7)'}), '(verts[faces], alpha=0.7)\n', (754, 779), False, 'from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n'), ((981, 1006), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot3d.png"""'], {}), "('plot3d.png')\n", (992, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1210), 'pandas.read_csv', 'pd.read_csv', (['pth'], {}), '(pth)\n', (1205, 1210), True, 'import pandas as pd\n'), ((1070, 1103), 'numpy.savez_compressed', 'np.savez_compressed', (['fh'], {'data': 'arr'}), '(fh, data=arr)\n', (1089, 1103), True, 'import numpy as np\n'), ((1132, 1144), 'numpy.load', 'np.load', (['pth'], {}), '(pth)\n', (1139, 1144), True, 'import numpy as np\n'), ((1885, 1914), 'numpy.random.permutation', 'np.random.permutation', (['w.flat'], {}), '(w.flat)\n', (1906, 1914), True, 'import numpy as np\n')]
|
import os
import torch
import numpy as np
from tqdm import tqdm
import json
from torch.utils.data import Dataset, DataLoader
from arcface.resnet import ResNet
from arcface.googlenet import GoogLeNet
from arcface.inception_v4 import InceptionV4
from arcface.inceptionresnet_v2 import InceptionResNetV2
from arcface.densenet import DenseNet
from arcface.resnet_cbam import ResNetCBAM
import torchvision.transforms as transforms
import cv2
import random
import jieba
from autoaugment import rand_augment_transform
from PIL import Image
'''
for image-text match
'''
class ITMatchTrain(Dataset):
def __init__(self, opt):
arcfaceDataset = ArcfaceDataset(root_dir=opt.data_path, mode="train", size=(opt.size, opt.size), imgORvdo='video')
batch_size = 256
training_params = {"batch_size": batch_size,
"shuffle": False,
"drop_last": False,
"num_workers": opt.workers}
arcfaceLoader = DataLoader(arcfaceDataset, **training_params)
self.vocab_size = arcfaceDataset.vocab_size
if opt.network == 'resnet':
model = ResNet(opt)
b_name = opt.network+'_'+opt.mode+'_{}'.format(opt.num_layers_r)
elif opt.network == 'googlenet':
model = GoogLeNet(opt)
b_name = opt.network
elif opt.network == 'inceptionv4':
model = InceptionV4(opt)
b_name = opt.network
elif opt.network == 'inceptionresnetv2':
model = InceptionResNetV2(opt)
b_name = opt.network
elif opt.network == 'densenet':
model = DenseNet(opt)
b_name = opt.network+'_{}'.format(opt.num_layers_d)
elif opt.network == 'resnet_cbam':
model = ResNetCBAM(opt)
b_name = opt.network+'_{}'.format(opt.num_layers_c)
else:
raise RuntimeError('Cannot Find the Model: {}'.format(opt.network))
model.load_state_dict(torch.load(os.path.join(opt.saved_path, b_name+'.pth')))
model.cuda()
model.eval()
self.model_name = b_name
self.features = torch.zeros((len(arcfaceDataset), opt.embedding_size))
self.texts = torch.zeros((len(arcfaceDataset), 64)).long()
self.instances = torch.zeros((len(arcfaceDataset))).long()
print('Calculating features...')
for i, d in enumerate(tqdm(arcfaceLoader)):
# img = d['img'].cuda()
text = d['text']
instance = d['instance']
# with torch.no_grad():
# feature = model(img).cpu()
# self.features[i*batch_size:(i+1)*batch_size] = feature
self.texts[i*batch_size:(i+1)*batch_size] = text
self.instances[i*batch_size:(i+1)*batch_size] = instance
def __len__(self):
return self.texts.size(0)
def __getitem__(self, index):
text = self.texts[index]
# feature = self.features[index]
feature = None
instance = self.instances[index]
# return {'feature': feature, 'text':text, 'instance':instance}
return {'text':text, 'instance':instance}
class ITMatchValidation(Dataset):
def __init__(self, size=(224, 224), root_dir='data/validation_instance/', maxLen=64, PAD=0, imgORvdo='video'):
self.root_dir = root_dir
self.size = size
text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
self.vocab_size = text2num.vocab_size
assert imgORvdo in ['image', 'video']
tat = 'validation_'+imgORvdo+'s'
# tat = 'train_'+imgORvdo+'s'
with open(os.path.join('data', tat+'_text.json'), 'r') as f:
textDic = json.load(f)
for k in textDic.keys():
textDic[k] = text2num(textDic[k])
instances = os.listdir(root_dir)
self.items = []
print('Loading Data...')
for instance in tqdm(instances):
imgs = os.listdir(root_dir+instance)
l = []
for img in imgs:
if imgORvdo in img:
l.append(os.path.join(instance, img))
text_name = img.split(instance)[-1].split('_')[0]
l.append(textDic[text_name])
break
if len(l) < 2:
continue
self.items.append(l)
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.items)
def __getitem__(self, index):
imgPath, text = self.items[index]
text = torch.Tensor(text).long()
# img = np.load(os.path.join(self.root_dir, imgPath))
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
hi, wi, ci = img.shape
rh = (hi-self.size[0])//2
rw = (wi-self.size[1])//2
img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
return {
'img': img,
'text': text
}
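# --- Illustrative note (not part of the original pipeline) ---
# ITMatchValidation center-crops one stored crop per validation instance and
# pairs it with the tokenized title of its source image/video, yielding
# {'img', 'text'} items for scoring the image-text matching head.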
'''
for text
'''
class Text2Num:
def __init__(self, maxLen, root_dir='data', PAD=0):
with open(os.path.join(root_dir, 'vocab.json'), 'r') as f:
self.vocab = json.load(f)
self.PAD = PAD
self.maxLen = maxLen
self.vocab_size = len(self.vocab)
def __call__(self, text):
words = jieba.cut(text, cut_all=False, HMM=True)
# l = [len(self.vocab)]# CLS
l = []
for w in words:
if w.strip() in self.vocab:
l.append(self.vocab[w.strip()])
if len(l) > self.maxLen:
l = l[:self.maxLen]
elif len(l) < self.maxLen:
l += [self.PAD]*(self.maxLen-len(l))
assert len(l) == self.maxLen
return l
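# --- Illustrative usage sketch (not part of the original pipeline) ---
# Text2Num segments a caption with jieba, keeps only in-vocabulary tokens,
# and truncates/pads the id sequence to exactly `maxLen`, so every text maps
# to a fixed-length list of ints. Assuming data/vocab.json exists:
#
#   t2n = Text2Num(maxLen=64, root_dir='data', PAD=0)
#   ids = t2n(caption)   # caption: a product-title string; len(ids) == 64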
'''
for efficientdet
'''
class EfficientdetDataset(Dataset):
def __init__(self, root_dir='data', mode='train', imgORvdo='all', transform=None, maxLen=64, PAD=0):
assert mode in ['train', 'validation']
assert imgORvdo in ['image', 'video', 'all']
self.root_dir = root_dir
self.transform = transform
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
if imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
else:
tats = [mode + '_images', mode + '_videos']
self.textDic = {}
ds = []
for t in tats:
with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
ds.append(json.load(f))
with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
self.textDic[t] = json.load(f)
for k in self.textDic.keys():
for kk in self.textDic[k].keys():
self.textDic[k][kk] = text2num(self.textDic[k][kk])
ls = [d['annotations'] for d in ds]
self.images = []
print('Loading {} {} data...'.format(mode, imgORvdo))
for i, l in enumerate(ls):
for d in l:
if len(d['annotations']) == 0:
continue
t = []
t.append(os.path.join(tats[i], d['img_name']))
t.append(d['annotations'])
t.append(d['img_name'])
t.append(tats[i])
self.images.append(t)
# print(len(self.images))
# self.images = self.images[:1000]
print('Done')
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
text_name = imgName.split('_')[0]
text = self.textDic[t][text_name]
text = torch.Tensor(text).long()
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
annotations = np.zeros((len(annotationsList), 6))
for i, annotationDic in enumerate(annotationsList):
annotation = np.zeros((1, 6))
annotation[0, :4] = annotationDic['box']
annotation[0, 4] = annotationDic['label']
if annotationDic['instance_id'] > 0:
annotation[0, 5] = 1
else:
annotation[0, 5] = 0
annotations[i:i+1, :] = annotation
# annotations = np.append(annotations, annotation, axis=0)
sample = {'img': img, 'annot': annotations, 'text': text}
if self.transform:
sample = self.transform(sample)
return sample
def label2index(self, label):
return self.labelDic['label2index'][label]
def index2label(self, index):
return self.labelDic['index2label'][str(index)]
def getImagePath(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
return imgPath
def getImageInfo(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
imgID, frame = imgName[:-4].split('_')
return imgPath, imgID, frame
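# --- Illustrative usage sketch (not part of the original pipeline) ---
# Each EfficientdetDataset sample is {'img': HxWx3 float32 in [0, 1],
# 'annot': an (N, 6) array per box (4 box coordinates, class label,
# has-instance flag), 'text': LongTensor of length maxLen}. Because N varies
# per image, batching requires a custom collate_fn in the DataLoader:
#
#   ds = EfficientdetDataset(root_dir='data', mode='train', imgORvdo='all')
#   sample = ds[0]
#   print(sample['img'].shape, sample['annot'].shape, sample['text'].shape)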
class EfficientdetDatasetVideo(Dataset):
def __init__(self, root_dir='data', mode='train', imgORvdo='video', transform=None, maxLen=64, PAD=0):
assert mode in ['train', 'validation']
assert imgORvdo in ['video']
self.root_dir = root_dir
self.transform = transform
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
tats = [mode + '_videos']
self.textDic = {}
ds = []
for t in tats:
with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
ds.append(json.load(f))
with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
self.textDic[t] = json.load(f)
for k in self.textDic.keys():
for kk in self.textDic[k].keys():
self.textDic[k][kk] = text2num(self.textDic[k][kk])
ls = [d['annotations'] for d in ds]
self.images = []
self.videos = {}
print('Loading {} {} data...'.format(mode, imgORvdo))
for i, l in enumerate(ls):
for d in l:
if d['img_name'][:6] not in self.videos:
self.videos[d['img_name'][:6]] = []
# if len(d['annotations']) == 0:
# continue
t = []
t.append(os.path.join(tats[i], d['img_name']))
t.append(d['annotations'])
t.append(d['img_name'])
t.append(tats[i])
self.videos[d['img_name'][:6]].append(t)
# self.images.append(t)
self.videos = list(self.videos.values())
for l in self.videos:
assert len(l) == 10
# print(len(self.images))
        self.videos = self.videos[:100]  # NOTE: keeps only the first 100 videos (quick-eval truncation left enabled)
print('Done')
def __len__(self):
return len(self.videos)
def __getitem__(self, index):
lst = self.videos[index]
datas = []
for imgPath, annotationsList, imgName, t in lst:
# imgPath, annotationsList, imgName, t = self.images[index]
text_name = imgName.split('_')[0]
text = self.textDic[t][text_name]
text = torch.Tensor(text).long()
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
annotations = np.zeros((len(annotationsList), 6))
for i, annotationDic in enumerate(annotationsList):
annotation = np.zeros((1, 6))
annotation[0, :4] = annotationDic['box']
annotation[0, 4] = annotationDic['label']
if annotationDic['instance_id'] > 0:
annotation[0, 5] = 1
else:
annotation[0, 5] = 0
annotations[i:i+1, :] = annotation
# annotations = np.append(annotations, annotation, axis=0)
sample = {'img': img, 'annot': annotations, 'text': text}
datas.append(sample)
if self.transform:
datas = self.transform(datas)
return datas
# def label2index(self, label):
# return self.labelDic['label2index'][label]
# def index2label(self, index):
# return self.labelDic['index2label'][str(index)]
# def getImagePath(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# return imgPath
# def getImageInfo(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# imgID, frame = imgName[:-4].split('_')
# return imgPath, imgID, frame
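# --- Illustrative note (not part of the original pipeline) ---
# EfficientdetDatasetVideo groups the ten annotated frames of a video into a
# single item (a list of per-frame sample dicts), so any transform passed in
# must accept a list of samples rather than a single sample dict.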
'''
for arcface
'''
class ArcfaceDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
        assert mode in ['train', 'train_2', 'all']
assert imgORvdo in ['all', 'image', 'video']
mean=[0.55574415, 0.51230767, 0.51123354]
aa_params = dict(
translate_const=int(size[0] * 0.40),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
if mode == 'train':
modes = ['train']
instanceFile = 'instanceID.json'
elif mode == 'train_2':
modes = ['train', 'validation_2']
instanceFile = 'instanceID_2.json'
elif mode == 'all':
modes = ['train', 'validation_2', 'validation']
instanceFile = 'instanceID_all.json'
with open(os.path.join(root_dir, instanceFile), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
self.instance2label = json.load(f)
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
self.images = []
self.textDics = {}
for mode in modes:
if imgORvdo == 'all':
tats = [mode + '_images', mode + '_videos']
elif imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
# img_tat = mode + '_images'
# vdo_tat = mode + '_videos'
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
d = []
textDic = []
for tat in tats:
with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
d.append(json.load(f))
with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
textDic.append(json.load(f))
for i in range(len(textDic)):
for k in textDic[i].keys():
textDic[i][k] = text2num(textDic[i][k])
self.textDics[mode] = textDic
l = [dd['annotations'] for dd in d]
print('Loading data...')
for i, ll in enumerate(l):
for d in ll:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(self.savePath, str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
t.append(dd['instance_id'])
t.append(d['img_name'].split('_')[0])
t.append(i)
t.append(mode)
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(self.instance2label.values()))
# self.images = self.images[:2222]
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgName, instance_id, textName, iORv, mode = self.images[index]
img = np.load(imgName[:-4]+'.npy')
# img = cv2.imread(imgName[:-4]+'.jpg')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# '''randAug'''
# img = Image.fromarray(np.uint8(img*255))
# img = self.randAug(img)
# img.save('aaa.jpg')
# img = np.array(img)
# img = img.astype(np.float32) / 255
# '''randAug'''
text = self.textDics[mode][iORv][textName]
text = torch.tensor(text).long()
iORv = torch.tensor(iORv).long()
h, w, c = img.shape
# print(h,w,c)
rh = random.randint(0, h-256)
rw = random.randint(0, w-256)
img = img[rh:256+rh, rw:256+rw, :]
img = cv2.resize(img, self.size)
# '''random erasing'''
# if np.random.rand() < 0.5:
# w = h = 256
# while w >= 256 or h >= 256:
# r = np.random.uniform(0.3, 1/0.3)
# s = 256*256*np.random.uniform(0.02, 0.4)
# w = int(np.sqrt(s*r))
# h = int(np.sqrt(s/r))
# s_w = random.randint(0, 256-w)
# s_h = random.randint(0, 256-h)
# img[s_h:s_h+h, s_w:s_w+w, :] = 0
# print(img.shape)
instance = torch.tensor(self.clsDic[str(instance_id)])
label = torch.tensor(self.instance2label[str(instance_id)])
if np.random.rand() < self.flip_x:
img = img[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
return {'img':img, 'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
# return {'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
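# --- Illustrative usage sketch (not part of the original pipeline) ---
# ArcfaceDataset loads pre-cropped instance patches saved as .npy (at least
# 256 px per side), takes a random 256x256 window, resizes it to `size`,
# flips horizontally with probability flip_x, and normalizes. Each item is
# {'img', 'instance', 'label', 'text', 'iORv'}:
#
#   ds = ArcfaceDataset(root_dir='data', mode='train', size=(112, 112))
#   item = ds[0]
#   # item['img']: FloatTensor (3, 112, 112); item['instance']: fine-grained
#   # class id for the ArcFace head; item['label']: coarse category id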
class ArcfaceDatasetSeparate(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
assert mode in ['train']
assert imgORvdo in ['all']
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
if imgORvdo == 'all':
tats = [mode + '_images', mode + '_videos']
elif imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
d = []
self.textDic = []
for tat in tats:
with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
d.append(json.load(f))
with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
self.textDic.append(json.load(f))
for i in range(len(self.textDic)):
for k in self.textDic[i].keys():
self.textDic[i][k] = text2num(self.textDic[i][k])
l = [dd['annotations'] for dd in d]
self.images = []
with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
self.instance2label = json.load(f)
names = ['image', 'video']
print('Loading data...')
for i, ll in enumerate(l):
for d in ll:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
t.append(dd['instance_id'])
t.append(d['img_name'].split('_')[0])
t.append(names[i])
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(self.instance2label.values()))
self.dic = {}
for i in range(len(self.images)):
imgName, instance_id, textName, iORv = self.images[i]
if instance_id not in self.dic:
self.dic[instance_id] = {}
self.dic[instance_id]['image'] = []
self.dic[instance_id]['video'] = []
self.dic[instance_id][iORv].append(i)
        # drop instances missing either an image or a video crop; iterate over
        # a copy of the keys, since deleting from a dict while iterating over
        # it raises a RuntimeError in Python 3
        for k in list(self.dic.keys()):
            if len(self.dic[k]['image']) == 0 or len(self.dic[k]['video']) == 0:
                del self.dic[k]
self.dic = list(self.dic.items())
# self.images = self.images[:2222]
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.dic)
def __getitem__(self, index):
imgIndex = random.choice(self.dic[index][1]['image'])
vdoIndex = random.choice(self.dic[index][1]['video'])
sample = []
instances = []
for index in [imgIndex, vdoIndex]:
imgName, instance_id, textName, iORv = self.images[index]
img = np.load(os.path.join(self.savePath, imgName)[:-4]+'.npy')
# text = self.textDic[iORv][textName]
# text = torch.tensor(text).long()
# iORv = torch.tensor(iORv).long()
h, w, c = img.shape
rh_1 = random.randint(0, h-224)
rh_2 = random.randint(224, h)
rw_1 = random.randint(0, w-224)
rw_2 = random.randint(224, w)
img = img[rh_1:rh_2, rw_1:rw_2, :]
img = cv2.resize(img, self.size)
instances.append(torch.tensor(self.clsDic[str(instance_id)]))
# label = torch.tensor(self.instance2label[str(instance_id)])
if np.random.rand() < self.flip_x:
img = img[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
sample.append(img)
assert instances[0] == instances[1]
return {'img': sample[0], 'vdo':sample[1], 'instance':instances[0]}
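# --- Illustrative usage sketch (not part of the original pipeline) ---
# ArcfaceDatasetSeparate is indexed by instance rather than by crop: for each
# instance it randomly picks one image crop and one video crop, so every item
# is an aligned image/video pair of the same product:
#
#   ds = ArcfaceDatasetSeparate(root_dir='data', mode='train')
#   pair = ds[0]
#   # pair['img'] and pair['vdo'] are two views of the instance pair['instance']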
class TripletDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5):
assert mode in ['train']
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
img_tat = mode + '_images'
vdo_tat = mode + '_videos'
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
d_i = json.load(f)
with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
d_v = json.load(f)
with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
instance2label = json.load(f)
l_i = d_i['annotations']
l_v = d_v['annotations']
self.images = []
print('Loading data...')
for d in l_i:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
t.append(self.clsDic[str(dd['instance_id'])])
t.append(instance2label[str(dd['instance_id'])])
self.images.append(t)
for d in l_v:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
t.append(self.clsDic[str(dd['instance_id'])])
t.append(instance2label[str(dd['instance_id'])])
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(instance2label.values()))
self.cls_ins_dic = {}
for i, l in enumerate(self.images):
imgName, instance_id, label = l
if label not in self.cls_ins_dic:
self.cls_ins_dic[label] = {}
if instance_id not in self.cls_ins_dic[label]:
self.cls_ins_dic[label][instance_id] = []
self.cls_ins_dic[label][instance_id].append(i)
for k in self.cls_ins_dic.keys():
if len(self.cls_ins_dic[k]) < 2:
raise RuntimeError('size of self.cls_ins_dic[k] must be larger than 1')
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgName_q, instance_id_q, label_q = self.images[index]
p_index = index
while p_index == index:
p_index = random.choice(self.cls_ins_dic[label_q][instance_id_q])
instance_id_n = instance_id_q
while instance_id_n == instance_id_q:
instance_id_n = random.choice(list(self.cls_ins_dic[label_q].keys()))
n_index = random.choice(self.cls_ins_dic[label_q][instance_id_n])
imgName_p, instance_id_p, label_p = self.images[p_index]
imgName_n, instance_id_n, label_n = self.images[n_index]
assert len(set([label_q, label_p, label_n])) == 1
assert len(set([instance_id_q, instance_id_p])) == 1
instance_id_q = torch.tensor(instance_id_q)
instance_id_p = torch.tensor(instance_id_p)
instance_id_n = torch.tensor(instance_id_n)
img_q = np.load(os.path.join(self.savePath, imgName_q)[:-4]+'.npy')
img_p = np.load(os.path.join(self.savePath, imgName_p)[:-4]+'.npy')
img_n = np.load(os.path.join(self.savePath, imgName_n)[:-4]+'.npy')
hq, wq, cq = img_q.shape
hp, wp, cp = img_p.shape
hn, wn, cn = img_n.shape
rh = random.randint(0, hq-self.size[0])
rw = random.randint(0, wq-self.size[1])
img_q = img_q[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
rh = random.randint(0, hp-self.size[0])
rw = random.randint(0, wp-self.size[1])
img_p = img_p[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
rh = random.randint(0, hn-self.size[0])
rw = random.randint(0, wn-self.size[1])
img_n = img_n[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
if np.random.rand() < self.flip_x:
img_q = img_q[:, ::-1, :].copy()
if np.random.rand() < self.flip_x:
img_p = img_p[:, ::-1, :].copy()
if np.random.rand() < self.flip_x:
img_n = img_n[:, ::-1, :].copy()
img_q = torch.from_numpy(img_q).permute(2, 0, 1)
img_p = torch.from_numpy(img_p).permute(2, 0, 1)
img_n = torch.from_numpy(img_n).permute(2, 0, 1)
img_q = self.transform(img_q)
img_p = self.transform(img_p)
img_n = self.transform(img_n)
return {
'img_q':img_q,
'img_p':img_p,
'img_n':img_n,
'img_q_instance':instance_id_q,
'img_p_instance':instance_id_p,
'img_n_instance':instance_id_n,
}
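# --- Illustrative usage sketch (not part of the original pipeline) ---
# TripletDataset returns an (anchor, positive, negative) triple: the positive
# is another crop of the same instance and the negative is a crop of a
# different instance that shares the anchor's coarse label, which makes the
# negatives harder than uniformly random ones. A typical consumer feeds the
# three images through a shared embedding network (called `f` below, an
# assumption) and applies a margin loss, e.g. torch.nn.TripletMarginLoss:
#
#   criterion = torch.nn.TripletMarginLoss(margin=0.3)
#   loss = criterion(f(batch['img_q']), f(batch['img_p']), f(batch['img_n']))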
class HardTripletDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, n_samples=4):
assert mode in ['train', 'all', 'train_2']
mean=[0.55574415, 0.51230767, 0.51123354]
aa_params = dict(
translate_const=int(size[0] * 0.40),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
self.n_samples = n_samples
if mode == 'train':
modes = ['train']
instanceFile = 'instanceID.json'
elif mode == 'train_2':
modes = ['train', 'validation_2']
instanceFile = 'instanceID_2.json'
elif mode == 'all':
modes = ['train', 'validation_2', 'validation']
instanceFile = 'instanceID_all.json'
with open(os.path.join(root_dir, instanceFile), 'r') as f:
self.clsDic = json.load(f)
self.samples = {}
for mode in modes:
img_tat = mode + '_images'
vdo_tat = mode + '_videos'
savePath = mode + '_instance'
savePath = os.path.join(root_dir, savePath)
with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
d_i = json.load(f)
with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
d_v = json.load(f)
l_i = d_i['annotations']
l_v = d_v['annotations']
print('Loading data...')
for d in l_i:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
instance = self.clsDic[str(dd['instance_id'])]
if instance not in self.samples:
self.samples[instance] = []
self.samples[instance].append(
os.path.join(savePath, str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
for d in l_v:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
instance = self.clsDic[str(dd['instance_id'])]
if instance not in self.samples:
self.samples[instance] = []
self.samples[instance].append(
os.path.join(savePath, str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
self.num_classes = len(self.clsDic)
for k in self.samples.keys():
while len(self.samples[k]) < n_samples:
self.samples[k] *= 2
assert len(self.samples[k]) >= n_samples
self.instances = list(self.samples.keys())
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
instance = self.instances[index]
imgPaths = random.sample(self.samples[instance], self.n_samples)
imgs = []
instances = []
for imgPath in imgPaths:
img = np.load(imgPath[:-4]+'.npy')
# '''randAug'''
# img = Image.fromarray(np.uint8(img*255))
# img = self.randAug(img)
# img.save('aaa.jpg')
# img = np.array(img)
# img = img.astype(np.float32) / 255
# '''randAug'''
            # for target sizes other than 256, shrink the stored patch
            # proportionally so the random crop below keeps a jitter margin
            # assert self.size[0] == 256
            if self.size[0] != 256:
                r = 256 / self.size[0]
                img = cv2.resize(img, (int(270/r), int(270/r)))
h, w, c = img.shape
rh = random.randint(0, h-self.size[0])
rw = random.randint(0, w-self.size[1])
img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
# if np.random.rand() < 0.5:
# w = h = 256
# while w >= 256 or h >= 256:
# r = np.random.uniform(0.3, 1/0.3)
# s = 256*256*np.random.uniform(0.02, 0.4)
# w = int(np.sqrt(s*r))
# h = int(np.sqrt(s/r))
# s_w = random.randint(0, 256-w)
# s_h = random.randint(0, 256-h)
# img[s_h:s_h+h, s_w:s_w+w, :] = 0
instance_t = torch.tensor(instance)
if np.random.rand() < self.flip_x:
img = img[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
imgs.append(img)
instances.append(instance_t)
imgs = torch.stack(imgs, dim=0)
instances = torch.stack(instances, dim=0)
return {'img': imgs, 'instance': instances}
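# --- Illustrative usage sketch (not part of the original pipeline) ---
# HardTripletDataset implements P-K sampling for batch-hard triplet mining:
# each item holds n_samples crops of a single instance, so a DataLoader batch
# of P instances yields tensors of shape (P, K, 3, H, W) and (P, K) that are
# flattened to (P*K, ...) before mining hard positives/negatives in embedding
# space (collater_HardTriplet, referenced in the commented-out test code at
# the bottom of this file, presumably performs an equivalent flattening):
#
#   imgs = batch['img'].flatten(0, 1)          # (P*K, 3, H, W)
#   labels = batch['instance'].flatten(0, 1)   # (P*K,)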
'''
for validation
'''
class ValidationArcfaceDataset(Dataset):
def __init__(self, size=(112, 112), root_dir='data/validation_instance/', maxLen=64, PAD=0):
self.root_dir = root_dir
self.size = size
text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
self.vocab_size = text2num.vocab_size
img_tat = 'validation_images'
vdo_tat = 'validation_videos'
with open(os.path.join('data', img_tat+'_text.json'), 'r') as f:
self.textDic_i = json.load(f)
with open(os.path.join('data', vdo_tat+'_text.json'), 'r') as f:
self.textDic_v = json.load(f)
for k in self.textDic_i.keys():
self.textDic_i[k] = text2num(self.textDic_i[k])
for k in self.textDic_v.keys():
self.textDic_v[k] = text2num(self.textDic_v[k])
instances = os.listdir(root_dir)
self.items = []
# s = ''
print('Loading Data...')
for instance in tqdm(instances):
imgs = os.listdir(root_dir+instance)
if len(imgs) < 2:
continue
l = []
for img in imgs:
if 'images' in img:
l.append(os.path.join(instance, img))
text_name = img.split(instance)[-1].split('_')[0]
l.append(text_name)
break
if len(l) == 0:
continue
for img in imgs:
if 'videos' in img:
l.append(os.path.join(instance, img))
text_name = img.split(instance)[-1].split('_')[0]
l.append(text_name)
break
if len(l) < 4:
continue
l.append(instance)
# s += '{}\t{}\n'.format(l[0], l[2])
self.items.append(l)
# with open('validation_path.txt', 'w') as f:
# f.write(s)
self.length = len(self.items)
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.items) * 2
def __getitem__(self, index):
imgPath, textName_img, vdoPath, textName_vdo, instance = self.items[index%self.length]
img_text = self.textDic_i[textName_img]
vdo_text = self.textDic_v[textName_vdo]
img_text = torch.Tensor(img_text).long()
vdo_text = torch.Tensor(vdo_text).long()
# img = np.load(os.path.join(self.root_dir, imgPath))
# vdo = np.load(os.path.join(self.root_dir, vdoPath))
img = cv2.imread(os.path.join(self.root_dir, imgPath))
vdo = cv2.imread(os.path.join(self.root_dir, vdoPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
vdo = cv2.cvtColor(vdo, cv2.COLOR_BGR2RGB)
vdo = vdo.astype(np.float32) / 255
hi, wi, ci = img.shape
hv, wv, cv = vdo.shape
if self.size[0] != 256:
r = 256 / self.size[0]
img = cv2.resize(img, (int(hi/r), int(wi/r)))
vdo = cv2.resize(vdo, (int(hv/r), int(wv/r)))
hi, wi, ci = img.shape
hv, wv, cv = vdo.shape
rh = (hi-self.size[0])//2
rw = (wi-self.size[1])//2
img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
rh = (hv-self.size[0])//2
rw = (wv-self.size[1])//2
vdo = vdo[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
if index >= self.length:
img = img[:, ::-1, :].copy()
vdo = vdo[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
vdo = torch.from_numpy(vdo)
vdo = vdo.permute(2, 0, 1)
img = self.transform(img)
vdo = self.transform(vdo)
return {
'img': img,
'vdo': vdo,
'img_text': img_text,
'vdo_text': vdo_text,
'instance':instance,
'img_e': torch.tensor(0),
'vdo_e': torch.tensor(1)
}
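# --- Illustrative note (not part of the original pipeline) ---
# ValidationArcfaceDataset pairs one image crop and one video crop per
# instance for retrieval evaluation. Its length is twice the number of pairs:
# indices >= len(items) return horizontally flipped copies, a simple
# test-time flip augmentation; the flipped and unflipped embeddings of the
# same pair can be averaged before computing image-to-video similarities.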
class ValidationDataset(Dataset):
def __init__(self, root_dir, items, size):
self.size = size
self.root_dir = root_dir
self.imgPath = None
self.img = None
self.items = items
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.items)
def __getitem__(self, index):
frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes = self.items[index]
if imgPath != self.imgPath:
self.imgPath = imgPath
self.img = cv2.imread(os.path.join(self.root_dir, imgPath))
det = self.img[ymin:ymax, xmin:xmax, :].copy()
det = cv2.resize(det, self.size)
det = cv2.cvtColor(det, cv2.COLOR_BGR2RGB)
det = det.astype(np.float32) / 255
det = torch.from_numpy(det)
det = det.permute(2, 0, 1)
det = self.transform(det)
# print(classes)
return {
'img': det,
'imgID': imgID,
'frame': frame,
'box': np.array([xmin, ymin, xmax, ymax]),
'classes': classes}
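# --- Illustrative note (not part of the original pipeline) ---
# ValidationDataset crops a single detection box out of its source image and
# normalizes it, so each item is one detection ready for the embedding
# network; consecutive items from the same image reuse the cached cv2.imread.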
'''
for test
'''
class TestImageDataset(Dataset):
def __init__(self, root_dir='data', dir_list=['validation_dataset_part1', 'validation_dataset_part2'], transform=None, maxLen=64, PAD=0):
self.root_dir = root_dir
self.transform = transform
self.mode = 'image'
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
dirs = [os.path.join(root_dir, d) for d in dir_list]
text2num = Text2Num(maxLen=maxLen, PAD=PAD)
self.vocab_size = text2num.vocab_size
self.images = []
self.ids = []
self.frames = []
self.textDic = {}
for di in dirs:
img_dir_list = os.listdir(os.path.join(di, 'image'))
for img_dir in img_dir_list:
img_names = os.listdir(os.path.join(di, 'image', img_dir))
for img_name in img_names:
self.images.append(os.path.join(di, 'image', img_dir, img_name))
self.frames.append(img_name.split('.')[0])
self.ids.append(img_dir)
textPath = os.path.join(di, 'image_text', img_dir+'.txt')
with open(textPath, 'r') as f:
self.textDic[img_dir] = text2num(f.readline())
# self.images = self.images[:100]
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgPath = self.images[index]
img = cv2.imread(imgPath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
img_id = self.ids[index]
text = self.textDic[img_id]
text = torch.Tensor(text).long()
sample = {'img': img, 'text': text}
if self.transform:
sample = self.transform(sample)
return sample
def getImageInfo(self, index):
imgPath = self.images[index]
img_id = self.ids[index]
frame = self.frames[index]
return imgPath, img_id, frame
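# --- Illustrative note (not part of the original pipeline) ---
# TestImageDataset walks <part>/image/<img_id>/ directories and tokenizes the
# matching <part>/image_text/<img_id>.txt caption. getImageInfo maps a
# dataset index back to its source, so detections produced in DataLoader
# order can be attributed to the right image:
#
#   imgPath, img_id, frame = dataset.getImageInfo(i)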
# class TestVideoDataset(Dataset):
# def __init__(self, root_dir, transform=None, n=20, maxLen=64, PAD=0):
# self.root_dir = root_dir
# self.transform = transform
# self.n = n
# self.mode = 'video'
# label_file = 'label.json'
# with open(label_file, 'r') as f:
# self.labelDic = json.load(f)
# self.num_classes = len(self.labelDic['label2index'])
# text2num = Text2Num(maxLen=maxLen, PAD=PAD)
# self.vocab_size = text2num.vocab_size
# # gap = 400 // n
# # self.frames_ids = [i*gap for i in range(n)]
# self.videos = []
# self.ids = []
# self.textDic = {}
# vdo_names = os.listdir(os.path.join(root_dir, 'video'))
# for vdo_name in vdo_names:
# self.videos.append(os.path.join(root_dir, 'video', vdo_name))
# self.ids.append(vdo_name.split('.')[0])
# textPath = os.path.join(root_dir, 'video_text', vdo_name.split('.')[0]+'.txt')
# with open(textPath, 'r') as f:
# self.textDic[vdo_name.split('.')[0]] = text2num(f.readline())
# # self.videos = self.videos[:100]
# def __len__(self):
# return len(self.videos)*self.n
# def __getitem__(self, index):
# v_index = index // self.n
# # f_index = self.frames_ids[index % self.n]
# vdo_name = self.videos[v_index]
# cap = cv2.VideoCapture(vdo_name)
# frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# f_index = int((frames // self.n) * (index % self.n))
# cap.set(cv2.CAP_PROP_POS_FRAMES, f_index)
# ret, img = cap.read()
# cap.release()
# vdo_id = self.ids[v_index]
# text = self.textDic[vdo_id]
# text = torch.tensor(text).long()
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# sample = {'img': img, 'text': text}
# if self.transform:
# sample = self.transform(sample)
# return sample
class TestVideoDataset(Dataset):
def __init__(self, root_dir, transform=None, n=20, dir_list=['validation_dataset_part1', 'validation_dataset_part2'], maxLen=64, PAD=0):
self.root_dir = root_dir
self.transform = transform
self.n = n
self.mode = 'video'
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
text2num = Text2Num(maxLen=maxLen, PAD=PAD)
self.vocab_size = text2num.vocab_size
dirs = [os.path.join(root_dir, d) for d in dir_list]
# gap = 400 // n
# self.frames_ids = [i*gap for i in range(n)]
self.videos = []
self.ids = []
self.textDic = {}
for di in dirs:
vdo_names = os.listdir(os.path.join(di, 'video'))
for vdo_name in vdo_names:
self.videos.append(os.path.join(di, 'video', vdo_name))
self.ids.append(vdo_name.split('.')[0])
textPath = os.path.join(di, 'video_text', vdo_name.split('.')[0]+'.txt')
with open(textPath, 'r') as f:
self.textDic[vdo_name.split('.')[0]] = text2num(f.readline())
# self.videos = self.videos[:10]
def __len__(self):
return len(self.videos)*self.n
def __getitem__(self, index):
v_index = index // self.n
# f_index = self.frames_ids[index % self.n]
vdo_name = self.videos[v_index]
cap = cv2.VideoCapture(vdo_name)
frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
f_index = int((frames // self.n) * (index % self.n))
cap.set(cv2.CAP_PROP_POS_FRAMES, f_index)
ret, img = cap.read()
cap.release()
vdo_id = self.ids[v_index]
text = self.textDic[vdo_id]
text = torch.Tensor(text).long()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
sample = {'img': img, 'text': text}
if self.transform:
sample = self.transform(sample)
return sample
def getImageInfo(self, index):
v_index = index // self.n
# frame = self.frames_ids[index % self.n]
vdoPath = self.videos[v_index]
cap = cv2.VideoCapture(vdoPath)
frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
frame = int((frames // self.n) * (index % self.n))
cap.release()
vdo_id = self.ids[v_index]
return vdoPath, vdo_id, str(frame)
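# --- Illustrative note (not part of the original pipeline) ---
# TestVideoDataset exposes each video as n evenly spaced frames: for index i
# it opens video i // n and seeks to frame (frame_count // n) * (i % n) via
# cv2.CAP_PROP_POS_FRAMES; getImageInfo recomputes the same (path, id, frame)
# triple so per-frame detections can later be grouped by video:
#
#   vdoPath, vdo_id, frame = dataset.getImageInfo(i)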
class TestDataset(Dataset):
def __init__(self, root_dir, items, size, mode):
assert mode in ['image', 'video']
self.mode = mode
self.size = size
self.root_dir = root_dir
self.items = items
self.length = len(items)
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.items) * 2
def __getitem__(self, index):
frame, imgID, imgPath, xmin, ymin, xmax, ymax, classes, text = self.items[index%self.length]
if self.mode == 'image':
img = cv2.imread(imgPath)
else:
cap = cv2.VideoCapture(imgPath)
cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame))
ret, img = cap.read()
cap.release()
det = img[ymin:ymax, xmin:xmax, :]
if index >= self.length:
det = det[:, ::-1, :].copy()
det = cv2.resize(det, self.size)
det = cv2.cvtColor(det, cv2.COLOR_BGR2RGB)
det = det.astype(np.float32) / 255
det = torch.from_numpy(det)
det = det.permute(2, 0, 1)
det = self.transform(det)
return {
'img': det,
'imgID': imgID,
'frame': frame,
'box': np.array([xmin, ymin, xmax, ymax]),
'classes': classes,
'text': text}
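# --- Illustrative note (not part of the original pipeline) ---
# TestDataset mirrors ValidationDataset but can read the crop either from an
# image file or from a specific video frame, and (like
# ValidationArcfaceDataset) doubles its length so indices >= len(items) yield
# horizontally flipped copies for test-time flip augmentation.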
if __name__ == "__main__":
from config import get_args_arcface
opt = get_args_arcface()
dataset = ArcfaceDataset()
# print(len(dataset))
print(dataset[0])
# from utils import collater_HardTriplet
# from torch.utils.data import DataLoader
# training_params = {"batch_size": 20,
# "shuffle": True,
# "drop_last": True,
# "collate_fn": collater_HardTriplet,
# "num_workers": 4}
# from PIL import Image
# dataset = ArcfaceDataset()
# print(dataset[0])
# loader = DataLoader(dataset, **training_params)
# for data in loader:
# print(data['img'].size())
# break
# print(len(dataset))
# for d in tqdm(dataset):
# pass
# img = dataset[100]['img']
# mi = min(img.view(-1))
# ma = max(img.view(-1))
# img = (img-mi)/(ma-mi)
# img = img*256
# img = img.permute(1, 2, 0)
# img = img.numpy()
# img = Image.fromarray(img.astype(np.uint8))
# img.save('aaa.jpg')
# img = dataset[0]['vdo']
# mi = min(img.view(-1))
# ma = max(img.view(-1))
# img = (img-mi)/(ma-mi)
# img = img*256
# img = img.permute(1, 2, 0)
# img = img.numpy()
# img = Image.fromarray(img.astype(np.uint8))
# img.save('bbb.jpg')
# mean = np.zeros(3)
# std = np.zeros(3)
# for d in tqdm(dataset):
# img = d['img']
# for i in range(3):
# mean[i] += img[:, :, i].mean()
# std[i] += img[:, :, i].std()
# mean = mean / len(dataset)
# std = std / len(dataset)
# print(mean, std)
|
[
"autoaugment.rand_augment_transform",
"numpy.random.rand",
"torch.from_numpy",
"numpy.array",
"arcface.inception_v4.InceptionV4",
"os.listdir",
"arcface.inceptionresnet_v2.InceptionResNetV2",
"config.get_args_arcface",
"random.randint",
"random.sample",
"random.choice",
"torch.Tensor",
"arcface.densenet.DenseNet",
"torchvision.transforms.Normalize",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"arcface.resnet_cbam.ResNetCBAM",
"arcface.googlenet.GoogLeNet",
"jieba.cut",
"tqdm.tqdm",
"os.path.join",
"torch.stack",
"torch.tensor",
"numpy.zeros",
"cv2.VideoCapture",
"arcface.resnet.ResNet",
"torch.utils.data.DataLoader",
"json.load",
"numpy.load"
] |
[((47781, 47799), 'config.get_args_arcface', 'get_args_arcface', ([], {}), '()\n', (47797, 47799), False, 'from config import get_args_arcface\n'), ((989, 1034), 'torch.utils.data.DataLoader', 'DataLoader', (['arcfaceDataset'], {}), '(arcfaceDataset, **training_params)\n', (999, 1034), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3853, 3873), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (3863, 3873), False, 'import os\n'), ((3955, 3970), 'tqdm.tqdm', 'tqdm', (['instances'], {}), '(instances)\n', (3959, 3970), False, 'from tqdm import tqdm\n'), ((4440, 4550), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (4460, 4550), True, 'import torchvision.transforms as transforms\n'), ((4888, 4924), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4900, 4924), False, 'import cv2\n'), ((5153, 5174), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (5169, 5174), False, 'import torch\n'), ((5663, 5703), 'jieba.cut', 'jieba.cut', (['text'], {'cut_all': '(False)', 'HMM': '(True)'}), '(text, cut_all=False, HMM=True)\n', (5672, 5703), False, 'import jieba\n'), ((8389, 8425), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8401, 8425), False, 'import cv2\n'), ((14059, 14114), 'autoaugment.rand_augment_transform', 'rand_augment_transform', (['"""rand-m9-n3-mstd0.5"""', 'aa_params'], {}), "('rand-m9-n3-mstd0.5', aa_params)\n", (14081, 14114), False, 'from autoaugment import rand_augment_transform\n'), ((16938, 17048), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (16958, 17048), True, 'import torchvision.transforms as transforms\n'), ((17247, 17277), 'numpy.load', 'np.load', (["(imgName[:-4] + '.npy')"], {}), "(imgName[:-4] + '.npy')\n", (17254, 17277), True, 'import numpy as np\n'), ((17877, 17903), 'random.randint', 'random.randint', (['(0)', '(h - 256)'], {}), '(0, h - 256)\n', (17891, 17903), False, 'import random\n'), ((17915, 17941), 'random.randint', 'random.randint', (['(0)', '(w - 256)'], {}), '(0, w - 256)\n', (17929, 17941), False, 'import random\n'), ((17999, 18025), 'cv2.resize', 'cv2.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (18009, 18025), False, 'import cv2\n'), ((18757, 18778), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (18773, 18778), False, 'import torch\n'), ((19649, 19681), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (19661, 19681), False, 'import os\n'), ((21996, 22106), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (22016, 22106), True, 'import torchvision.transforms as transforms\n'), ((22235, 22277), 'random.choice', 'random.choice', (["self.dic[index][1]['image']"], {}), "(self.dic[index][1]['image'])\n", (22248, 22277), False, 'import random\n'), ((22297, 22339), 'random.choice', 'random.choice', 
(["self.dic[index][1]['video']"], {}), "(self.dic[index][1]['video'])\n", (22310, 22339), False, 'import random\n'), ((23970, 24002), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (23982, 24002), False, 'import os\n'), ((26257, 26367), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (26277, 26367), True, 'import torchvision.transforms as transforms\n'), ((26869, 26924), 'random.choice', 'random.choice', (['self.cls_ins_dic[label_q][instance_id_n]'], {}), '(self.cls_ins_dic[label_q][instance_id_n])\n', (26882, 26924), False, 'import random\n'), ((27200, 27227), 'torch.tensor', 'torch.tensor', (['instance_id_q'], {}), '(instance_id_q)\n', (27212, 27227), False, 'import torch\n'), ((27252, 27279), 'torch.tensor', 'torch.tensor', (['instance_id_p'], {}), '(instance_id_p)\n', (27264, 27279), False, 'import torch\n'), ((27304, 27331), 'torch.tensor', 'torch.tensor', (['instance_id_n'], {}), '(instance_id_n)\n', (27316, 27331), False, 'import torch\n'), ((27675, 27711), 'random.randint', 'random.randint', (['(0)', '(hq - self.size[0])'], {}), '(0, hq - self.size[0])\n', (27689, 27711), False, 'import random\n'), ((27723, 27759), 'random.randint', 'random.randint', (['(0)', '(wq - self.size[1])'], {}), '(0, wq - self.size[1])\n', (27737, 27759), False, 'import random\n'), ((27837, 27873), 'random.randint', 'random.randint', (['(0)', '(hp - self.size[0])'], {}), '(0, hp - self.size[0])\n', (27851, 27873), False, 'import random\n'), ((27885, 27921), 'random.randint', 'random.randint', (['(0)', '(wp - self.size[1])'], {}), '(0, wp - self.size[1])\n', (27899, 27921), False, 'import random\n'), ((27999, 28035), 'random.randint', 'random.randint', (['(0)', '(hn - self.size[0])'], {}), '(0, hn - self.size[0])\n', (28013, 28035), False, 'import random\n'), ((28047, 28083), 'random.randint', 'random.randint', (['(0)', '(wn - self.size[1])'], {}), '(0, wn - self.size[1])\n', (28061, 28083), False, 'import random\n'), ((29365, 29420), 'autoaugment.rand_augment_transform', 'rand_augment_transform', (['"""rand-m9-n3-mstd0.5"""', 'aa_params'], {}), "('rand-m9-n3-mstd0.5', aa_params)\n", (29387, 29420), False, 'from autoaugment import rand_augment_transform\n'), ((32012, 32122), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (32032, 32122), True, 'import torchvision.transforms as transforms\n'), ((32306, 32359), 'random.sample', 'random.sample', (['self.samples[instance]', 'self.n_samples'], {}), '(self.samples[instance], self.n_samples)\n', (32319, 32359), False, 'import random\n'), ((33970, 33994), 'torch.stack', 'torch.stack', (['imgs'], {'dim': '(0)'}), '(imgs, dim=0)\n', (33981, 33994), False, 'import torch\n'), ((34015, 34044), 'torch.stack', 'torch.stack', (['instances'], {'dim': '(0)'}), '(instances, dim=0)\n', (34026, 34044), False, 'import torch\n'), ((34976, 34996), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (34986, 34996), False, 'import os\n'), ((35095, 35110), 'tqdm.tqdm', 'tqdm', (['instances'], {}), '(instances)\n', (35099, 35110), False, 'from tqdm import tqdm\n'), ((36135, 36245), 'torchvision.transforms.Normalize', 
'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (36155, 36245), True, 'import torchvision.transforms as transforms\n'), ((36918, 36954), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (36930, 36954), False, 'import cv2\n'), ((37012, 37048), 'cv2.cvtColor', 'cv2.cvtColor', (['vdo', 'cv2.COLOR_BGR2RGB'], {}), '(vdo, cv2.COLOR_BGR2RGB)\n', (37024, 37048), False, 'import cv2\n'), ((37801, 37822), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (37817, 37822), False, 'import torch\n'), ((37872, 37893), 'torch.from_numpy', 'torch.from_numpy', (['vdo'], {}), '(vdo)\n', (37888, 37893), False, 'import torch\n'), ((38502, 38612), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (38522, 38612), True, 'import torchvision.transforms as transforms\n'), ((39019, 39045), 'cv2.resize', 'cv2.resize', (['det', 'self.size'], {}), '(det, self.size)\n', (39029, 39045), False, 'import cv2\n'), ((39060, 39096), 'cv2.cvtColor', 'cv2.cvtColor', (['det', 'cv2.COLOR_BGR2RGB'], {}), '(det, cv2.COLOR_BGR2RGB)\n', (39072, 39096), False, 'import cv2\n'), ((39163, 39184), 'torch.from_numpy', 'torch.from_numpy', (['det'], {}), '(det)\n', (39179, 39184), False, 'import torch\n'), ((41049, 41068), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (41059, 41068), False, 'import cv2\n'), ((41083, 41119), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (41095, 41119), False, 'import cv2\n'), ((45251, 45277), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vdo_name'], {}), '(vdo_name)\n', (45267, 45277), False, 'import cv2\n'), ((45628, 45664), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (45640, 45664), False, 'import cv2\n'), ((46027, 46052), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vdoPath'], {}), '(vdoPath)\n', (46043, 46052), False, 'import cv2\n'), ((46556, 46666), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.55574415, 0.51230767, 0.51123354]', 'std': '[0.21303795, 0.21604613, 0.21273348]'}), '(mean=[0.55574415, 0.51230767, 0.51123354], std=[\n 0.21303795, 0.21604613, 0.21273348])\n', (46576, 46666), True, 'import torchvision.transforms as transforms\n'), ((47260, 47286), 'cv2.resize', 'cv2.resize', (['det', 'self.size'], {}), '(det, self.size)\n', (47270, 47286), False, 'import cv2\n'), ((47301, 47337), 'cv2.cvtColor', 'cv2.cvtColor', (['det', 'cv2.COLOR_BGR2RGB'], {}), '(det, cv2.COLOR_BGR2RGB)\n', (47313, 47337), False, 'import cv2\n'), ((47396, 47417), 'torch.from_numpy', 'torch.from_numpy', (['det'], {}), '(det)\n', (47412, 47417), False, 'import torch\n'), ((1145, 1156), 'arcface.resnet.ResNet', 'ResNet', (['opt'], {}), '(opt)\n', (1151, 1156), False, 'from arcface.resnet import ResNet\n'), ((2412, 2431), 'tqdm.tqdm', 'tqdm', (['arcfaceLoader'], {}), '(arcfaceLoader)\n', (2416, 2431), False, 'from tqdm import tqdm\n'), ((3732, 3744), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3741, 3744), False, 'import json\n'), ((3991, 4022), 'os.listdir', 'os.listdir', (['(root_dir + instance)'], {}), '(root_dir + instance)\n', (4001, 4022), 
False, 'import os\n'), ((4836, 4872), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (4848, 4872), False, 'import os\n'), ((5509, 5521), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5518, 5521), False, 'import json\n'), ((6666, 6678), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6675, 6678), False, 'import json\n'), ((8337, 8373), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (8349, 8373), False, 'import os\n'), ((8613, 8629), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (8621, 8629), True, 'import numpy as np\n'), ((10202, 10214), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10211, 10214), False, 'import json\n'), ((12187, 12223), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (12199, 12223), False, 'import cv2\n'), ((14664, 14676), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14673, 14676), False, 'import json\n'), ((14787, 14799), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14796, 14799), False, 'import json\n'), ((15404, 15436), 'os.path.join', 'os.path.join', (['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (15416, 15436), False, 'import os\n'), ((18670, 18686), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (18684, 18686), True, 'import numpy as np\n'), ((20469, 20481), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20478, 20481), False, 'import json\n'), ((20592, 20604), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20601, 20604), False, 'import json\n'), ((22815, 22841), 'random.randint', 'random.randint', (['(0)', '(h - 224)'], {}), '(0, h - 224)\n', (22829, 22841), False, 'import random\n'), ((22859, 22881), 'random.randint', 'random.randint', (['(224)', 'h'], {}), '(224, h)\n', (22873, 22881), False, 'import random\n'), ((22901, 22927), 'random.randint', 'random.randint', (['(0)', '(w - 224)'], {}), '(0, w - 224)\n', (22915, 22927), False, 'import random\n'), ((22945, 22967), 'random.randint', 'random.randint', (['(224)', 'w'], {}), '(224, w)\n', (22959, 22967), False, 'import random\n'), ((23047, 23073), 'cv2.resize', 'cv2.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (23057, 23073), False, 'import cv2\n'), ((23334, 23355), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (23350, 23355), False, 'import torch\n'), ((24103, 24115), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24112, 24115), False, 'import json\n'), ((24215, 24227), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24224, 24227), False, 'import json\n'), ((24327, 24339), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24336, 24339), False, 'import json\n'), ((24454, 24466), 'json.load', 'json.load', (['f'], {}), '(f)\n', (24463, 24466), False, 'import json\n'), ((26629, 26684), 'random.choice', 'random.choice', (['self.cls_ins_dic[label_q][instance_id_q]'], {}), '(self.cls_ins_dic[label_q][instance_id_q])\n', (26642, 26684), False, 'import random\n'), ((28159, 28175), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28173, 28175), True, 'import numpy as np\n'), ((28247, 28263), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28261, 28263), True, 'import numpy as np\n'), ((28335, 28351), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (28349, 28351), True, 'import numpy as np\n'), ((30004, 30016), 'json.load', 'json.load', (['f'], {}), '(f)\n', (30013, 30016), False, 'import json\n'), ((30215, 30247), 'os.path.join', 'os.path.join', 
(['root_dir', 'savePath'], {}), '(root_dir, savePath)\n', (30227, 30247), False, 'import os\n'), ((32452, 32482), 'numpy.load', 'np.load', (["(imgPath[:-4] + '.npy')"], {}), "(imgPath[:-4] + '.npy')\n", (32459, 32482), True, 'import numpy as np\n'), ((32975, 33010), 'random.randint', 'random.randint', (['(0)', '(h - self.size[0])'], {}), '(0, h - self.size[0])\n', (32989, 33010), False, 'import random\n'), ((33026, 33061), 'random.randint', 'random.randint', (['(0)', '(w - self.size[1])'], {}), '(0, w - self.size[1])\n', (33040, 33061), False, 'import random\n'), ((33626, 33648), 'torch.tensor', 'torch.tensor', (['instance'], {}), '(instance)\n', (33638, 33648), False, 'import torch\n'), ((33760, 33781), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (33776, 33781), False, 'import torch\n'), ((34619, 34631), 'json.load', 'json.load', (['f'], {}), '(f)\n', (34628, 34631), False, 'import json\n'), ((34734, 34746), 'json.load', 'json.load', (['f'], {}), '(f)\n', (34743, 34746), False, 'import json\n'), ((35131, 35162), 'os.listdir', 'os.listdir', (['(root_dir + instance)'], {}), '(root_dir + instance)\n', (35141, 35162), False, 'import os\n'), ((36803, 36839), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (36815, 36839), False, 'import os\n'), ((36866, 36902), 'os.path.join', 'os.path.join', (['self.root_dir', 'vdoPath'], {}), '(self.root_dir, vdoPath)\n', (36878, 36902), False, 'import os\n'), ((38191, 38206), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (38203, 38206), False, 'import torch\n'), ((38230, 38245), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (38242, 38245), False, 'import torch\n'), ((39399, 39433), 'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {}), '([xmin, ymin, xmax, ymax])\n', (39407, 39433), True, 'import numpy as np\n'), ((39889, 39901), 'json.load', 'json.load', (['f'], {}), '(f)\n', (39898, 39901), False, 'import json\n'), ((39981, 40006), 'os.path.join', 'os.path.join', (['root_dir', 'd'], {}), '(root_dir, d)\n', (39993, 40006), False, 'import os\n'), ((44084, 44096), 'json.load', 'json.load', (['f'], {}), '(f)\n', (44093, 44096), False, 'import json\n'), ((44273, 44298), 'os.path.join', 'os.path.join', (['root_dir', 'd'], {}), '(root_dir, d)\n', (44285, 44298), False, 'import os\n'), ((46934, 46953), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (46944, 46953), False, 'import cv2\n'), ((46986, 47011), 'cv2.VideoCapture', 'cv2.VideoCapture', (['imgPath'], {}), '(imgPath)\n', (47002, 47011), False, 'import cv2\n'), ((47608, 47642), 'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {}), '([xmin, ymin, xmax, ymax])\n', (47616, 47642), True, 'import numpy as np\n'), ((1295, 1309), 'arcface.googlenet.GoogLeNet', 'GoogLeNet', (['opt'], {}), '(opt)\n', (1304, 1309), False, 'from arcface.googlenet import GoogLeNet\n'), ((1997, 2042), 'os.path.join', 'os.path.join', (['opt.saved_path', "(b_name + '.pth')"], {}), "(opt.saved_path, b_name + '.pth')\n", (2009, 2042), False, 'import os\n'), ((3659, 3699), 'os.path.join', 'os.path.join', (['"""data"""', "(tat + '_text.json')"], {}), "('data', tat + '_text.json')\n", (3671, 3699), False, 'import os\n'), ((4723, 4741), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (4735, 4741), False, 'import torch\n'), ((5435, 5471), 'os.path.join', 'os.path.join', (['root_dir', '"""vocab.json"""'], {}), "(root_dir, 'vocab.json')\n", (5447, 5471), False, 'import os\n'), ((6591, 6625), 'os.path.join', 
'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (6603, 6625), False, 'import os\n'), ((7246, 7258), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7255, 7258), False, 'import json\n'), ((8285, 8303), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (8297, 8303), False, 'import torch\n'), ((10127, 10161), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (10139, 10161), False, 'import os\n'), ((10604, 10616), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10613, 10616), False, 'import json\n'), ((12131, 12167), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (12143, 12167), False, 'import os\n'), ((12427, 12443), 'numpy.zeros', 'np.zeros', (['(1, 6)'], {}), '((1, 6))\n', (12435, 12443), True, 'import numpy as np\n'), ((14589, 14625), 'os.path.join', 'os.path.join', (['root_dir', 'instanceFile'], {}), '(root_dir, instanceFile)\n', (14601, 14625), False, 'import os\n'), ((14695, 14740), 'os.path.join', 'os.path.join', (['root_dir', '"""instance2label.json"""'], {}), "(root_dir, 'instance2label.json')\n", (14707, 14740), False, 'import os\n'), ((17737, 17755), 'torch.tensor', 'torch.tensor', (['text'], {}), '(text)\n', (17749, 17755), False, 'import torch\n'), ((17778, 17796), 'torch.tensor', 'torch.tensor', (['iORv'], {}), '(iORv)\n', (17790, 17796), False, 'import torch\n'), ((20389, 20430), 'os.path.join', 'os.path.join', (['root_dir', '"""instanceID.json"""'], {}), "(root_dir, 'instanceID.json')\n", (20401, 20430), False, 'import os\n'), ((20500, 20545), 'os.path.join', 'os.path.join', (['root_dir', '"""instance2label.json"""'], {}), "(root_dir, 'instance2label.json')\n", (20512, 20545), False, 'import os\n'), ((23239, 23255), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (23253, 23255), True, 'import numpy as np\n'), ((24022, 24074), 'os.path.join', 'os.path.join', (['root_dir', "(img_tat + '_annotation.json')"], {}), "(root_dir, img_tat + '_annotation.json')\n", (24034, 24074), False, 'import os\n'), ((24134, 24186), 'os.path.join', 'os.path.join', (['root_dir', "(vdo_tat + '_annotation.json')"], {}), "(root_dir, vdo_tat + '_annotation.json')\n", (24146, 24186), False, 'import os\n'), ((24247, 24288), 'os.path.join', 'os.path.join', (['root_dir', '"""instanceID.json"""'], {}), "(root_dir, 'instanceID.json')\n", (24259, 24288), False, 'import os\n'), ((24367, 24412), 'os.path.join', 'os.path.join', (['root_dir', '"""instance2label.json"""'], {}), "(root_dir, 'instance2label.json')\n", (24379, 24412), False, 'import os\n'), ((28437, 28460), 'torch.from_numpy', 'torch.from_numpy', (['img_q'], {}), '(img_q)\n', (28453, 28460), False, 'import torch\n'), ((28494, 28517), 'torch.from_numpy', 'torch.from_numpy', (['img_p'], {}), '(img_p)\n', (28510, 28517), False, 'import torch\n'), ((28551, 28574), 'torch.from_numpy', 'torch.from_numpy', (['img_n'], {}), '(img_n)\n', (28567, 28574), False, 'import torch\n'), ((29929, 29965), 'os.path.join', 'os.path.join', (['root_dir', 'instanceFile'], {}), '(root_dir, instanceFile)\n', (29941, 29965), False, 'import os\n'), ((30356, 30368), 'json.load', 'json.load', (['f'], {}), '(f)\n', (30365, 30368), False, 'import json\n'), ((30476, 30488), 'json.load', 'json.load', (['f'], {}), '(f)\n', (30485, 30488), False, 'import json\n'), ((33665, 33681), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (33679, 33681), True, 'import numpy as np\n'), ((34535, 34579), 'os.path.join', 'os.path.join', 
(['"""data"""', "(img_tat + '_text.json')"], {}), "('data', img_tat + '_text.json')\n", (34547, 34579), False, 'import os\n'), ((34650, 34694), 'os.path.join', 'os.path.join', (['"""data"""', "(vdo_tat + '_text.json')"], {}), "('data', vdo_tat + '_text.json')\n", (34662, 34694), False, 'import os\n'), ((36575, 36597), 'torch.Tensor', 'torch.Tensor', (['img_text'], {}), '(img_text)\n', (36587, 36597), False, 'import torch\n'), ((36624, 36646), 'torch.Tensor', 'torch.Tensor', (['vdo_text'], {}), '(vdo_text)\n', (36636, 36646), False, 'import torch\n'), ((38912, 38948), 'os.path.join', 'os.path.join', (['self.root_dir', 'imgPath'], {}), '(self.root_dir, imgPath)\n', (38924, 38948), False, 'import os\n'), ((39814, 39848), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (39826, 39848), False, 'import os\n'), ((40293, 40318), 'os.path.join', 'os.path.join', (['di', '"""image"""'], {}), "(di, 'image')\n", (40305, 40318), False, 'import os\n'), ((40699, 40747), 'os.path.join', 'os.path.join', (['di', '"""image_text"""', "(img_dir + '.txt')"], {}), "(di, 'image_text', img_dir + '.txt')\n", (40711, 40747), False, 'import os\n'), ((41247, 41265), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (41259, 41265), False, 'import torch\n'), ((44009, 44043), 'os.path.join', 'os.path.join', (['root_dir', 'label_file'], {}), '(root_dir, label_file)\n', (44021, 44043), False, 'import os\n'), ((44539, 44564), 'os.path.join', 'os.path.join', (['di', '"""video"""'], {}), "(di, 'video')\n", (44551, 44564), False, 'import os\n'), ((45579, 45597), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (45591, 45597), False, 'import torch\n'), ((1406, 1422), 'arcface.inception_v4.InceptionV4', 'InceptionV4', (['opt'], {}), '(opt)\n', (1417, 1422), False, 'from arcface.inception_v4 import InceptionV4\n'), ((7042, 7088), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_annotation.json')"], {}), "(root_dir, t + '_annotation.json')\n", (7054, 7088), False, 'import os\n'), ((7125, 7137), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7134, 7137), False, 'import json\n'), ((7161, 7201), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_text.json')"], {}), "(root_dir, t + '_text.json')\n", (7173, 7201), False, 'import os\n'), ((7737, 7773), 'os.path.join', 'os.path.join', (['tats[i]', "d['img_name']"], {}), "(tats[i], d['img_name'])\n", (7749, 7773), False, 'import os\n'), ((10400, 10446), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_annotation.json')"], {}), "(root_dir, t + '_annotation.json')\n", (10412, 10446), False, 'import os\n'), ((10483, 10495), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10492, 10495), False, 'import json\n'), ((10519, 10559), 'os.path.join', 'os.path.join', (['root_dir', "(t + '_text.json')"], {}), "(root_dir, t + '_text.json')\n", (10531, 10559), False, 'import os\n'), ((11237, 11273), 'os.path.join', 'os.path.join', (['tats[i]', "d['img_name']"], {}), "(tats[i], d['img_name'])\n", (11249, 11273), False, 'import os\n'), ((12075, 12093), 'torch.Tensor', 'torch.Tensor', (['text'], {}), '(text)\n', (12087, 12093), False, 'import torch\n'), ((19889, 19937), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_annotation.json')"], {}), "(root_dir, tat + '_annotation.json')\n", (19901, 19937), False, 'import os\n'), ((19973, 19985), 'json.load', 'json.load', (['f'], {}), '(f)\n', (19982, 19985), False, 'import json\n'), ((20009, 20051), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_text.json')"], {}), 
"(root_dir, tat + '_text.json')\n", (20021, 20051), False, 'import os\n'), ((20098, 20110), 'json.load', 'json.load', (['f'], {}), '(f)\n', (20107, 20110), False, 'import json\n'), ((27357, 27395), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName_q'], {}), '(self.savePath, imgName_q)\n', (27369, 27395), False, 'import os\n'), ((27433, 27471), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName_p'], {}), '(self.savePath, imgName_p)\n', (27445, 27471), False, 'import os\n'), ((27509, 27547), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName_n'], {}), '(self.savePath, imgName_n)\n', (27521, 27547), False, 'import os\n'), ((30271, 30323), 'os.path.join', 'os.path.join', (['root_dir', "(img_tat + '_annotation.json')"], {}), "(root_dir, img_tat + '_annotation.json')\n", (30283, 30323), False, 'import os\n'), ((30391, 30443), 'os.path.join', 'os.path.join', (['root_dir', "(vdo_tat + '_annotation.json')"], {}), "(root_dir, vdo_tat + '_annotation.json')\n", (30403, 30443), False, 'import os\n'), ((40400, 40434), 'os.path.join', 'os.path.join', (['di', '"""image"""', 'img_dir'], {}), "(di, 'image', img_dir)\n", (40412, 40434), False, 'import os\n'), ((44640, 44675), 'os.path.join', 'os.path.join', (['di', '"""video"""', 'vdo_name'], {}), "(di, 'video', vdo_name)\n", (44652, 44675), False, 'import os\n'), ((1525, 1547), 'arcface.inceptionresnet_v2.InceptionResNetV2', 'InceptionResNetV2', (['opt'], {}), '(opt)\n', (1542, 1547), False, 'from arcface.inceptionresnet_v2 import InceptionResNetV2\n'), ((4134, 4161), 'os.path.join', 'os.path.join', (['instance', 'img'], {}), '(instance, img)\n', (4146, 4161), False, 'import os\n'), ((15537, 15585), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_annotation.json')"], {}), "(root_dir, tat + '_annotation.json')\n", (15549, 15585), False, 'import os\n'), ((15625, 15637), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15634, 15637), False, 'import json\n'), ((15665, 15707), 'os.path.join', 'os.path.join', (['root_dir', "(tat + '_text.json')"], {}), "(root_dir, tat + '_text.json')\n", (15677, 15707), False, 'import os\n'), ((15753, 15765), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15762, 15765), False, 'import json\n'), ((22531, 22567), 'os.path.join', 'os.path.join', (['self.savePath', 'imgName'], {}), '(self.savePath, imgName)\n', (22543, 22567), False, 'import os\n'), ((35329, 35356), 'os.path.join', 'os.path.join', (['instance', 'img'], {}), '(instance, img)\n', (35341, 35356), False, 'import os\n'), ((35641, 35668), 'os.path.join', 'os.path.join', (['instance', 'img'], {}), '(instance, img)\n', (35653, 35668), False, 'import os\n'), ((40518, 40562), 'os.path.join', 'os.path.join', (['di', '"""image"""', 'img_dir', 'img_name'], {}), "(di, 'image', img_dir, img_name)\n", (40530, 40562), False, 'import os\n'), ((1641, 1654), 'arcface.densenet.DenseNet', 'DenseNet', (['opt'], {}), '(opt)\n', (1649, 1654), False, 'from arcface.densenet import DenseNet\n'), ((1782, 1797), 'arcface.resnet_cbam.ResNetCBAM', 'ResNetCBAM', (['opt'], {}), '(opt)\n', (1792, 1797), False, 'from arcface.resnet_cbam import ResNetCBAM\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:44:34 2018
@author: Moha-Thinkpad
"""
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow.keras
import argparse
import tensorflow as tf
from tensorflow.keras import backend as K
#cfg = K.tf.ConfigProto()
#cfg.gpu_options.allow_growth = True
#K.set_session(K.tf.Session(config=cfg))
####################################
########################################################################
####################################
def custom_loss_seg(y_true, y_pred):
#A = tensorflow.keras.losses.mean_squared_error(y_true, y_pred)
B = tensorflow.keras.losses.mean_absolute_error(y_true, y_pred)
    return B
from tensorflow.keras.layers import Lambda
sum_dim_channel = Lambda(lambda xin: K.sum(xin, axis=3))
def lrelu(x): #from pix2pix code
a=0.2
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
def lrelu_output_shape(input_shape):
shape = list(input_shape)
return tuple(shape)
layer_lrelu=Lambda(lrelu, output_shape=lrelu_output_shape)
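# Note (added illustration, not part of the original pipeline): for a = 0.2 the
# expression (0.5*(1+a))*x + (0.5*(1-a))*abs(x) equals x for x >= 0 and a*x for
# x < 0, i.e. a standard leaky ReLU with negative slope 0.2; tf.identity only
# keeps the graph readable and does not change the values.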
def PreProcess(InputImages):
#output=np.zeros(InputImages.shape,dtype=np.float)
InputImages=InputImages.astype(np.float)
for i in range(InputImages.shape[0]):
try:
InputImages[i,:,:,:]=InputImages[i,:,:,:]/np.max(InputImages[i,:,:,:])
# output[i,:,:,:] = (output[i,:,:,:]* 2)-1
except:
InputImages[i,:,:]=InputImages[i,:,:]/np.max(InputImages[i,:,:])
# output[i,:,:] = (output[i,:,:]* 2) -1
return InputImages
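# Added note: PreProcess rescales each image to [0, 1] by dividing it by its own
# maximum; the bare except is the fallback for 3-D inputs (no channel axis),
# where indexing with four axes raises an IndexError.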
####################################
########################################################################
####################################
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["train", "test", "export"])
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--target_dir", help="where to")
parser.add_argument("--checkpoint", help="where to ")
parser.add_argument("--output_dir", help="where to p")
parser.add_argument("--landmarks", help=" -,-,-")
parser.add_argument("--lr", help="adam learning rate")
parser.add_argument("--ngf", type=int, default=64, help="number of generator filters in first conv layer")
# export options
a = parser.parse_args()
a.batch_size=40
a.max_epochs_seg=1
a.lr_seg=0.0001
a.beta1=0.5
a.ngf=64
#a.seed=1
# a.mode="train"
# a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_png/'
# a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_lm/'
# a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.landmarks='43,43,43'
#a.mode="test"
#a.batch_size=1
#a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_png/'
#a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_lm/'
#a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.landmarks='43,43,43'
######## ------------ Config
#Ind_impo_landmarks_matlab=np.array([5, 6, 15,16,17,18,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,38,41])
#Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
#Num_landmarks=25
# 33,23,16 - 29,15, - 30,20,26 - 5,18,21 - 44,17,41 - 28,22,34, - 27,43,37
StrLandmarks=a.landmarks
StrLandmarks=StrLandmarks.split(",")
Ind_impo_landmarks_matlab=np.array([0,0,0])
Ind_impo_landmarks_matlab[0]=int(StrLandmarks[0])
Ind_impo_landmarks_matlab[1]=int(StrLandmarks[1])
Ind_impo_landmarks_matlab[2]=int(StrLandmarks[2])
Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
Num_landmarks=3
print('============================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
#########----------------------DATA
from os import listdir
ImageFileNames=[]
FileNames=listdir(a.input_dir)
for names in FileNames:
if names.endswith(".png"):
ImageFileNames.append(names)
#LMFileNames=listdir(a.target_dir)
from skimage import io as ioSK
from numpy import genfromtxt
Images=np.zeros((len(ImageFileNames),256,256,3),dtype=np.uint8)
#Images_seg=np.zeros((len(ImageFileNames),256,256),dtype=np.uint8)
LandmarkLocations=np.zeros((len(ImageFileNames),2,44),dtype=np.uint8)
for i in range(len(ImageFileNames)):
Image = ioSK.imread(a.input_dir+'/'+ImageFileNames[i])
Images[i,:,:,:]=Image
FileName=ImageFileNames[i]
FileName=FileName[:-4]
# Image = ioSK.imread(a.target_dir_seg+'/'+ImageFileNames[i])
# Images_seg[i,:,:]=Image
Landmarks0 = genfromtxt(a.target_dir+'/'+FileName+'.csv', delimiter=',')
Landmarks0 = Landmarks0.astype(int)
LandmarkLocations[i,0,:]=Landmarks0[:,0]
LandmarkLocations[i,1,:]=Landmarks0[:,1]
#Landmarks = np.flip(Landmarks0, axis=1)
#plt.figure()
#plt.imshow(Images[100,:,:,:])
#plt.scatter(LandmarkLocations[100,0,:],LandmarkLocations[100,1,:])
X_train = PreProcess(Images)
del Images
import gc
gc.collect()
LandmarkLocations_row=LandmarkLocations[:,0,:]
LandmarkLocations_col=LandmarkLocations[:,1,:]
LandmarkLocations_row=LandmarkLocations_row[:,Ind_impo_landmarks_python]
LandmarkLocations_col=LandmarkLocations_col[:,Ind_impo_landmarks_python]
from scipy.ndimage import gaussian_filter
Images_HeatMaps=np.zeros((X_train.shape[0],X_train.shape[1],X_train.shape[2],Num_landmarks),dtype=np.float)
Image_heatmap=np.zeros((256,256),dtype=np.float)
for i in range(X_train.shape[0]):
for k in range(Num_landmarks):
# h=np.argwhere(Images_seg[i,:,:]==2*Ind_impo_landmarks_matlab[k])
lms_1=LandmarkLocations_row[i,k]
lms_2=LandmarkLocations_col[i,k]
Image_heatmap[:,:]=0
Image_heatmap[lms_2,lms_1]=1
Image_heatmap=gaussian_filter(Image_heatmap, sigma=10)
Image_heatmap=(Image_heatmap/np.max(Image_heatmap))
Images_HeatMaps[i,:,:,k]=Image_heatmap
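# Added note: each channel of Images_HeatMaps is now a Gaussian blob (sigma = 10
# pixels) centred on one landmark and rescaled so its peak equals 1; these
# heatmaps are the regression targets for the encoder-decoder defined below.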
gc.collect()
#plt.figure()
#plt.imshow(np.squeeze(Images_HeatMaps[2,:,:,5]), cmap='gray')
#plt.imshow(Images[2,:,:,:],cmap='jet', alpha=0.5)
#plt.show()
Y_train_heatmap = PreProcess(Images_HeatMaps)
del Images_HeatMaps
gc.collect()
# del Images_seg
import os
if not os.path.exists(a.checkpoint):
os.makedirs(a.checkpoint)
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
if a.mode=='test':
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('loading model ...')
model_final=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('model is loaded ')
Images=np.zeros((len(ImageFileNames),256,256,3),dtype=np.float)
newLandmarks=np.zeros((Num_landmarks,2),dtype=np.float16)
Y_test_heatmap=Y_train_heatmap
X_test=X_train
# fig = plt.figure()
# plt.imshow(X_train[0,:,:,:],cmap='gray', alpha=0.95)
# plt.imshow(Y_train_heatmap[0,:,:,:],cmap='jet', alpha=0.5)
# plt.grid(True)
pred_example_heatmaps=model_final.predict(X_test[:,:,:,:])
print('writing results ...')
for i in range(len(ImageFileNames)):
# print(i)
FileName=ImageFileNames[i]
FileName=FileName[:-4]
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for k in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(Y_train_heatmap[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
True_chan=np.squeeze(Y_test_heatmap[i,:,:,k])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[k,:]=lms_True
Pred_chan=np.squeeze(pred_example_heatmaps[i,:,:,k])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[k,:]=lms_pred
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_test_heatmap[i,:,:,i])
# ax[1].imshow(pred_example_heatmaps[i,:,:,i])
# plt.show()
np.savetxt(a.output_dir+FileName+'_pred.csv',
lms_pred_all , delimiter=",", fmt='%i')
np.savetxt(a.output_dir+FileName+'_true.csv',
lms_True_all , delimiter=",", fmt='%i')
fig = plt.figure()
plt.imshow(X_test[i,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
# plt.grid(True)
fig.savefig(a.output_dir+FileName+'.png')
plt.close(fig)
if a.mode=='train':
# plt.figure()
# plt.imshow(X_train[90,:,:,:])
# plt.figure()
# plt.imshow(Y_train_heatmap[90,:,:,4])
try: # continue training
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('======== loading model ...')
model_4_heatmap=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('======== continue training ...')
except: # new training
print('======== new training ...')
checkpoint_model_file=a.output_dir+'LandMarkModel'
########### network
kernelSize=(4,4)
InputLayer=tensorflow.keras.layers.Input(shape=(256,256,3))
e_1=tensorflow.keras.layers.Conv2D(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(InputLayer)
e_2=layer_lrelu(e_1)
e_2=tensorflow.keras.layers.Conv2D(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_2)
e_2=tensorflow.keras.layers.BatchNormalization()(e_2)
e_3=layer_lrelu(e_2)
e_3=tensorflow.keras.layers.Conv2D(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_3)
e_3=tensorflow.keras.layers.BatchNormalization()(e_3)
e_4=layer_lrelu(e_3)
e_4=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_4)
e_4=tensorflow.keras.layers.BatchNormalization()(e_4)
e_5=layer_lrelu(e_4)
e_5=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_5)
e_5=tensorflow.keras.layers.BatchNormalization()(e_5)
e_6=layer_lrelu(e_5)
e_6=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_6)
e_6=tensorflow.keras.layers.BatchNormalization()(e_6)
e_7=layer_lrelu(e_6)
e_7=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_7)
e_7=tensorflow.keras.layers.BatchNormalization()(e_7)
e_8=layer_lrelu(e_7)
e_8=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_8)
e_8=tensorflow.keras.layers.BatchNormalization()(e_8)
d_8=e_8
d_8=tensorflow.keras.layers.Activation('relu')(d_8)
d_8=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_8)
d_8=tensorflow.keras.layers.BatchNormalization()(d_8)
d_8=tensorflow.keras.layers.Dropout(0.5)(d_8)
d_7=tensorflow.keras.layers.concatenate(inputs=[d_8, e_7], axis=3)
d_7=tensorflow.keras.layers.Activation('relu')(d_7)
d_7=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_7)
d_7=tensorflow.keras.layers.BatchNormalization()(d_7)
d_7=tensorflow.keras.layers.Dropout(0.5)(d_7)
d_6=tensorflow.keras.layers.concatenate(inputs=[d_7, e_6], axis=3)
d_6=tensorflow.keras.layers.Activation('relu')(d_6)
d_6=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_6)
d_6=tensorflow.keras.layers.BatchNormalization()(d_6)
d_6=tensorflow.keras.layers.Dropout(0.5) (d_6)
d_5=tensorflow.keras.layers.concatenate(inputs=[d_6, e_5], axis=3)
d_5=tensorflow.keras.layers.Activation('relu')(d_5)
d_5=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_5)
d_5=tensorflow.keras.layers.BatchNormalization()(d_5)
d_5=tensorflow.keras.layers.Dropout(0.5) (d_5)
d_4=tensorflow.keras.layers.concatenate(inputs=[d_5, e_4], axis=3)
d_4=tensorflow.keras.layers.Activation('relu')(d_4)
d_4=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_4)
d_4=tensorflow.keras.layers.BatchNormalization()(d_4)
d_3=tensorflow.keras.layers.concatenate(inputs=[d_4, e_3], axis=3)
d_3=tensorflow.keras.layers.Activation('relu')(d_3)
d_3=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_3)
d_3=tensorflow.keras.layers.BatchNormalization()(d_3)
d_2=tensorflow.keras.layers.concatenate(inputs=[d_3, e_2], axis=3)
d_2=tensorflow.keras.layers.Activation('relu')(d_2)
# d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.BatchNormalization()(d_2)
d_1=tensorflow.keras.layers.concatenate(inputs=[d_2, e_1], axis=3)
d_1=tensorflow.keras.layers.Activation('relu')(d_1)
d_1=tensorflow.keras.layers.Conv2DTranspose(Num_landmarks, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_1)
HeatMaps=tensorflow.keras.layers.Activation('sigmoid', name='last_layer_of_decoder')(d_1)
model_4_heatmap=Model(inputs=InputLayer, outputs=HeatMaps)
###########Train
print('trainable_count =',int(np.sum([K.count_params(p) for p in set(model_4_heatmap.trainable_weights)])))
print('non_trainable_count =', int(np.sum([K.count_params(p) for p in set(model_4_heatmap.non_trainable_weights)])))
# fix random seed for reproducibility
seed = 1
import random
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
#### compile and train the model
UsedOptimizer=optimizers.Adam(lr=a.lr_seg, beta_1=a.beta1)
model_4_heatmap.compile(loss=custom_loss_seg, optimizer=UsedOptimizer)
History=model_4_heatmap.fit(X_train, Y_train_heatmap,
batch_size=a.batch_size, shuffle=True, validation_split=0.05,
epochs=a.max_epochs_seg,
verbose=1)
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.grid()
plt.savefig(a.output_dir+'History_'+str(a.lr)+'.png')
plt.close()
import pickle
Dict={'History_loss_train':History.history['loss'],
'History_loss_val':History.history['val_loss'],}
pickle.dump( Dict, open(a.output_dir+'History_'+str(a.lr)+'.pkl', "wb" ) )
# show an exemplary result
Num_example_train=0
pred_example_heatmaps=model_4_heatmap.predict(X_train[Num_example_train:Num_example_train+1,:,:,:])
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for i in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(X_train[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
Pred_chan=np.squeeze(pred_example_heatmaps[0,:,:,i])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[i,:]=lms_pred
True_chan=np.squeeze(Y_train_heatmap[Num_example_train,:,:,i])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[i,:]=lms_True
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_train_heatmap[Num_example_train,:,:,i])
# ax[1].imshow(pred_example_heatmaps[0,:,:,i])
# plt.show()
fig = plt.figure()
plt.imshow(X_train[Num_example_train,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
plt.grid(True)
# fig.savefig('scatter-result'+str(i)+'_pred.png')
plt.close(fig)
print('===========training done=================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
print('Saving model ...')
model_4_heatmap.save(checkpoint_model_file+'_weights.h5')
|
[
"matplotlib.pyplot.grid",
"numpy.array",
"tensorflow.keras.models.load_model",
"scipy.ndimage.gaussian_filter",
"tensorflow.set_random_seed",
"numpy.genfromtxt",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"tensorflow.keras.models.Model",
"matplotlib.use",
"tensorflow.keras.layers.Lambda",
"numpy.argmax",
"tensorflow.keras.backend.count_params",
"numpy.squeeze",
"skimage.io.imread",
"gc.collect",
"numpy.savetxt",
"tensorflow.keras.backend.sum",
"os.makedirs",
"random.seed",
"tensorflow.keras.optimizers.Adam",
"datetime.datetime.now",
"numpy.zeros",
"matplotlib.pyplot.figure",
"tensorflow.identity",
"tensorflow.abs"
] |
[((228, 249), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (242, 249), False, 'import matplotlib\n'), ((1462, 1508), 'tensorflow.keras.layers.Lambda', 'Lambda', (['lrelu'], {'output_shape': 'lrelu_output_shape'}), '(lrelu, output_shape=lrelu_output_shape)\n', (1468, 1508), False, 'from tensorflow.keras.layers import Lambda\n'), ((2176, 2201), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2199, 2201), False, 'import argparse\n'), ((4096, 4115), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4104, 4115), True, 'import numpy as np\n'), ((4611, 4631), 'os.listdir', 'listdir', (['a.input_dir'], {}), '(a.input_dir)\n', (4618, 4631), False, 'from os import listdir\n'), ((5766, 5778), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5776, 5778), False, 'import gc\n'), ((6084, 6183), 'numpy.zeros', 'np.zeros', (['(X_train.shape[0], X_train.shape[1], X_train.shape[2], Num_landmarks)'], {'dtype': 'np.float'}), '((X_train.shape[0], X_train.shape[1], X_train.shape[2],\n Num_landmarks), dtype=np.float)\n', (6092, 6183), True, 'import numpy as np\n'), ((6191, 6227), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {'dtype': 'np.float'}), '((256, 256), dtype=np.float)\n', (6199, 6227), True, 'import numpy as np\n'), ((6692, 6704), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6702, 6704), False, 'import gc\n'), ((6924, 6936), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6934, 6936), False, 'import gc\n'), ((1281, 1295), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (1292, 1295), True, 'import tensorflow as tf\n'), ((4420, 4443), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4441, 4443), False, 'import datetime\n'), ((5081, 5131), 'skimage.io.imread', 'ioSK.imread', (["(a.input_dir + '/' + ImageFileNames[i])"], {}), "(a.input_dir + '/' + ImageFileNames[i])\n", (5092, 5131), True, 'from skimage import io as ioSK\n'), ((5350, 5415), 'numpy.genfromtxt', 'genfromtxt', (["(a.target_dir + '/' + FileName + '.csv')"], {'delimiter': '""","""'}), "(a.target_dir + '/' + FileName + '.csv', delimiter=',')\n", (5360, 5415), False, 'from numpy import genfromtxt\n'), ((6981, 7009), 'os.path.exists', 'os.path.exists', (['a.checkpoint'], {}), '(a.checkpoint)\n', (6995, 7009), False, 'import os\n'), ((7015, 7040), 'os.makedirs', 'os.makedirs', (['a.checkpoint'], {}), '(a.checkpoint)\n', (7026, 7040), False, 'import os\n'), ((7053, 7081), 'os.path.exists', 'os.path.exists', (['a.output_dir'], {}), '(a.output_dir)\n', (7067, 7081), False, 'import os\n'), ((7087, 7112), 'os.makedirs', 'os.makedirs', (['a.output_dir'], {}), '(a.output_dir)\n', (7098, 7112), False, 'import os\n'), ((7298, 7505), 'tensorflow.keras.models.load_model', 'load_model', (["(checkpoint_model_file + '_weights.h5')"], {'custom_objects': "{'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf}"}), "(checkpoint_model_file + '_weights.h5', custom_objects={\n 'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf})\n", (7308, 7505), False, 'from tensorflow.keras.models import load_model\n'), ((8030, 8076), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.float16'}), '((Num_landmarks, 2), dtype=np.float16)\n', (8038, 8076), True, 'import numpy as np\n'), ((16856, 16880), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (16874, 16880), True, 'import 
tensorflow as tf\n'), ((16885, 16905), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16899, 16905), True, 'import numpy as np\n'), ((16910, 16927), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (16921, 16927), False, 'import random\n'), ((16993, 17037), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'a.lr_seg', 'beta_1': 'a.beta1'}), '(lr=a.lr_seg, beta_1=a.beta1)\n', (17008, 17037), False, 'from tensorflow.keras import optimizers\n'), ((17323, 17356), 'matplotlib.pyplot.plot', 'plt.plot', (["History.history['loss']"], {}), "(History.history['loss'])\n", (17331, 17356), True, 'import matplotlib.pyplot as plt\n'), ((17361, 17398), 'matplotlib.pyplot.plot', 'plt.plot', (["History.history['val_loss']"], {}), "(History.history['val_loss'])\n", (17369, 17398), True, 'import matplotlib.pyplot as plt\n'), ((17403, 17413), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (17411, 17413), True, 'import matplotlib.pyplot as plt\n'), ((17476, 17487), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17485, 17487), True, 'import matplotlib.pyplot as plt\n'), ((17907, 17949), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (17915, 17949), True, 'import numpy as np\n'), ((17965, 18007), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (17973, 18007), True, 'import numpy as np\n'), ((18798, 18810), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18808, 18810), True, 'import matplotlib.pyplot as plt\n'), ((18815, 18885), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_train[Num_example_train, :, :, :]'], {'cmap': '"""jet"""', 'alpha': '(0.9)'}), "(X_train[Num_example_train, :, :, :], cmap='jet', alpha=0.9)\n", (18825, 18885), True, 'import matplotlib.pyplot as plt\n'), ((18886, 18962), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_True_all[:, 1]', 'lms_True_all[:, 0]'], {'marker': '"""+"""', 'color': '"""red"""'}), "(lms_True_all[:, 1], lms_True_all[:, 0], marker='+', color='red')\n", (18897, 18962), True, 'import matplotlib.pyplot as plt\n'), ((18964, 19041), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_pred_all[:, 1]', 'lms_pred_all[:, 0]'], {'marker': '"""x"""', 'color': '"""blue"""'}), "(lms_pred_all[:, 1], lms_pred_all[:, 0], marker='x', color='blue')\n", (18975, 19041), True, 'import matplotlib.pyplot as plt\n'), ((19043, 19057), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (19051, 19057), True, 'import matplotlib.pyplot as plt\n'), ((19116, 19130), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19125, 19130), True, 'import matplotlib.pyplot as plt\n'), ((932, 950), 'tensorflow.keras.backend.sum', 'K.sum', (['xin'], {'axis': '(3)'}), '(xin, axis=3)\n', (937, 950), True, 'from tensorflow.keras import backend as K\n'), ((6538, 6578), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['Image_heatmap'], {'sigma': '(10)'}), '(Image_heatmap, sigma=10)\n', (6553, 6578), False, 'from scipy.ndimage import gaussian_filter\n'), ((8588, 8630), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (8596, 8630), True, 'import numpy as np\n'), ((8650, 8692), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 2)'], {'dtype': 'np.int'}), '((Num_landmarks, 2), dtype=np.int)\n', (8658, 8692), True, 'import numpy as np\n'), ((9549, 9642), 'numpy.savetxt', 'np.savetxt', (["(a.output_dir + FileName + 
'_pred.csv')", 'lms_pred_all'], {'delimiter': '""","""', 'fmt': '"""%i"""'}), "(a.output_dir + FileName + '_pred.csv', lms_pred_all, delimiter=\n ',', fmt='%i')\n", (9559, 9642), True, 'import numpy as np\n'), ((9656, 9749), 'numpy.savetxt', 'np.savetxt', (["(a.output_dir + FileName + '_true.csv')", 'lms_True_all'], {'delimiter': '""","""', 'fmt': '"""%i"""'}), "(a.output_dir + FileName + '_true.csv', lms_True_all, delimiter=\n ',', fmt='%i')\n", (9666, 9749), True, 'import numpy as np\n'), ((9773, 9785), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9783, 9785), True, 'import matplotlib.pyplot as plt\n'), ((9794, 9847), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_test[i, :, :, :]'], {'cmap': '"""jet"""', 'alpha': '(0.9)'}), "(X_test[i, :, :, :], cmap='jet', alpha=0.9)\n", (9804, 9847), True, 'import matplotlib.pyplot as plt\n'), ((9852, 9928), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_True_all[:, 1]', 'lms_True_all[:, 0]'], {'marker': '"""+"""', 'color': '"""red"""'}), "(lms_True_all[:, 1], lms_True_all[:, 0], marker='+', color='red')\n", (9863, 9928), True, 'import matplotlib.pyplot as plt\n'), ((9934, 10011), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lms_pred_all[:, 1]', 'lms_pred_all[:, 0]'], {'marker': '"""x"""', 'color': '"""blue"""'}), "(lms_pred_all[:, 1], lms_pred_all[:, 0], marker='x', color='blue')\n", (9945, 10011), True, 'import matplotlib.pyplot as plt\n'), ((10091, 10105), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10100, 10105), True, 'import matplotlib.pyplot as plt\n'), ((10516, 10723), 'tensorflow.keras.models.load_model', 'load_model', (["(checkpoint_model_file + '_weights.h5')"], {'custom_objects': "{'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf}"}), "(checkpoint_model_file + '_weights.h5', custom_objects={\n 'custom_loss_seg': custom_loss_seg, 'layer_lrelu': layer_lrelu, 'lrelu':\n lrelu, 'lrelu_output_shape': lrelu_output_shape, 'tf': tf})\n", (10526, 10723), False, 'from tensorflow.keras.models import load_model\n'), ((18225, 18270), 'numpy.squeeze', 'np.squeeze', (['pred_example_heatmaps[0, :, :, i]'], {}), '(pred_example_heatmaps[0, :, :, i])\n', (18235, 18270), True, 'import numpy as np\n'), ((18413, 18468), 'numpy.squeeze', 'np.squeeze', (['Y_train_heatmap[Num_example_train, :, :, i]'], {}), '(Y_train_heatmap[Num_example_train, :, :, i])\n', (18423, 18468), True, 'import numpy as np\n'), ((19254, 19277), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19275, 19277), False, 'import datetime\n'), ((1347, 1356), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (1353, 1356), True, 'import tensorflow as tf\n'), ((6614, 6635), 'numpy.max', 'np.max', (['Image_heatmap'], {}), '(Image_heatmap)\n', (6620, 6635), True, 'import numpy as np\n'), ((8959, 8997), 'numpy.squeeze', 'np.squeeze', (['Y_test_heatmap[i, :, :, k]'], {}), '(Y_test_heatmap[i, :, :, k])\n', (8969, 8997), True, 'import numpy as np\n'), ((9157, 9202), 'numpy.squeeze', 'np.squeeze', (['pred_example_heatmaps[i, :, :, k]'], {}), '(pred_example_heatmaps[i, :, :, k])\n', (9167, 9202), True, 'import numpy as np\n'), ((16445, 16487), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'InputLayer', 'outputs': 'HeatMaps'}), '(inputs=InputLayer, outputs=HeatMaps)\n', (16450, 16487), False, 'from tensorflow.keras.models import Model\n'), ((18302, 18333), 'numpy.argmax', 'np.argmax', (['Pred_chan'], {'axis': 'None'}), '(Pred_chan, axis=None)\n', 
(18311, 18333), True, 'import numpy as np\n'), ((18500, 18531), 'numpy.argmax', 'np.argmax', (['True_chan'], {'axis': 'None'}), '(True_chan, axis=None)\n', (18509, 18531), True, 'import numpy as np\n'), ((1754, 1785), 'numpy.max', 'np.max', (['InputImages[i, :, :, :]'], {}), '(InputImages[i, :, :, :])\n', (1760, 1785), True, 'import numpy as np\n'), ((9033, 9064), 'numpy.argmax', 'np.argmax', (['True_chan'], {'axis': 'None'}), '(True_chan, axis=None)\n', (9042, 9064), True, 'import numpy as np\n'), ((9238, 9269), 'numpy.argmax', 'np.argmax', (['Pred_chan'], {'axis': 'None'}), '(Pred_chan, axis=None)\n', (9247, 9269), True, 'import numpy as np\n'), ((1903, 1931), 'numpy.max', 'np.max', (['InputImages[i, :, :]'], {}), '(InputImages[i, :, :])\n', (1909, 1931), True, 'import numpy as np\n'), ((16569, 16586), 'tensorflow.keras.backend.count_params', 'K.count_params', (['p'], {}), '(p)\n', (16583, 16586), True, 'from tensorflow.keras import backend as K\n'), ((16686, 16703), 'tensorflow.keras.backend.count_params', 'K.count_params', (['p'], {}), '(p)\n', (16700, 16703), True, 'from tensorflow.keras import backend as K\n')]
|
"""This file contains functions for converting and storing jupyter notebooks."""
import nbformat
import pickle
import numpy as np
import os
from nbconvert import PythonExporter
from pathlib import Path # for windows-Unix compatibility
def nbconvert_python(path):
"""Use nbconvert to convert jupyter notebook to python code.
    Return the string of python code. You can then execute it with `exec()`.
Args:
path (str): Path of jupyter notebook
Returns:
str: The string of python code converted from notebook
"""
with open(path) as f:
nb = nbformat.read(f, as_version=4)
body, _ = PythonExporter().from_notebook_node(nb)
return body
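# Illustrative usage (hypothetical notebook path, for documentation only):
#
#     code = nbconvert_python("analysis.ipynb")
#     namespace = {}
#     exec(code, namespace)   # variables defined in the notebook end up in `namespace`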
def is_picklable(obj):
"""Check if an obj can be dumped into a pickle file.
Args:
        obj : The object to be checked
    Returns:
        bool: True if the object can be pickled, False otherwise
"""
try:
pickle.dumps(obj)
except Exception:
return False
return True
def filter_pickable(global_vars):
"""Filter the variables that are pickable.
Args:
global_vars (array-like): The names of variables to get
Returns:
dict: Dictionary containing names of objects and their values
"""
bk = {}
for k in global_vars:
obj = global_vars[k]
if is_picklable(obj):
try:
bk.update({k: obj})
except TypeError:
pass
return bk
def notebook_to_pickable_dict(path):
"""Excute jupyter notebook and then save variables defined in notebook.
This function converts notebook to python code and then excutes the code.
Finally it put all public variables that defined in notebook into dictionary
and return it.
Parameters
----------
path : str
Path of jupyter notebook
Returns
-------
    bk : dict
        Dictionary containing the names and values of the variables defined in the notebook.
"""
# Step 1: Convert notebook to script
code = nbconvert_python(path)
code = code.replace("get_ipython()", "# get_ipython()")
# Step 2: Execute script and save variables in dictionary
d = {}
exec(code, d)
d.pop("__builtins__")
    # Step 3: Filter for picklable variables
bk = filter_pickable(d)
return bk
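# Illustrative usage (output file name shown only as an example):
#
#     bk = notebook_to_pickable_dict("docs/getting_started.ipynb")
#     save_to_pkl("getting_started.pkl", bk)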
def save_to_pkl(path, obj):
"""Save object to pickle file.
Args:
path (str): Path to save pickle file
obj : Object to be saved
"""
with open(path, "wb") as f:
pickle.dump(obj, f)
def basic_type_or_list(obj):
"""Check type of object."""
return not np.asanyarray(obj).dtype.hasobject
def flatten_to_dict(obj):
"""Reduce dimensionality of dictionary."""
def _flatten(value, key):
"""Reduce dimensionality of object recursively."""
if isinstance(value, (list, tuple, set)):
if basic_type_or_list(value):
return {key: value} if key is not None else value
else:
tile_d = {}
for i, v in enumerate(value):
tile_d.update(_flatten(v, f"{key}_{i}" if key is not None else i))
return tile_d
elif isinstance(value, dict):
tile_d = {}
for k, v in value.items():
tile_d.update(_flatten(v, f"{key}_{k}" if key is not None else k))
return tile_d
else:
return {key: value} if key is not None else value
return _flatten(value=obj, key=None)
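# Illustration (assumed input, not taken from any notebook): nested containers are
# flattened into a single dict whose keys are joined with underscores, e.g.
#
#     flatten_to_dict({"a": {"b": 1}, "c": [3, 4]})
#     # -> {"a_b": 1, "c": [3, 4]}    (the basic-type list [3, 4] is kept whole)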
def to_ndarray(obj):
"""Convert to numpy array."""
if isinstance(obj, dict):
return {k: np.asanyarray(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple, set)) and not basic_type_or_list(obj):
return [np.asanyarray(v) for v in obj]
else:
return np.asanyarray(obj)
def is_path(path):
"""Judge if object is path or string of exists path."""
if isinstance(path, os.PathLike):
return True
if not isinstance(path, str):
return False
return os.path.exists(path)
def contains_path(obj):
"""Judge if an array contains path."""
if isinstance(obj, (np.ndarray, list, tuple, set)):
for v in obj:
if is_path(v):
return True
return False
else:
return is_path(obj)
def notebook_exec_result_flattened(path):
"""Prepare notebook for numpy savez."""
# Step 1: Convert notebook to script
code = nbconvert_python(path)
code = code.replace("get_ipython()", "# get_ipython()")
# Step 2: Execute script and save variables in dictionary
d = {}
exec(code, d)
d.pop("__builtins__")
# Step 3: Flatten all variables
bk = flatten_to_dict(d)
# Step 4: Filter for variables which is basic type or list of basic type
bk_filted = {k: v for k, v in bk.items() if basic_type_or_list(v)}
# Step 5: Remove environmental variables
bk_filted = {k: v for k, v in bk_filted.items() if not contains_path(v)}
for key in {"__warningregistry___version"}:
bk_filted.pop(key)
return bk_filted
def main():
"""Excute jupyter notebook and save global variables."""
notebook_path = Path("docs/getting_started.ipynb")
bk = notebook_exec_result_flattened(notebook_path)
# to save session
save_path = Path("pydsge/tests/resources/getting_started_stable.npz")
with open(save_path, "wb") as f:
np.savez_compressed(f, **bk)
if __name__ == "__main__":
main()
|
[
"os.path.exists",
"pickle.dump",
"pathlib.Path",
"pickle.dumps",
"nbformat.read",
"numpy.asanyarray",
"nbconvert.PythonExporter",
"numpy.savez_compressed"
] |
[((4025, 4045), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4039, 4045), False, 'import os\n'), ((5177, 5211), 'pathlib.Path', 'Path', (['"""docs/getting_started.ipynb"""'], {}), "('docs/getting_started.ipynb')\n", (5181, 5211), False, 'from pathlib import Path\n'), ((5307, 5364), 'pathlib.Path', 'Path', (['"""pydsge/tests/resources/getting_started_stable.npz"""'], {}), "('pydsge/tests/resources/getting_started_stable.npz')\n", (5311, 5364), False, 'from pathlib import Path\n'), ((589, 619), 'nbformat.read', 'nbformat.read', (['f'], {'as_version': '(4)'}), '(f, as_version=4)\n', (602, 619), False, 'import nbformat\n'), ((916, 933), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (928, 933), False, 'import pickle\n'), ((2510, 2529), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (2521, 2529), False, 'import pickle\n'), ((5410, 5438), 'numpy.savez_compressed', 'np.savez_compressed', (['f'], {}), '(f, **bk)\n', (5429, 5438), True, 'import numpy as np\n'), ((634, 650), 'nbconvert.PythonExporter', 'PythonExporter', ([], {}), '()\n', (648, 650), False, 'from nbconvert import PythonExporter\n'), ((3609, 3625), 'numpy.asanyarray', 'np.asanyarray', (['v'], {}), '(v)\n', (3622, 3625), True, 'import numpy as np\n'), ((3801, 3819), 'numpy.asanyarray', 'np.asanyarray', (['obj'], {}), '(obj)\n', (3814, 3819), True, 'import numpy as np\n'), ((2608, 2626), 'numpy.asanyarray', 'np.asanyarray', (['obj'], {}), '(obj)\n', (2621, 2626), True, 'import numpy as np\n'), ((3745, 3761), 'numpy.asanyarray', 'np.asanyarray', (['v'], {}), '(v)\n', (3758, 3761), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas.compat import long
from pandas.tseries import offsets
from pandas import Timestamp, Timedelta
class TestTimestampArithmetic(object):
def test_overflow_offset(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
# ends up multiplying really large numbers which overflow
stamp = Timestamp('2017-01-13 00:00:00', freq='D')
offset = 20169940 * offsets.Day(1)
with pytest.raises(OverflowError):
stamp + offset
with pytest.raises(OverflowError):
offset + stamp
with pytest.raises(OverflowError):
stamp - offset
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
assert result.nanosecond == val.nanosecond
def test_timestamp_sub_datetime(self):
dt = datetime(2013, 10, 12)
ts = Timestamp(datetime(2013, 10, 13))
assert (ts - dt).days == 1
assert (dt - ts).days == -1
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
dt = datetime(2014, 3, 4)
td = timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
ts = Timestamp(dt, freq='D')
with tm.assert_produces_warning(FutureWarning):
# GH#22535 add/sub with integers is deprecated
assert type(ts + 1) == Timestamp
assert type(ts - 1) == Timestamp
# Timestamp + datetime not supported, though subtraction is supported
# and yields timedelta more tests in tseries/base/tests/test_base.py
assert type(ts - dt) == Timedelta
assert type(ts + td) == Timestamp
assert type(ts - td) == Timestamp
# Timestamp +/- datetime64 not supported, so not tested (could possibly
# assert error raised?)
td64 = np.timedelta64(1, 'D')
assert type(ts + td64) == Timestamp
assert type(ts - td64) == Timestamp
def test_addition_subtraction_preserve_frequency(self):
ts = Timestamp('2014-03-05', freq='D')
td = timedelta(days=1)
original_freq = ts.freq
with tm.assert_produces_warning(FutureWarning):
# GH#22535 add/sub with integers is deprecated
assert (ts + 1).freq == original_freq
assert (ts - 1).freq == original_freq
assert (ts + td).freq == original_freq
assert (ts - td).freq == original_freq
td64 = np.timedelta64(1, 'D')
assert (ts + td64).freq == original_freq
assert (ts - td64).freq == original_freq
|
[
"datetime.datetime",
"pandas.Timestamp",
"pandas.compat.long",
"datetime.timedelta",
"pytest.raises",
"numpy.timedelta64",
"pandas.tseries.offsets.Day",
"pandas.util.testing.assert_produces_warning"
] |
[((469, 511), 'pandas.Timestamp', 'Timestamp', (['"""2017-01-13 00:00:00"""'], {'freq': '"""D"""'}), "('2017-01-13 00:00:00', freq='D')\n", (478, 511), False, 'from pandas import Timestamp, Timedelta\n'), ((1005, 1027), 'datetime.datetime', 'datetime', (['(2013)', '(10)', '(12)'], {}), '(2013, 10, 12)\n', (1013, 1027), False, 'from datetime import datetime, timedelta\n'), ((1302, 1322), 'datetime.datetime', 'datetime', (['(2014)', '(3)', '(4)'], {}), '(2014, 3, 4)\n', (1310, 1322), False, 'from datetime import datetime, timedelta\n'), ((1336, 1356), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (1345, 1356), False, 'from datetime import datetime, timedelta\n'), ((1482, 1505), 'pandas.Timestamp', 'Timestamp', (['dt'], {'freq': '"""D"""'}), "(dt, freq='D')\n", (1491, 1505), False, 'from pandas import Timestamp, Timedelta\n'), ((2122, 2144), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (2136, 2144), True, 'import numpy as np\n'), ((2307, 2340), 'pandas.Timestamp', 'Timestamp', (['"""2014-03-05"""'], {'freq': '"""D"""'}), "('2014-03-05', freq='D')\n", (2316, 2340), False, 'from pandas import Timestamp, Timedelta\n'), ((2354, 2371), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2363, 2371), False, 'from datetime import datetime, timedelta\n'), ((2731, 2753), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (2745, 2753), True, 'import numpy as np\n'), ((540, 554), 'pandas.tseries.offsets.Day', 'offsets.Day', (['(1)'], {}), '(1)\n', (551, 554), False, 'from pandas.tseries import offsets\n'), ((569, 597), 'pytest.raises', 'pytest.raises', (['OverflowError'], {}), '(OverflowError)\n', (582, 597), False, 'import pytest\n'), ((640, 668), 'pytest.raises', 'pytest.raises', (['OverflowError'], {}), '(OverflowError)\n', (653, 668), False, 'import pytest\n'), ((711, 739), 'pytest.raises', 'pytest.raises', (['OverflowError'], {}), '(OverflowError)\n', (724, 739), False, 'import pytest\n'), ((834, 859), 'pandas.compat.long', 'long', (['(1337299200000000123)'], {}), '(1337299200000000123)\n', (838, 859), False, 'from pandas.compat import long\n'), ((884, 896), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (893, 896), False, 'from datetime import datetime, timedelta\n'), ((1051, 1073), 'datetime.datetime', 'datetime', (['(2013)', '(10)', '(13)'], {}), '(2013, 10, 13)\n', (1059, 1073), False, 'from datetime import datetime, timedelta\n'), ((1520, 1561), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (1546, 1561), True, 'import pandas.util.testing as tm\n'), ((2418, 2459), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (2444, 2459), True, 'import pandas.util.testing as tm\n')]
|
import json
from pathlib import Path
import numpy as np
from matplotlib import path
current_dir = Path(__file__).parent
__all__ = list(p.stem for p in current_dir.glob("*.json"))
def __getattr__(name: str) -> path.Path:
file_path = current_dir / (name + ".json")
if file_path.exists():
data = json.loads(file_path.read_text())
return path.Path(
vertices=data["vertices"], codes=np.array(data["codes"], np.uint8)
)
raise AttributeError(
f"No {name}.json file found in {current_dir.absolute()}."
)
|
[
"numpy.array",
"pathlib.Path"
] |
[((100, 114), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'from pathlib import Path\n'), ((418, 451), 'numpy.array', 'np.array', (["data['codes']", 'np.uint8'], {}), "(data['codes'], np.uint8)\n", (426, 451), True, 'import numpy as np\n')]
|
# Top of main python script
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import sys
import random
import argparse
import numpy as np
import trimesh
import imageio
import open3d as o3d
from mathutils import Matrix
import h5py
import json
from mesh_to_sdf import get_surface_point_cloud
import pyrender
import util
np.random.seed(12433)
random.seed(12433)
train_categories = [
"04379243",
"02958343",
"03001627",
"02691156",
"04256520",
"04090263",
"03636649",
"04530566",
"02828884",
"03691459",
"02933112",
"03211117",
"04401088",
]
val_categories = [
"02924116",
"02808440",
"03467517",
"03325088",
"03046257",
"03991062",
"03593526",
"02876657",
"02871439",
"03642806",
"03624134",
"04468005",
"02747177",
"03790512",
"03948459",
"03337140",
"02818832",
"03928116",
"04330267",
"03797390",
"02880940",
"04554684",
"04004475",
"03513137",
"03761084",
"04225987",
"04460130",
"02942699",
"02801938",
"02946921",
"03938244",
"03710193",
"03207941",
"04099429",
"02773838",
"02843684",
"03261776",
"03759954",
"04074963",
"03085013",
"02992529",
"02954340",
]
p = argparse.ArgumentParser(
description="Renders given obj file by rotation a camera around it."
)
p.add_argument(
"--data_dir",
type=str,
default="/labdata/nicolai/data/ShapeNetCore.v2",
help="Data directory containing meshes.",
)
p.add_argument(
"--output_dir",
type=str,
default="./images",
help="The path the output will be dumped to.",
)
p.add_argument(
"--num_views",
type=int,
default=25,
help="Number of images to render",
)
p.add_argument("--resolution", type=int, default=256, help="output image resolution.")
p.add_argument(
"--sphere_radius",
type=float,
default=1.2,
help="Radius of the viewing sphere",
)
p.add_argument("--val", action="store_true", help="Use to render validation split")
p.add_argument(
"--save_png",
action="store_true",
help="Save output images for visualization",
)
p.add_argument(
"--show_3d",
action="store_true",
help="Save output images for visualization",
)
def normalize_mesh(mesh):
# Center the mesh
matrix = np.eye(4)
bounds = mesh.bounds
centroid = (bounds[1, :] + bounds[0, :]) / 2
matrix[:3, -1] = -centroid
mesh.apply_transform(matrix)
    # Scale the model to unit diagonal length
matrix = np.eye(4)
extents = mesh.extents
diag = np.sqrt(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)
matrix[:3, :3] *= 1.0 / diag
mesh.apply_transform(matrix)
return mesh
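# Added note: after normalize_mesh the bounding-box centre sits at the origin and
# the bounding-box diagonal has length 1, so every model fits well inside the
# default viewing sphere of radius 1.2 regardless of its original scale.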
def main():
args = p.parse_args()
instance_names = []
shapenet_categories = train_categories + val_categories
folders = sorted(os.listdir(args.data_dir))
for cat in shapenet_categories:
path = os.path.join(args.data_dir, cat)
new_instances = [
os.path.join(cat, f)
for f in sorted(os.listdir(path))
if os.path.isdir(os.path.join(path, f))
]
instance_names = instance_names + new_instances
instance_names = instance_names[0:10000]
if len(instance_names) == 0:
print("Data dir does not contain any instances")
raise NotImplementedError
# instance_names = instance_names[32000:]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
print(f"Number of files: {len(instance_names)}")
# Load n meshes
count = 0
mesh_errors = {}
for instance_name in instance_names:
runtime_error = False
category, instance_name = instance_name.split("/")
if os.path.exists(os.path.join(args.output_dir, f"{instance_name}.h5")):
continue
try:
mesh = trimesh.load(
os.path.join(
args.data_dir,
category,
instance_name,
"models",
"model_normalized.obj",
),
force="mesh",
)
except ValueError:
if category not in mesh_errors.keys():
mesh_errors[category] = []
mesh_errors[category].append(instance_name)
print(f"ValueError with instance {instance_name}. Skipping....")
continue
# Normalize the mesh to unit diagonal
mesh = normalize_mesh(mesh)
cam_locations = util.sample_spherical(args.num_views, args.sphere_radius)
obj_location = np.zeros((1, 3))
cv_poses = util.look_at(cam_locations, obj_location)
cam_locations = [util.cv_cam2world_to_bcam2world(m) for m in cv_poses]
image_size = (args.resolution, args.resolution)
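        # Added note: pinhole intrinsics with focal length 262.5 px and principal
        # point (128, 128); these values assume the default 256x256 resolution and
        # are not derived from --resolution.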
K = np.array([[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]])
camera = pyrender.IntrinsicsCamera(
fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2], znear=0.01, zfar=100
)
rgbs = []
depths = []
masks = []
c2ws = []
normals = []
scene = pyrender.Scene.from_trimesh_scene(
trimesh.Scene(mesh), ambient_light=(1, 1, 1)
)
for ii, w2c in enumerate(cam_locations):
# Add camera roll
theta = random.random() * np.pi
roll_matrix = Matrix(
(
(np.cos(theta), -np.sin(theta), 0, 0),
(np.sin(theta), np.cos(theta), 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
)
)
w2c = roll_matrix @ w2c
if ii == 0:
cam_node = scene.add(camera, pose=np.array(w2c))
else:
scene.set_pose(cam_node, pose=np.array(w2c))
try:
r = pyrender.OffscreenRenderer(*image_size)
color, depth = r.render(
scene, flags=pyrender.constants.RenderFlags.FLAT
)
if np.all(color == 255):
raise RuntimeError("No texture rendered")
except Exception as e:
print(f"RuntimeError with instance: {instance_name}. Skipping...")
runtime_error = True
r.delete()
if category not in mesh_errors.keys():
mesh_errors[category] = []
mesh_errors[category].append(instance_name)
break
normals.append(util.depth_2_normal(depth, depth == 0.0, K))
mask = depth != 0
w2c = np.array(util.get_world2cam_from_blender_cam(w2c))
rgbs.append(color)
depths.append(depth)
masks.append(mask)
c2ws.append(np.linalg.inv(w2c))
r.delete()
if args.save_png:
imageio.imwrite(
os.path.join(
args.output_dir, f"{instance_name}_{str(ii).zfill(3)}.png"
),
color,
)
if runtime_error:
runtime_error = False
continue
rgbs = np.stack([r for r in rgbs])
        # (all-white renders were already rejected inside the loop above)
depths = np.stack([r for r in depths])
masks = np.stack([r for r in masks])
poses = np.stack([r for r in c2ws])
normals = np.stack([r for r in normals])
# Generate 3D supervision data for the prior
number_of_points = 100000
surface_pcd = get_surface_point_cloud(
mesh, "scan", args.sphere_radius, 100, 400, 10000000, calculate_normals=True
)
pts, sdf = surface_pcd.sample_sdf_near_surface(
number_of_points,
1,
sign_method="normal",
normal_sample_count=11,
min_size=0,
return_gradients=False,
)
sdf_pts = np.concatenate([pts, sdf[:, None]], axis=-1)
if args.show_3d:
colors = np.zeros_like(pts)
colors[:, 0] = 1
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pts)
pcd.colors = o3d.utility.Vector3dVector(colors)
frames = []
for c in c2ws:
frames.append(
o3d.geometry.TriangleMesh.create_coordinate_frame().transform(c)
)
o3d.visualization.draw_geometries(frames + [pcd])
hf = h5py.File(os.path.join(args.output_dir, f"{instance_name}.h5"), "w")
hf.create_dataset("rgb", data=rgbs, compression="gzip", dtype="f")
hf.create_dataset("depth", data=depths, compression="gzip", dtype="f")
hf.create_dataset("mask", data=masks, compression="gzip", dtype="f")
hf.create_dataset("normals", data=normals, compression="gzip", dtype="f")
hf.create_dataset("pose", data=poses, compression="gzip", dtype="f")
hf.create_dataset("K", data=K, dtype="f")
hf.create_dataset("sphere_radius", data=args.sphere_radius, dtype="f")
hf.create_dataset("sdf", data=sdf_pts, compression="gzip", dtype="f")
hf.create_dataset("category", data=category)
hf.close()
count += 1
if count % 100 == 0:
print(f"Generated {count} new instances")
with open(os.path.join(args.output_dir, "failures.json"), "w") as outfile:
json.dump(mesh_errors, outfile)
print("Finished all data generation")
if __name__ == "__main__":
main()
|
[
"numpy.sqrt",
"util.look_at",
"numpy.array",
"util.cv_cam2world_to_bcam2world",
"numpy.sin",
"util.sample_spherical",
"os.path.exists",
"pyrender.IntrinsicsCamera",
"os.listdir",
"argparse.ArgumentParser",
"numpy.stack",
"util.depth_2_normal",
"util.get_world2cam_from_blender_cam",
"numpy.random.seed",
"numpy.concatenate",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"numpy.eye",
"numpy.all",
"open3d.visualization.draw_geometries",
"trimesh.Scene",
"numpy.cos",
"mesh_to_sdf.get_surface_point_cloud",
"open3d.utility.Vector3dVector",
"os.makedirs",
"os.path.join",
"random.seed",
"pyrender.OffscreenRenderer",
"numpy.zeros",
"numpy.linalg.inv",
"open3d.geometry.PointCloud",
"random.random",
"numpy.zeros_like",
"json.dump"
] |
[((324, 345), 'numpy.random.seed', 'np.random.seed', (['(12433)'], {}), '(12433)\n', (338, 345), True, 'import numpy as np\n'), ((346, 364), 'random.seed', 'random.seed', (['(12433)'], {}), '(12433)\n', (357, 364), False, 'import random\n'), ((1295, 1393), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Renders given obj file by rotation a camera around it."""'}), "(description=\n 'Renders given obj file by rotation a camera around it.')\n", (1318, 1393), False, 'import argparse\n'), ((2345, 2354), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2351, 2354), True, 'import numpy as np\n'), ((2553, 2562), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2559, 2562), True, 'import numpy as np\n'), ((2601, 2661), 'numpy.sqrt', 'np.sqrt', (['(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)'], {}), '(extents[0] ** 2 + extents[1] ** 2 + extents[2] ** 2)\n', (2608, 2661), True, 'import numpy as np\n'), ((2890, 2915), 'os.listdir', 'os.listdir', (['args.data_dir'], {}), '(args.data_dir)\n', (2900, 2915), False, 'import os\n'), ((2968, 3000), 'os.path.join', 'os.path.join', (['args.data_dir', 'cat'], {}), '(args.data_dir, cat)\n', (2980, 3000), False, 'import os\n'), ((3453, 3484), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (3467, 3484), False, 'import os\n'), ((3494, 3522), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (3505, 3522), False, 'import os\n'), ((4563, 4620), 'util.sample_spherical', 'util.sample_spherical', (['args.num_views', 'args.sphere_radius'], {}), '(args.num_views, args.sphere_radius)\n', (4584, 4620), False, 'import util\n'), ((4644, 4660), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (4652, 4660), True, 'import numpy as np\n'), ((4680, 4721), 'util.look_at', 'util.look_at', (['cam_locations', 'obj_location'], {}), '(cam_locations, obj_location)\n', (4692, 4721), False, 'import util\n'), ((4869, 4938), 'numpy.array', 'np.array', (['[[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]]'], {}), '([[262.5, 0.0, 128.0], [0.0, 262.5, 128.0], [0.0, 0.0, 1.0]])\n', (4877, 4938), True, 'import numpy as np\n'), ((4956, 5055), 'pyrender.IntrinsicsCamera', 'pyrender.IntrinsicsCamera', ([], {'fx': 'K[0, 0]', 'fy': 'K[1, 1]', 'cx': 'K[0, 2]', 'cy': 'K[1, 2]', 'znear': '(0.01)', 'zfar': '(100)'}), '(fx=K[0, 0], fy=K[1, 1], cx=K[0, 2], cy=K[1, 2],\n znear=0.01, zfar=100)\n', (4981, 5055), False, 'import pyrender\n'), ((7244, 7271), 'numpy.stack', 'np.stack', (['[r for r in rgbs]'], {}), '([r for r in rgbs])\n', (7252, 7271), True, 'import numpy as np\n'), ((7373, 7402), 'numpy.stack', 'np.stack', (['[r for r in depths]'], {}), '([r for r in depths])\n', (7381, 7402), True, 'import numpy as np\n'), ((7419, 7447), 'numpy.stack', 'np.stack', (['[r for r in masks]'], {}), '([r for r in masks])\n', (7427, 7447), True, 'import numpy as np\n'), ((7464, 7491), 'numpy.stack', 'np.stack', (['[r for r in c2ws]'], {}), '([r for r in c2ws])\n', (7472, 7491), True, 'import numpy as np\n'), ((7510, 7540), 'numpy.stack', 'np.stack', (['[r for r in normals]'], {}), '([r for r in normals])\n', (7518, 7540), True, 'import numpy as np\n'), ((7651, 7757), 'mesh_to_sdf.get_surface_point_cloud', 'get_surface_point_cloud', (['mesh', '"""scan"""', 'args.sphere_radius', '(100)', '(400)', '(10000000)'], {'calculate_normals': '(True)'}), "(mesh, 'scan', args.sphere_radius, 100, 400, \n 10000000, calculate_normals=True)\n", (7674, 7757), False, 'from mesh_to_sdf import 
get_surface_point_cloud\n'), ((8034, 8078), 'numpy.concatenate', 'np.concatenate', (['[pts, sdf[:, None]]'], {'axis': '(-1)'}), '([pts, sdf[:, None]], axis=-1)\n', (8048, 8078), True, 'import numpy as np\n'), ((9527, 9558), 'json.dump', 'json.dump', (['mesh_errors', 'outfile'], {}), '(mesh_errors, outfile)\n', (9536, 9558), False, 'import json\n'), ((3039, 3059), 'os.path.join', 'os.path.join', (['cat', 'f'], {}), '(cat, f)\n', (3051, 3059), False, 'import os\n'), ((3790, 3842), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""{instance_name}.h5"""'], {}), "(args.output_dir, f'{instance_name}.h5')\n", (3802, 3842), False, 'import os\n'), ((4747, 4781), 'util.cv_cam2world_to_bcam2world', 'util.cv_cam2world_to_bcam2world', (['m'], {}), '(m)\n', (4778, 4781), False, 'import util\n'), ((5235, 5254), 'trimesh.Scene', 'trimesh.Scene', (['mesh'], {}), '(mesh)\n', (5248, 5254), False, 'import trimesh\n'), ((8126, 8144), 'numpy.zeros_like', 'np.zeros_like', (['pts'], {}), '(pts)\n', (8139, 8144), True, 'import numpy as np\n'), ((8193, 8218), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (8216, 8218), True, 'import open3d as o3d\n'), ((8244, 8275), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pts'], {}), '(pts)\n', (8270, 8275), True, 'import open3d as o3d\n'), ((8301, 8335), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (8327, 8335), True, 'import open3d as o3d\n'), ((8534, 8583), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['(frames + [pcd])'], {}), '(frames + [pcd])\n', (8567, 8583), True, 'import open3d as o3d\n'), ((8608, 8660), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""{instance_name}.h5"""'], {}), "(args.output_dir, f'{instance_name}.h5')\n", (8620, 8660), False, 'import os\n'), ((9454, 9500), 'os.path.join', 'os.path.join', (['args.output_dir', '"""failures.json"""'], {}), "(args.output_dir, 'failures.json')\n", (9466, 9500), False, 'import os\n'), ((3929, 4019), 'os.path.join', 'os.path.join', (['args.data_dir', 'category', 'instance_name', '"""models"""', '"""model_normalized.obj"""'], {}), "(args.data_dir, category, instance_name, 'models',\n 'model_normalized.obj')\n", (3941, 4019), False, 'import os\n'), ((5389, 5404), 'random.random', 'random.random', ([], {}), '()\n', (5402, 5404), False, 'import random\n'), ((5924, 5963), 'pyrender.OffscreenRenderer', 'pyrender.OffscreenRenderer', (['*image_size'], {}), '(*image_size)\n', (5950, 5963), False, 'import pyrender\n'), ((6111, 6131), 'numpy.all', 'np.all', (['(color == 255)'], {}), '(color == 255)\n', (6117, 6131), True, 'import numpy as np\n'), ((6589, 6632), 'util.depth_2_normal', 'util.depth_2_normal', (['depth', '(depth == 0.0)', 'K'], {}), '(depth, depth == 0.0, K)\n', (6608, 6632), False, 'import util\n'), ((6692, 6732), 'util.get_world2cam_from_blender_cam', 'util.get_world2cam_from_blender_cam', (['w2c'], {}), '(w2c)\n', (6727, 6732), False, 'import util\n'), ((6854, 6872), 'numpy.linalg.inv', 'np.linalg.inv', (['w2c'], {}), '(w2c)\n', (6867, 6872), True, 'import numpy as np\n'), ((3088, 3104), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3098, 3104), False, 'import os\n'), ((3135, 3156), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (3147, 3156), False, 'import os\n'), ((5486, 5499), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5492, 5499), True, 'import numpy as np\n'), ((5545, 5558), 'numpy.sin', 'np.sin', (['theta'], {}), 
'(theta)\n', (5551, 5558), True, 'import numpy as np\n'), ((5560, 5573), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5566, 5573), True, 'import numpy as np\n'), ((5792, 5805), 'numpy.array', 'np.array', (['w2c'], {}), '(w2c)\n', (5800, 5805), True, 'import numpy as np\n'), ((5871, 5884), 'numpy.array', 'np.array', (['w2c'], {}), '(w2c)\n', (5879, 5884), True, 'import numpy as np\n'), ((5502, 5515), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5508, 5515), True, 'import numpy as np\n'), ((8439, 8490), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {}), '()\n', (8488, 8490), True, 'import open3d as o3d\n')]
|
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter facial recognition server module"""
import os
import json
import threading
import random
import logging
from logging.config import fileConfig
import numpy as np
from json.decoder import JSONDecodeError
from google.protobuf.message import DecodeError
import common.presenter_message_pb2 as presenter_message_pb2
from common.channel_manager import ChannelManager
from common.presenter_socket_server import PresenterSocketServer
from common.app_manager import AppManager
import facial_recognition.src.facial_recognition_message_pb2 as pb2
from facial_recognition.src.config_parser import ConfigParser
from facial_recognition.src.facial_recognition_handler import FacialRecognitionHandler
# Face Registration timeout is 10 seconds
FACE_REGISTER_TIME_OUT = 10
# Presenter Server Type
SERVER_TYPE = "facial_recognition"
# max app name length
APP_ID_MAX_LENGTH = 20
# max support 2 app connect
MAX_APP_NUM = 2
# length of face feature vector
FEATURE_VECTOR_LENGTH = 1024
# Face Registration Status code
FACE_REGISTER_STATUS_WAITING = 1
FACE_REGISTER_STATUS_SUCCEED = 2
FACE_REGISTER_STATUS_FAILED = 3
class FacialRecognitionServer(PresenterSocketServer):
'''A server for face recognition'''
def __init__(self, config):
"""
Description: class init func
Input:
config: config information
Returns: NA
"""
server_address = (config.presenter_server_ip,
int(config.presenter_server_port))
super(FacialRecognitionServer, self).__init__(server_address)
self.storage_dir = config.storage_dir
self.max_face_num = int(config.max_face_num)
self.face_match_threshold = float(config.face_match_threshold)
self.register_dict = {}
self.app_manager = AppManager()
self.channel_manager = ChannelManager()
        # File that stores registered face data
self.face_register_file = os.path.join(self.storage_dir,
"registered_faces.json")
self._init_face_database()
def _init_face_database(self):
"""
Description: Init face recognition database,
read information from face_register_file
Input: NA
Returns: NA
"""
if not os.path.isfile(self.face_register_file):
with open(self.face_register_file, "w", encoding="utf-8") as f:
f.write("{}")
with open(self.face_register_file, "r") as f:
self.face_lock = threading.Lock()
self.registered_faces = json.load(f)
self._filter_registration_data()
def _filter_registration_data(self):
face_dict = self.registered_faces.copy()
for i in face_dict:
image_path = os.path.join(self.storage_dir, i + ".jpg")
if not os.path.isfile(image_path):
del self.registered_faces[i]
def get_all_face(self):
"""
Description: get registered face list.
Input: NA
Returns: NA
"""
with self.face_lock:
return [i for i in self.registered_faces]
def save_face_image(self, name, image):
"""
Description: save face image.
Input:
name face name
image: face image
Returns: True or False
"""
image_file = os.path.join(self.storage_dir, name + ".jpg")
try:
#image = image.decode("utf-8")
with open(image_file, "wb") as f:
f.write(image)
return True
except (OSError, TypeError) as exp:
logging.error(exp)
return False
def get_app_socket(self, app_id):
"""
Description: get a socket which is bound to the app.
Input:
app_id: id of the app
Returns: socket
"""
return self.app_manager.get_socket_by_app_id(app_id)
def list_registered_apps(self):
"""
Description: get registered apps list.
Input: NA
Returns: app list
"""
return self.app_manager.list_app()
def delete_faces(self, name_list):
"""
Description: delete registered faces in name_list
Input:
name_list: a name list
Returns: True or False
"""
with self.face_lock:
for i in name_list:
if self.registered_faces.get(i):
backup = self.registered_faces[i]
del self.registered_faces[i]
try:
with open(self.face_register_file, "w") as f:
json.dump(self.registered_faces, f)
image_file = os.path.join(
self.storage_dir, i + ".jpg")
os.remove(image_file)
except (OSError, JSONDecodeError) as exp:
logging.error(exp)
self.registered_faces[i] = backup
return False
return True
def _clean_connect(self, sock_fileno, epoll, conns, msgs):
"""
Description: close socket, and clean local variables
Input:
sock_fileno: a socket fileno, return value of socket.fileno()
epoll: a set of select.epoll.
conns: all socket connections registered in epoll
msgs: msg read from a socket
"""
logging.info("clean fd:%s, conns:%s", sock_fileno, conns)
self.app_manager.unregister_app_by_fd(sock_fileno)
epoll.unregister(sock_fileno)
conns[sock_fileno].close()
del conns[sock_fileno]
del msgs[sock_fileno]
def _process_msg(self, conn, msg_name, msg_data):
"""
        Main entry point for processing protobuf messages
Input:
conn: a socket connection
msg_name: name of a msg.
msg_data: msg body, serialized by protobuf
Returns:
            False: some error occurred
            True: succeeded
"""
# process open channel request
if msg_name == pb2._REGISTERAPP.full_name:
ret = self._process_register_app(conn, msg_data)
# process image request, receive an image data from presenter agent
elif msg_name == pb2._FACERESULT.full_name:
ret = self._process_face_result(msg_data)
elif msg_name == pb2._FRAMEINFO.full_name:
ret = self._process_frame_info(conn, msg_data)
elif msg_name == presenter_message_pb2._OPENCHANNELREQUEST.full_name:
ret = self._process_open_channel(conn, msg_data)
# process heartbeat request, it used to keepalive a channel path
elif msg_name == presenter_message_pb2._HEARTBEATMESSAGE.full_name:
ret = self._process_heartbeat(conn)
else:
logging.error("Not recognized msg type %s", msg_name)
ret = False
return ret
def _process_heartbeat(self, conn):
'''
set heartbeat
Input:
conn: a socket connection
Returns:
True: set heartbeat ok.
'''
sock_fileno = conn.fileno()
if self.app_manager.get_app_id_by_socket(sock_fileno):
self.app_manager.set_heartbeat(sock_fileno)
handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
if handler is not None:
handler.set_heartbeat()
return True
def _parse_protobuf(self, protobuf, msg_data):
"""
Description: parse protobuf
Input:
protobuf: a struct defined by protobuf
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
try:
protobuf.ParseFromString(msg_data)
return True
except DecodeError as exp:
logging.error(exp)
return False
def _process_register_app(self, conn, msg_data):
"""
Description: process register_app message
Input:
conn: a socket connection
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
request = pb2.RegisterApp()
response = pb2.CommonResponse()
msg_name = pb2._COMMONRESPONSE.full_name
if not self._parse_protobuf(request, msg_data):
response.ret = pb2.kErrorOther
response.message = "ParseFromString exception"
self.send_message(conn, response, msg_name)
return False
app_id = request.id
app_type = request.type
# check app id if exist
if self.app_manager.is_app_exist(app_id):
logging.error("App %s is already exist.", app_id)
response.ret = pb2.kErrorAppRegisterExist
response.message = "App {} is already exist.".format(app_id)
self.send_message(conn, response, msg_name)
elif self.app_manager.get_app_num() >= MAX_APP_NUM:
logging.error("App number reach the upper limit")
response.ret = pb2.kErrorAppRegisterLimit
response.message = "App number reach the upper limit"
self.send_message(conn, response, msg_name)
elif app_type != SERVER_TYPE:
logging.error("App type %s error", app_type)
response.ret = pb2.kErrorAppRegisterType
response.message = "App type {} error".format(app_type)
self.send_message(conn, response, msg_name)
elif len(app_id) > APP_ID_MAX_LENGTH:
logging.error("App id %s is too long", app_id)
response.ret = pb2.kErrorOther
response.message = "App id: {} is too long".format(app_id)
self.send_message(conn, response, msg_name)
else:
self.app_manager.register_app(app_id, conn)
response.ret = pb2.kErrorNone
response.message = "Register app {} succeed".format(app_id)
self.send_message(conn, response, msg_name)
return True
return False
def _process_face_result(self, msg_data):
"""
Description: process face_result message
Input:
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
face_result = pb2.FaceResult()
if not self._parse_protobuf(face_result, msg_data):
return False
face_id = face_result.id
if not self.register_dict.get(face_id):
logging.warning("face id %s is already deleted", face_id)
return True
ret = face_result.response.ret
if ret != pb2.kErrorNone:
err_msg = face_result.response.message
logging.error("get face feature error message: %s", err_msg)
status = FACE_REGISTER_STATUS_FAILED
message = "Get face feature failed"
self._update_register_dict(face_id, status, message)
return True
face_num = len(face_result.feature)
if face_num == 0:
status = FACE_REGISTER_STATUS_FAILED
message = "No face recognized"
self._update_register_dict(face_id, status, message)
elif face_num > 1:
status = FACE_REGISTER_STATUS_FAILED
message = "{} faces recognized".format(face_num)
self._update_register_dict(face_id, status, message)
else:
box = face_result.feature[0].box
            face_coordinate = [box.lt_x, box.lt_y, box.rb_x, box.rb_y]
feature_vector = [i for i in face_result.feature[0].vector]
if len(feature_vector) != FEATURE_VECTOR_LENGTH:
logging.error("feature_vector length not equal 1024")
status = FACE_REGISTER_STATUS_FAILED
message = "Face feature vector length invalid"
self._update_register_dict(face_id, status, message)
return True
return self._save_face_feature(face_id, face_coordinate,
feature_vector)
return True
def _update_register_dict(self, face_id, status, message):
"""
Description: update register_dict
Input:
face_id: id of face
status: status of face register
message: message of status of face register
Returns: True or False
"""
if self.register_dict.get(face_id):
self.register_dict[face_id]["status"] = status
self.register_dict[face_id]["message"] = message
self.register_dict[face_id]["event"].set()
def _save_face_feature(self, face_id, face_coordinate, feature_vector):
"""
Description: save face_feature
Input:
face_id: id of face
face_coordinate: face coordinates
feature_vector: face feature vector
Returns: True or False
"""
with self.face_lock:
self.registered_faces[face_id] = {
"coordinate": face_coordinate,
"feature": feature_vector
}
try:
with open(self.face_register_file, "w") as f:
json.dump(self.registered_faces, f)
status = FACE_REGISTER_STATUS_SUCCEED
message = "Successful registration"
self._update_register_dict(face_id, status, message)
return True
except (OSError, JSONDecodeError) as exp:
logging.error(exp)
del self.registered_faces[face_id]
status = FACE_REGISTER_STATUS_FAILED
message = "save face feature to json file failed"
self._update_register_dict(face_id, status, message)
return False
def _process_open_channel(self, conn, msg_data):
"""
Description: process open channel message
Input:
conn: a socket connection
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
request = presenter_message_pb2.OpenChannelRequest()
response = presenter_message_pb2.OpenChannelResponse()
if not self._parse_protobuf(request, msg_data):
channel_name = "unknown channel"
err_code = presenter_message_pb2.kOpenChannelErrorOther
return self._response_open_channel(conn, channel_name,
response, err_code)
channel_name = request.channel_name
# check channel name if exist
if not self.channel_manager.is_channel_exist(channel_name):
logging.error("channel name %s is not exist.", channel_name)
err_code = presenter_message_pb2.kOpenChannelErrorNoSuchChannel
return self._response_open_channel(conn, channel_name,
response, err_code)
#ret = self.channel_manager.register_one_channel(channel_name)
# if ret != ChannelManager.err_code_ok:
# logging.error("Create the channel %s failed!, and ret is %d", channel_name, ret)
# err_code = pb2.kOpenChannelErrorOther
# self._response_open_channel(conn, channel_name, response, err_code)
# check channel path if busy
if self.channel_manager.is_channel_busy(channel_name):
logging.error("channel path %s is busy.", channel_name)
err = presenter_message_pb2.kOpenChannelErrorChannelAlreadyOpened
return self._response_open_channel(conn, channel_name,
response, err)
content_type = presenter_message_pb2.kChannelContentTypeVideo
if request.content_type == content_type:
media_type = "video"
else:
logging.error("media type %s is not recognized.",
request.content_type)
err_code = presenter_message_pb2.kOpenChannelErrorOther
return self._response_open_channel(conn, channel_name,
response, err_code)
handler = FacialRecognitionHandler(channel_name, media_type)
sock = conn.fileno()
self.channel_manager.create_channel_resource(channel_name, sock,
media_type, handler)
err_code = presenter_message_pb2.kOpenChannelErrorNone
return self._response_open_channel(conn, channel_name,
response, err_code)
def _process_frame_info(self, conn, msg_data):
"""
Description: process frame info message
Input:
conn: a socket connection
msg_data: msg body, serialized by protobuf
Returns: True or False
"""
request = pb2.FrameInfo()
response = pb2.CommonResponse()
msg_name = pb2._COMMONRESPONSE.full_name
if not self._parse_protobuf(request, msg_data):
return False
sock_fileno = conn.fileno()
handler = self.channel_manager.get_channel_handler_by_fd(sock_fileno)
if handler is None:
logging.error("get channel handler failed")
response.ret = pb2.kErrorOther
response.message = "channel error."
self.send_message(conn, response, msg_name)
return False
face_list = self._recognize_face(request.feature)
handler.save_frame(request.image, face_list)
response.ret = pb2.kErrorNone
response.message = "process frame info suceed."
self.send_message(conn, response, msg_name)
return True
def _recognize_face(self, face_feature):
"""
Description: recognize which face it is.
Input:
face_feature: face feature
Returns: face list
"""
face_list = []
for i in face_feature:
face_info = {}
box = i.box
coordinate = [box.lt_x, box.lt_y, box.rb_x, box.rb_y]
feature_vector = i.vector
if len(feature_vector) != FEATURE_VECTOR_LENGTH:
logging.error("feature_vector length not equal 1024")
continue
(name, score) = self._compute_face_feature(feature_vector)
face_info["coordinate"] = coordinate
face_info["name"] = name
face_info["confidence"] = score
face_list.append(face_info)
return face_list
def _compute_face_feature(self, feture_vector):
"""
Description: compute score of the feture_vector
Input:
feture_vector: face feature vector
Returns: face name and score
"""
highest_score_face = "Unknown"
highest_score = 0
with self.face_lock:
for i in self.registered_faces:
feature = self.registered_faces[i]["feature"]
score = self._compute_similar_degree(feature, feture_vector)
if score < self.face_match_threshold:
continue
if score > highest_score:
highest_score = score
highest_score_face = i
return (highest_score_face, highest_score)
def _compute_similar_degree(self, feture_vector1, feture_vector2):
"""
Description: compute cosine similarity of two vectors
Input:
feture_vector1: face feature vector
feture_vector2: face feature vector
Returns: score
"""
vector1 = np.array(feture_vector1)
vector2 = np.array(feture_vector2)
        norm_product = np.linalg.norm(vector1) * np.linalg.norm(vector2)
        score = np.dot(vector1, vector2) / norm_product
return score
def stop_thread(self):
"""
        Description: clean up threads when the process exits.
Input: NA
Returns: NA
"""
channel_manager = ChannelManager([])
channel_manager.close_all_thread()
self.set_exit_switch()
self.app_manager.set_thread_switch()
class FacialRecognitionManager():
'''Manager of Face Recognition, a class providing APIs'''
__instance = None
server = None
def __init__(self, server=None):
'''init func'''
def __new__(cls, server=None):
"""ensure only a single instance created. """
if cls.__instance is None:
cls.__instance = object.__new__(cls)
cls.server = server
return cls.__instance
def _choose_random_app(self):
"""
Description: choose a random app online.
Input: NA
        Returns: an app name
"""
app_list = self.server.list_registered_apps()
if app_list:
index = random.randint(0, len(app_list) - 1)
return app_list[index]
return None
def get_app_list(self):
"""
Description: API for getting online app list
Input: NA
Returns: app list
"""
return self.server.list_registered_apps()
def register_face(self, name, image):
"""
Description: API for registering face
Input:
name: a face name
image: a face picture
Returns: (ret, msg)
"""
# Input para check
if not isinstance(name, str):
return (False, "Name is not string")
if not isinstance(image, bytes):
return (False, "Image is not bytes")
if self._get_face_number() >= self.server.max_face_num:
return (False, "Face number limit")
app_id = self._choose_random_app()
if app_id is None:
return (False, "No app is online")
conn = self.server.get_app_socket(app_id)
if conn is None:
return (False, "Internal Error, app lost socket")
# Prepare sending face register message to agent
request = pb2.FaceInfo()
request.id = name
request.image = image
register_dict = self.server.register_dict
register_dict[name] = {
"status": FACE_REGISTER_STATUS_WAITING,
"message": "",
"event": threading.Event()
}
msg_name = pb2._FACEINFO.full_name
self.server.send_message(conn, request, msg_name)
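        # Block until _process_face_result sets the event, or give up after FACE_REGISTER_TIME_OUT seconds.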
register_dict[name]["event"].wait(FACE_REGISTER_TIME_OUT)
if register_dict[name]["status"] == FACE_REGISTER_STATUS_WAITING:
logging.warning("Register face %s timeout", name)
del register_dict[name]
return (False, "10 sec Timeout")
if register_dict[name]["status"] == FACE_REGISTER_STATUS_FAILED:
err_msg = register_dict[name]["message"]
logging.error("Register face %s failed, reason:%s",
name, register_dict[name]["message"])
del register_dict[name]
return (False, err_msg)
ret = self.server.save_face_image(name, image)
del register_dict[name]
if ret:
logging.info("Register face %s succeed", name)
return (True, "Successful Registration")
logging.error("Save face %s to database failed", name)
return (False, "Save database error")
def unregister_face(self, name_list):
"""
Description: API for unregistering faces
Input:
name_list: a name list which will be deleted.
Returns: True or False
"""
if isinstance(name_list, list):
return self.server.delete_faces(name_list)
logging.error("unregister face fail")
return False
def get_all_face_name(self):
"""
        Description: API for getting all registered face names
Input: NA
Returns: a name list
"""
return self.server.get_all_face()
def _get_face_number(self):
"""
        Description: getting the total face number
Input: NA
Returns: total face number
"""
return len(self.get_all_face_name())
def get_faces(self, name_list):
"""
        Description: API for getting specified face info.
        Input: a name list.
        Returns: a list including face names and images.
"""
if not isinstance(name_list, list):
return []
face_list = []
for i in name_list:
face_info = {}
face_info["name"] = i
try:
image_file = os.path.join(self.server.storage_dir, i + ".jpg")
face_info["image"] = open(image_file, 'rb').read()
except OSError as exp:
logging.error(exp)
continue
face_list.append(face_info)
return face_list
def run():
'''Face Recognition server startup function'''
# read config file
config = ConfigParser()
# config log
log_file_path = os.path.join(ConfigParser.root_path, "config/logging.conf")
fileConfig(log_file_path)
logging.getLogger('facial_recognition')
if not config.config_verify():
return None
server = FacialRecognitionServer(config)
FacialRecognitionManager(server)
return server
|
[
"logging.getLogger",
"common.presenter_message_pb2.OpenChannelRequest",
"facial_recognition.src.facial_recognition_handler.FacialRecognitionHandler",
"common.app_manager.AppManager",
"numpy.array",
"numpy.linalg.norm",
"facial_recognition.src.facial_recognition_message_pb2.RegisterApp",
"logging.info",
"logging.error",
"os.remove",
"facial_recognition.src.facial_recognition_message_pb2.CommonResponse",
"facial_recognition.src.facial_recognition_message_pb2.FaceInfo",
"threading.Lock",
"common.channel_manager.ChannelManager",
"numpy.dot",
"common.presenter_message_pb2.OpenChannelResponse",
"logging.warning",
"os.path.isfile",
"facial_recognition.src.config_parser.ConfigParser",
"os.path.join",
"facial_recognition.src.facial_recognition_message_pb2.FaceResult",
"facial_recognition.src.facial_recognition_message_pb2.FrameInfo",
"threading.Event",
"logging.config.fileConfig",
"json.load",
"json.dump"
] |
[((26636, 26650), 'facial_recognition.src.config_parser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (26648, 26650), False, 'from facial_recognition.src.config_parser import ConfigParser\n'), ((26689, 26748), 'os.path.join', 'os.path.join', (['ConfigParser.root_path', '"""config/logging.conf"""'], {}), "(ConfigParser.root_path, 'config/logging.conf')\n", (26701, 26748), False, 'import os\n'), ((26753, 26778), 'logging.config.fileConfig', 'fileConfig', (['log_file_path'], {}), '(log_file_path)\n', (26763, 26778), False, 'from logging.config import fileConfig\n'), ((26783, 26822), 'logging.getLogger', 'logging.getLogger', (['"""facial_recognition"""'], {}), "('facial_recognition')\n", (26800, 26822), False, 'import logging\n'), ((3522, 3534), 'common.app_manager.AppManager', 'AppManager', ([], {}), '()\n', (3532, 3534), False, 'from common.app_manager import AppManager\n'), ((3566, 3582), 'common.channel_manager.ChannelManager', 'ChannelManager', ([], {}), '()\n', (3580, 3582), False, 'from common.channel_manager import ChannelManager\n'), ((3637, 3692), 'os.path.join', 'os.path.join', (['self.storage_dir', '"""registered_faces.json"""'], {}), "(self.storage_dir, 'registered_faces.json')\n", (3649, 3692), False, 'import os\n'), ((5076, 5121), 'os.path.join', 'os.path.join', (['self.storage_dir', "(name + '.jpg')"], {}), "(self.storage_dir, name + '.jpg')\n", (5088, 5121), False, 'import os\n'), ((7173, 7230), 'logging.info', 'logging.info', (['"""clean fd:%s, conns:%s"""', 'sock_fileno', 'conns'], {}), "('clean fd:%s, conns:%s', sock_fileno, conns)\n", (7185, 7230), False, 'import logging\n'), ((9921, 9938), 'facial_recognition.src.facial_recognition_message_pb2.RegisterApp', 'pb2.RegisterApp', ([], {}), '()\n', (9936, 9938), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((9958, 9978), 'facial_recognition.src.facial_recognition_message_pb2.CommonResponse', 'pb2.CommonResponse', ([], {}), '()\n', (9976, 9978), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((12030, 12046), 'facial_recognition.src.facial_recognition_message_pb2.FaceResult', 'pb2.FaceResult', ([], {}), '()\n', (12044, 12046), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((15815, 15857), 'common.presenter_message_pb2.OpenChannelRequest', 'presenter_message_pb2.OpenChannelRequest', ([], {}), '()\n', (15855, 15857), True, 'import common.presenter_message_pb2 as presenter_message_pb2\n'), ((15877, 15920), 'common.presenter_message_pb2.OpenChannelResponse', 'presenter_message_pb2.OpenChannelResponse', ([], {}), '()\n', (15918, 15920), True, 'import common.presenter_message_pb2 as presenter_message_pb2\n'), ((17898, 17948), 'facial_recognition.src.facial_recognition_handler.FacialRecognitionHandler', 'FacialRecognitionHandler', (['channel_name', 'media_type'], {}), '(channel_name, media_type)\n', (17922, 17948), False, 'from facial_recognition.src.facial_recognition_handler import FacialRecognitionHandler\n'), ((18595, 18610), 'facial_recognition.src.facial_recognition_message_pb2.FrameInfo', 'pb2.FrameInfo', ([], {}), '()\n', (18608, 18610), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((18630, 18650), 'facial_recognition.src.facial_recognition_message_pb2.CommonResponse', 'pb2.CommonResponse', ([], {}), '()\n', (18648, 18650), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((21345, 21369), 'numpy.array', 'np.array', (['feture_vector1'], {}), 
'(feture_vector1)\n', (21353, 21369), True, 'import numpy as np\n'), ((21388, 21412), 'numpy.array', 'np.array', (['feture_vector2'], {}), '(feture_vector2)\n', (21396, 21412), True, 'import numpy as np\n'), ((21736, 21754), 'common.channel_manager.ChannelManager', 'ChannelManager', (['[]'], {}), '([])\n', (21750, 21754), False, 'from common.channel_manager import ChannelManager\n'), ((23722, 23736), 'facial_recognition.src.facial_recognition_message_pb2.FaceInfo', 'pb2.FaceInfo', ([], {}), '()\n', (23734, 23736), True, 'import facial_recognition.src.facial_recognition_message_pb2 as pb2\n'), ((24941, 24995), 'logging.error', 'logging.error', (['"""Save face %s to database failed"""', 'name'], {}), "('Save face %s to database failed', name)\n", (24954, 24995), False, 'import logging\n'), ((25365, 25402), 'logging.error', 'logging.error', (['"""unregister face fail"""'], {}), "('unregister face fail')\n", (25378, 25402), False, 'import logging\n'), ((4003, 4042), 'os.path.isfile', 'os.path.isfile', (['self.face_register_file'], {}), '(self.face_register_file)\n', (4017, 4042), False, 'import os\n'), ((4234, 4250), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4248, 4250), False, 'import threading\n'), ((4287, 4299), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4296, 4299), False, 'import json\n'), ((4489, 4531), 'os.path.join', 'os.path.join', (['self.storage_dir', "(i + '.jpg')"], {}), "(self.storage_dir, i + '.jpg')\n", (4501, 4531), False, 'import os\n'), ((10423, 10472), 'logging.error', 'logging.error', (['"""App %s is already exist."""', 'app_id'], {}), "('App %s is already exist.', app_id)\n", (10436, 10472), False, 'import logging\n'), ((12226, 12283), 'logging.warning', 'logging.warning', (['"""face id %s is already deleted"""', 'face_id'], {}), "('face id %s is already deleted', face_id)\n", (12241, 12283), False, 'import logging\n'), ((12445, 12505), 'logging.error', 'logging.error', (['"""get face feature error message: %s"""', 'err_msg'], {}), "('get face feature error message: %s', err_msg)\n", (12458, 12505), False, 'import logging\n'), ((16387, 16447), 'logging.error', 'logging.error', (['"""channel name %s is not exist."""', 'channel_name'], {}), "('channel name %s is not exist.', channel_name)\n", (16400, 16447), False, 'import logging\n'), ((17137, 17192), 'logging.error', 'logging.error', (['"""channel path %s is busy."""', 'channel_name'], {}), "('channel path %s is busy.', channel_name)\n", (17150, 17192), False, 'import logging\n'), ((17579, 17650), 'logging.error', 'logging.error', (['"""media type %s is not recognized."""', 'request.content_type'], {}), "('media type %s is not recognized.', request.content_type)\n", (17592, 17650), False, 'import logging\n'), ((18936, 18979), 'logging.error', 'logging.error', (['"""get channel handler failed"""'], {}), "('get channel handler failed')\n", (18949, 18979), False, 'import logging\n'), ((21437, 21460), 'numpy.linalg.norm', 'np.linalg.norm', (['vector1'], {}), '(vector1)\n', (21451, 21460), True, 'import numpy as np\n'), ((21465, 21488), 'numpy.linalg.norm', 'np.linalg.norm', (['vector2'], {}), '(vector2)\n', (21479, 21488), True, 'import numpy as np\n'), ((21507, 21531), 'numpy.dot', 'np.dot', (['vector1', 'vector2'], {}), '(vector1, vector2)\n', (21513, 21531), True, 'import numpy as np\n'), ((23976, 23993), 'threading.Event', 'threading.Event', ([], {}), '()\n', (23991, 23993), False, 'import threading\n'), ((24258, 24307), 'logging.warning', 'logging.warning', (['"""Register face %s timeout"""', 'name'], 
{}), "('Register face %s timeout', name)\n", (24273, 24307), False, 'import logging\n'), ((24528, 24622), 'logging.error', 'logging.error', (['"""Register face %s failed, reason:%s"""', 'name', "register_dict[name]['message']"], {}), "('Register face %s failed, reason:%s', name, register_dict[\n name]['message'])\n", (24541, 24622), False, 'import logging\n'), ((24832, 24878), 'logging.info', 'logging.info', (['"""Register face %s succeed"""', 'name'], {}), "('Register face %s succeed', name)\n", (24844, 24878), False, 'import logging\n'), ((4551, 4577), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (4565, 4577), False, 'import os\n'), ((5335, 5353), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (5348, 5353), False, 'import logging\n'), ((9592, 9610), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (9605, 9610), False, 'import logging\n'), ((10728, 10777), 'logging.error', 'logging.error', (['"""App number reach the upper limit"""'], {}), "('App number reach the upper limit')\n", (10741, 10777), False, 'import logging\n'), ((19917, 19970), 'logging.error', 'logging.error', (['"""feature_vector length not equal 1024"""'], {}), "('feature_vector length not equal 1024')\n", (19930, 19970), False, 'import logging\n'), ((26258, 26307), 'os.path.join', 'os.path.join', (['self.server.storage_dir', "(i + '.jpg')"], {}), "(self.server.storage_dir, i + '.jpg')\n", (26270, 26307), False, 'import os\n'), ((11004, 11048), 'logging.error', 'logging.error', (['"""App type %s error"""', 'app_type'], {}), "('App type %s error', app_type)\n", (11017, 11048), False, 'import logging\n'), ((13401, 13454), 'logging.error', 'logging.error', (['"""feature_vector length not equal 1024"""'], {}), "('feature_vector length not equal 1024')\n", (13414, 13454), False, 'import logging\n'), ((14934, 14969), 'json.dump', 'json.dump', (['self.registered_faces', 'f'], {}), '(self.registered_faces, f)\n', (14943, 14969), False, 'import json\n'), ((15243, 15261), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (15256, 15261), False, 'import logging\n'), ((26426, 26444), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (26439, 26444), False, 'import logging\n'), ((6444, 6486), 'os.path.join', 'os.path.join', (['self.storage_dir', "(i + '.jpg')"], {}), "(self.storage_dir, i + '.jpg')\n", (6456, 6486), False, 'import os\n'), ((6540, 6561), 'os.remove', 'os.remove', (['image_file'], {}), '(image_file)\n', (6549, 6561), False, 'import os\n'), ((11284, 11330), 'logging.error', 'logging.error', (['"""App id %s is too long"""', 'app_id'], {}), "('App id %s is too long', app_id)\n", (11297, 11330), False, 'import logging\n'), ((6371, 6406), 'json.dump', 'json.dump', (['self.registered_faces', 'f'], {}), '(self.registered_faces, f)\n', (6380, 6406), False, 'import json\n'), ((6648, 6666), 'logging.error', 'logging.error', (['exp'], {}), '(exp)\n', (6661, 6666), False, 'import logging\n'), ((8582, 8635), 'logging.error', 'logging.error', (['"""Not recognized msg type %s"""', 'msg_name'], {}), "('Not recognized msg type %s', msg_name)\n", (8595, 8635), False, 'import logging\n')]
|
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import geocat.viz.util as gvutil
path = r'H:\Python project 2021\climate_data_analysis_with_python\data\sst.mnmean.nc'
ds= xr.open_dataset(path)
# time slicing
sst = ds.sst.sel(time=slice('1920-01-01','2020-12-01'))
# anomaly with respect to 1971-2000 period
clm = ds.sst.sel(time=slice('1971-01-01','2000-12-01')).groupby('time.month').mean(dim='time')
anm = (sst.groupby('time.month') - clm)
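# Each anomaly is the monthly SST minus the 1971-2000 climatological mean for that calendar month.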
time = anm.time
itime=np.arange(time.size)
def wgt_areaave(indat, latS, latN, lonW, lonE):
lat=indat.lat
lon=indat.lon
if ( ((lonW < 0) or (lonE < 0 )) and (lon.values.min() > -1) ):
anm=indat.assign_coords(lon=( (lon + 180) % 360 - 180) )
lon=( (lon + 180) % 360 - 180)
else:
anm=indat
iplat = lat.where( (lat >= latS ) & (lat <= latN), drop=True)
iplon = lon.where( (lon >= lonW ) & (lon <= lonE), drop=True)
# print(iplat)
# print(iplon)
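  # Weight each grid cell by cos(latitude) so the area average is not biased toward the poles.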
wgt = np.cos(np.deg2rad(lat))
odat=anm.sel(lat=iplat,lon=iplon).weighted(wgt).mean(("lon", "lat"), skipna=True)
return(odat)
# bob sst
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_ranm = bob_anm.rolling(time=7, center=True).mean('time')
##
# Assign each bar a color based on the sign of the anomaly (orange for positive, blue for negative)
colors = ['C1' if (value > 0) else 'C0' for value in bob_anm]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Plot bar chart
ax1.bar(itime, bob_anm, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, bob_ranm, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
ylim=(-1.5, 1),
yticks=np.linspace(-1.5, 1, 6),
yticklabels=np.linspace(-1.5, 1, 6),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr))
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
maintitle="SSTA in BoB (ERSST)",
ylabel='Anomalies',
xlabel= 'Year',
maintitlefontsize=18,
labelfontsize=15)
plt.tight_layout()
plt.savefig("bob_anomalies.png",dpi = 300)
########## BoB SST with respect to ENSO and IOD (ERSST)
#nino 3.4 and dipole mode index plot together
nino = wgt_areaave(anm,-5,5,-170,-120)
nino = nino.rolling(time=7, center=True).mean('time')
#IOD west: 50 ° E to 70 ° E and 10 ° S to 10 ° N.
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90 ° E to 110 ° E and 10 ° S to 0 ° S.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
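# Dipole Mode Index (DMI): western-box SSTA minus eastern-box SSTA.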
dmi = dmi.rolling(time=7, center=True).mean('time')
### Figure Plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.set_title('BoB anomaly with repect to ENSO')
ax1.plot(time, bob_ranm, '-', linewidth=1)
ax1.plot(time, nino, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['BoB anomaly','Nino3.4 Index'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax2.set_title('BoB anomaly with respect to IOD')
ax2.plot(time, bob_ranm, '-', linewidth=1)
ax2.plot(time, dmi, '-', linewidth=1)
ax2.tick_params(length = 7,right=True,labelsize=12)
ax2.legend(['BoB anomaly','Dipole Mode Index'],fontsize=12,frameon=False)
ax2.set_ylabel('SSTA (°C)',fontsize=12)
# Show the plot
plt.draw()
plt.tight_layout()
plt.savefig("nino-bob-dmi.png",dpi = 300)
####################### (Ploting Nino 3.4 Index)
nino = wgt_areaave(anm,-5,5,-170,-120)
rnino = nino.rolling(time=7, center=True).mean('time')
#nino standard
ninoSD=nino/nino.std(dim='time')
rninoSD=ninoSD.rolling(time=7, center=True).mean('time')
# -- -- -- -- -- -- -- -- - -- - -- --- -- - -- - -- - - -- - -
# -- figure plot
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
# Assign each bar a color based on the sign of the anomaly (orange for positive, blue for negative)
colors = ['C1' if (value > 0) else 'C0' for value in ninoSD]
# Plot bar chart
ax1.bar(itime, nino, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rnino, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
ax2.bar(itime, ninoSD, align='edge', edgecolor="none", color=colors, width=1.0)
ax2.plot(itime, rninoSD, color="black", linewidth=1.5)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
ylim=(-3, 3.5),
yticks=np.linspace(-3, 3, 7),
yticklabels=np.linspace(-3, 3, 7),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr) )
gvutil.set_axes_limits_and_ticks(ax2,
ylim=(-3, 3.5),
yticks=np.linspace(-3, 3, 7),
yticklabels=np.linspace(-3, 3, 7),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr) )
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
gvutil.add_major_minor_ticks(ax2,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
maintitle="SSTA in Nino3.4 region",
ylabel='Anomalies',
maintitlefontsize=18,
labelfontsize=15)
gvutil.set_titles_and_labels(ax2,
maintitle="Nino3.4 Index",
ylabel='Standardized',
xlabel='Year',
maintitlefontsize=18,
labelfontsize=15)
plt.draw()
plt.tight_layout()
plt.savefig("nino3.4_ERSST.png",dpi=300)
############### (Ploting DMI Index)
iod_west = wgt_areaave(anm,-10,10,50,70)
# IOD east: 90 ° E to 110 ° E and 10 ° S to 0 ° S.
iod_east = wgt_areaave(anm,-10,0,90,110)
dmi = iod_west - iod_east
rdmi = dmi.rolling(time=7, center=True).mean('time')
colors = ['C1' if (value > 0) else 'C0' for value in dmi]
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
# Plot bar chart
ax1.bar(itime, dmi, align='edge', edgecolor="none", color=colors, width=1.0)
ax1.plot(itime, rdmi, color="black", linewidth=1.5)
ax1.legend(['7-month running mean'],fontsize=12,frameon=False)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax1,
x_minor_per_major=4,
y_minor_per_major=5,
labelsize=12)
# Use geocat.viz.util convenience function to set axes parameters
ystr = 1920
yend = 2020
dyr = 20
ist, = np.where(time == pd.Timestamp(year=ystr, month=1, day=1) )
iet, = np.where(time == pd.Timestamp(year=yend, month=1, day=1) )
gvutil.set_axes_limits_and_ticks(ax1,
ylim=(-1.5, 1.90),
yticks=np.linspace(-1, 1.5, 6),
yticklabels=np.linspace(-1, 1.5, 6),
xlim=(itime[0], itime[-1]),
xticks=itime[ist[0]:iet[0]+1:12*dyr],
xticklabels=np.arange(ystr, yend+1, dyr))
# Use geocat.viz.util convenience function to set titles and labels
gvutil.set_titles_and_labels(ax1,
maintitle=" Dipole Mode Index",
ylabel='Anomalies',
xlabel= 'Year',
maintitlefontsize=18,
labelfontsize=15)
plt.tight_layout()
plt.savefig("dmi_ersst.png",dpi = 300)
### (Global vs BoB time Series -ERSST v5)
# global vs bob sst anomaly
glob_anom = anm.mean(('lon','lat'),skipna = True)
glob_anom_ra = glob_anom.rolling(time=12, center=True).mean('time')
bob_anm = wgt_areaave(anm,5,25,80,100)
bob_anm_ra = bob_anm.rolling(time=12, center=True).mean('time')
xr.corr(glob_anom_ra,bob_anm_ra)
# plot
fig = plt.figure(figsize=[8,5])
ax1 = fig.add_subplot(111)
ax1.set_title('Global SSTA & BOB SSTA with 1 year moving average (ERSST v5)')
ax1.plot(time, glob_anom_ra, '-', linewidth=1)
ax1.plot(time, bob_anm_ra, '-', linewidth=1)
ax1.tick_params(length = 7,right=True,labelsize=12)
ax1.legend(['Globally averaged','BoB averaged'],fontsize=12,frameon=False)
ax1.set_ylabel('SSTA (°C)',fontsize=12)
ax1.set_xlabel('Year',fontsize=12)
ax1.text(pd.to_datetime('1975-01-01'),-0.8,'Correlation Coefficient = 0.89',fontsize=12)
#ax1.axis(xmin=pd.Timestamp("1982-01"), xmax=pd.Timestamp("2020-12"))
# Show the plot
plt.draw()
plt.tight_layout()
plt.savefig("bobvsgloobalanom_ersst.png",dpi = 300)
|
[
"matplotlib.pyplot.savefig",
"geocat.viz.util.add_major_minor_ticks",
"pandas.Timestamp",
"pandas.to_datetime",
"matplotlib.pyplot.figure",
"xarray.corr",
"numpy.deg2rad",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"matplotlib.pyplot.draw",
"xarray.open_dataset",
"geocat.viz.util.set_titles_and_labels",
"numpy.arange"
] |
[((215, 236), 'xarray.open_dataset', 'xr.open_dataset', (['path'], {}), '(path)\n', (230, 236), True, 'import xarray as xr\n'), ((509, 529), 'numpy.arange', 'np.arange', (['time.size'], {}), '(time.size)\n', (518, 529), True, 'import numpy as np\n'), ((1330, 1356), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (1340, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1758), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax1'], {'x_minor_per_major': '(4)', 'y_minor_per_major': '(5)', 'labelsize': '(12)'}), '(ax1, x_minor_per_major=4, y_minor_per_major=5,\n labelsize=12)\n', (1693, 1758), True, 'import geocat.viz.util as gvutil\n'), ((2571, 2717), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax1'], {'maintitle': '"""SSTA in BoB (ERSST)"""', 'ylabel': '"""Anomalies"""', 'xlabel': '"""Year"""', 'maintitlefontsize': '(18)', 'labelfontsize': '(15)'}), "(ax1, maintitle='SSTA in BoB (ERSST)', ylabel=\n 'Anomalies', xlabel='Year', maintitlefontsize=18, labelfontsize=15)\n", (2599, 2717), True, 'import geocat.viz.util as gvutil\n'), ((2859, 2877), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2875, 2877), True, 'import matplotlib.pyplot as plt\n'), ((2878, 2919), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bob_anomalies.png"""'], {'dpi': '(300)'}), "('bob_anomalies.png', dpi=300)\n", (2889, 2919), True, 'import matplotlib.pyplot as plt\n'), ((3428, 3462), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 8)'}), '(2, 1, figsize=(8, 8))\n', (3440, 3462), True, 'import matplotlib.pyplot as plt\n'), ((4076, 4086), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4084, 4086), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4105), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4103, 4105), True, 'import matplotlib.pyplot as plt\n'), ((4106, 4146), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""nino-bob-dmi.png"""'], {'dpi': '(300)'}), "('nino-bob-dmi.png', dpi=300)\n", (4117, 4146), True, 'import matplotlib.pyplot as plt\n'), ((4590, 4624), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 8)'}), '(2, 1, figsize=(8, 8))\n', (4602, 4624), True, 'import matplotlib.pyplot as plt\n'), ((6253, 6346), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax1'], {'x_minor_per_major': '(4)', 'y_minor_per_major': '(5)', 'labelsize': '(12)'}), '(ax1, x_minor_per_major=4, y_minor_per_major=5,\n labelsize=12)\n', (6281, 6346), True, 'import geocat.viz.util as gvutil\n'), ((6431, 6524), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax2'], {'x_minor_per_major': '(4)', 'y_minor_per_major': '(5)', 'labelsize': '(12)'}), '(ax2, x_minor_per_major=4, y_minor_per_major=5,\n labelsize=12)\n', (6459, 6524), True, 'import geocat.viz.util as gvutil\n'), ((6676, 6809), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax1'], {'maintitle': '"""SSTA in Nino3.4 region"""', 'ylabel': '"""Anomalies"""', 'maintitlefontsize': '(18)', 'labelfontsize': '(15)'}), "(ax1, maintitle='SSTA in Nino3.4 region',\n ylabel='Anomalies', maintitlefontsize=18, labelfontsize=15)\n", (6704, 6809), True, 'import geocat.viz.util as gvutil\n'), ((6923, 7066), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax2'], {'maintitle': '"""Nino3.4 Index"""', 'ylabel': '"""Standardized"""', 'xlabel': '"""Year"""', 
'maintitlefontsize': '(18)', 'labelfontsize': '(15)'}), "(ax2, maintitle='Nino3.4 Index', ylabel=\n 'Standardized', xlabel='Year', maintitlefontsize=18, labelfontsize=15)\n", (6951, 7066), True, 'import geocat.viz.util as gvutil\n'), ((7208, 7218), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7216, 7218), True, 'import matplotlib.pyplot as plt\n'), ((7219, 7237), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7235, 7237), True, 'import matplotlib.pyplot as plt\n'), ((7238, 7279), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""nino3.4_ERSST.png"""'], {'dpi': '(300)'}), "('nino3.4_ERSST.png', dpi=300)\n", (7249, 7279), True, 'import matplotlib.pyplot as plt\n'), ((7614, 7640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (7624, 7640), True, 'import matplotlib.pyplot as plt\n'), ((7955, 8048), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax1'], {'x_minor_per_major': '(4)', 'y_minor_per_major': '(5)', 'labelsize': '(12)'}), '(ax1, x_minor_per_major=4, y_minor_per_major=5,\n labelsize=12)\n', (7983, 8048), True, 'import geocat.viz.util as gvutil\n'), ((8864, 9009), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax1'], {'maintitle': '""" Dipole Mode Index"""', 'ylabel': '"""Anomalies"""', 'xlabel': '"""Year"""', 'maintitlefontsize': '(18)', 'labelfontsize': '(15)'}), "(ax1, maintitle=' Dipole Mode Index', ylabel=\n 'Anomalies', xlabel='Year', maintitlefontsize=18, labelfontsize=15)\n", (8892, 9009), True, 'import geocat.viz.util as gvutil\n'), ((9151, 9169), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9167, 9169), True, 'import matplotlib.pyplot as plt\n'), ((9170, 9207), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dmi_ersst.png"""'], {'dpi': '(300)'}), "('dmi_ersst.png', dpi=300)\n", (9181, 9207), True, 'import matplotlib.pyplot as plt\n'), ((9523, 9556), 'xarray.corr', 'xr.corr', (['glob_anom_ra', 'bob_anm_ra'], {}), '(glob_anom_ra, bob_anm_ra)\n', (9530, 9556), True, 'import xarray as xr\n'), ((9572, 9598), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (9582, 9598), True, 'import matplotlib.pyplot as plt\n'), ((10178, 10188), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (10186, 10188), True, 'import matplotlib.pyplot as plt\n'), ((10189, 10207), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10205, 10207), True, 'import matplotlib.pyplot as plt\n'), ((10208, 10258), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bobvsgloobalanom_ersst.png"""'], {'dpi': '(300)'}), "('bobvsgloobalanom_ersst.png', dpi=300)\n", (10219, 10258), True, 'import matplotlib.pyplot as plt\n'), ((10009, 10037), 'pandas.to_datetime', 'pd.to_datetime', (['"""1975-01-01"""'], {}), "('1975-01-01')\n", (10023, 10037), True, 'import pandas as pd\n'), ((975, 990), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (985, 990), True, 'import numpy as np\n'), ((1965, 2004), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'ystr', 'month': '(1)', 'day': '(1)'}), '(year=ystr, month=1, day=1)\n', (1977, 2004), True, 'import pandas as pd\n'), ((2031, 2070), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'yend', 'month': '(1)', 'day': '(1)'}), '(year=yend, month=1, day=1)\n', (2043, 2070), True, 'import pandas as pd\n'), ((2200, 2223), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(1)', '(6)'], {}), '(-1.5, 1, 6)\n', (2211, 2223), True, 'import numpy as 
np\n'), ((2270, 2293), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(1)', '(6)'], {}), '(-1.5, 1, 6)\n', (2281, 2293), True, 'import numpy as np\n'), ((2472, 2502), 'numpy.arange', 'np.arange', (['ystr', '(yend + 1)', 'dyr'], {}), '(ystr, yend + 1, dyr)\n', (2481, 2502), True, 'import numpy as np\n'), ((5214, 5253), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'ystr', 'month': '(1)', 'day': '(1)'}), '(year=ystr, month=1, day=1)\n', (5226, 5253), True, 'import pandas as pd\n'), ((5280, 5319), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'yend', 'month': '(1)', 'day': '(1)'}), '(year=yend, month=1, day=1)\n', (5292, 5319), True, 'import pandas as pd\n'), ((5449, 5470), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(7)'], {}), '(-3, 3, 7)\n', (5460, 5470), True, 'import numpy as np\n'), ((5517, 5538), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(7)'], {}), '(-3, 3, 7)\n', (5528, 5538), True, 'import numpy as np\n'), ((5717, 5747), 'numpy.arange', 'np.arange', (['ystr', '(yend + 1)', 'dyr'], {}), '(ystr, yend + 1, dyr)\n', (5726, 5747), True, 'import numpy as np\n'), ((5876, 5897), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(7)'], {}), '(-3, 3, 7)\n', (5887, 5897), True, 'import numpy as np\n'), ((5944, 5965), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(7)'], {}), '(-3, 3, 7)\n', (5955, 5965), True, 'import numpy as np\n'), ((6144, 6174), 'numpy.arange', 'np.arange', (['ystr', '(yend + 1)', 'dyr'], {}), '(ystr, yend + 1, dyr)\n', (6153, 6174), True, 'import numpy as np\n'), ((8255, 8294), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'ystr', 'month': '(1)', 'day': '(1)'}), '(year=ystr, month=1, day=1)\n', (8267, 8294), True, 'import pandas as pd\n'), ((8321, 8360), 'pandas.Timestamp', 'pd.Timestamp', ([], {'year': 'yend', 'month': '(1)', 'day': '(1)'}), '(year=yend, month=1, day=1)\n', (8333, 8360), True, 'import pandas as pd\n'), ((8493, 8516), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.5)', '(6)'], {}), '(-1, 1.5, 6)\n', (8504, 8516), True, 'import numpy as np\n'), ((8563, 8586), 'numpy.linspace', 'np.linspace', (['(-1)', '(1.5)', '(6)'], {}), '(-1, 1.5, 6)\n', (8574, 8586), True, 'import numpy as np\n'), ((8765, 8795), 'numpy.arange', 'np.arange', (['ystr', '(yend + 1)', 'dyr'], {}), '(ystr, yend + 1, dyr)\n', (8774, 8795), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 06:34:04 2015
@author: tanay
"""
from lasagne.layers import InputLayer, DropoutLayer, DenseLayer
from lasagne.updates import nesterov_momentum
from lasagne.objectives import binary_crossentropy
from nolearn.lasagne import NeuralNet
import theano
from theano import tensor as T
from theano.tensor.nnet import sigmoid
from sklearn import metrics
from sklearn.utils import shuffle
import numpy as np
learning_rate = theano.shared(np.float32(0.1))
input_size = Xtrh.shape[1]  # number of input features (Xtrh is assumed to be defined/loaded elsewhere)
class AdjustVariable(object):
def __init__(self, variable, target, half_life=20):
self.variable = variable
self.target = target
self.half_life = half_life
def __call__(self, nn, train_history):
delta = self.variable.get_value() - self.target
delta /= 2**(1.0/self.half_life)
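        # Exponential decay: the gap between the shared variable and its target halves every half_life epochs.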
self.variable.set_value(np.float32(self.target + delta))
net = NeuralNet(
layers=[
('input', InputLayer),
('hidden1', DenseLayer),
('dropout1', DropoutLayer),
('hidden2', DenseLayer),
('dropout2', DropoutLayer),
('output', DenseLayer),
],
# layer parameters:
input_shape=(None, input_size),
hidden1_num_units=400,
dropout1_p=0.4,
hidden2_num_units=200,
dropout2_p=0.4,
output_nonlinearity=sigmoid,
output_num_units=4,
# optimization method:
update=nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=0.899,
# Decay the learning rate
on_epoch_finished=[
AdjustVariable(learning_rate, target=0, half_life=4),
],
# This is silly, but we don't want a stratified K-Fold here
# To compensate we need to pass in the y_tensor_type and the loss.
regression=True,
y_tensor_type = T.imatrix,
objective_loss_function = binary_crossentropy,
max_epochs=75,
eval_size=0.1,
verbose=1,
)
X, y = shuffle(Xtrh, y, random_state=123)
net.fit(X, y)
_, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
probas = net.predict_proba(X_valid)[:,0]
print("ROC score", metrics.roc_auc_score(y_valid, probas))
|
[
"sklearn.utils.shuffle",
"numpy.float32",
"sklearn.metrics.roc_auc_score"
] |
[((1809, 1843), 'sklearn.utils.shuffle', 'shuffle', (['Xtrh', 'y'], {'random_state': '(123)'}), '(Xtrh, y, random_state=123)\n', (1816, 1843), False, 'from sklearn.utils import shuffle\n'), ((479, 494), 'numpy.float32', 'np.float32', (['(0.1)'], {}), '(0.1)\n', (489, 494), True, 'import numpy as np\n'), ((1986, 2024), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_valid', 'probas'], {}), '(y_valid, probas)\n', (2007, 2024), False, 'from sklearn import metrics\n'), ((875, 906), 'numpy.float32', 'np.float32', (['(self.target + delta)'], {}), '(self.target + delta)\n', (885, 906), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn import metrics
from sklearn.base import BaseEstimator
from sklearn.neighbors import KNeighborsRegressor
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# Fixed random seed
np.random.seed(1)
# Constants
FILENAME = 'datos/data_regression.csv'
TEST_SIZE = 0.2
N_JOBS = 6
VISUALIZE_TRAIN_SET = False
CROSS_VALIDATION = False
CROSS_VALIDATION_KNR = False
VARIANCE_THRESHOLD = 1e-3
POL_DEGREE = 2
PCA_EXPLAINED_VARIANCE = 0.99999
K_SPLITS = 5
REG_PARAM_VALUES1 = [0.1, 1, 5, 10, 20]
REG_PARAM_VALUES2 = [4, 4.5, 5, 5.5, 6]
REG_PARAM = 5
NUM_NEIGHBORS_VALUES = [5, 10, 15, 20]
NUM_NEIGHBORS = 5
def readData(filename):
X = []
y = []
with open(filename) as f:
for line in f:
attribs_label = line.split(",")
X.append(attribs_label[:-1])
y.append(attribs_label[-1])
X.pop(0)
y.pop(0)
X = np.array(X, np.float64)
y = np.array(y, np.float64)
return X, y
def tableCVResults(cv_results, precision=5):
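    # Mean E_in / E_cv are reported as 1 - mean train/test score (presumably R^2 for these estimators)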
row = list(cv_results["params"][0].keys())+["mean E_in","mean E_cv"]
format_row = "{:<20}" * len(row)
print(format_row.format(*row))
for i in range(len(cv_results["params"])):
row = list(cv_results["params"][i].values())
row.append(round(1-cv_results["mean_train_score"][i],precision))
row.append(round(1-cv_results["mean_test_score"][i],precision))
print(format_row.format(*row))
class PseudoinverseLinearRegression(BaseEstimator):
def __init__(self, reg_param=0.0):
self.reg_param = reg_param # regularization parameter (lambda)
    # Fit the model
def fit(self, X, y):
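        # Closed-form ridge solution of the normal equations: w = (X^T X + reg_param * I)^(-1) X^T y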
inverse = np.linalg.inv(X.T @ X + self.reg_param*np.identity(X.shape[1]))
self.w = np.dot( inverse, np.dot(X.T,y) )
    # Predict target values
def predict(self, X):
return np.dot(X,self.w)
    # Mean Squared Error
def mse(self, X, y):
return metrics.mean_squared_error(y,self.predict(X))
    # Mean Absolute Error
def mae(self, X, y):
return metrics.mean_absolute_error(y,self.predict(X))
    # Coefficient of determination (R^2)
def R2(self, X, y):
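        # R^2 = 1 - SS_res/SS_tot, computed here as 1 - MSE/Var(y)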
return 1-self.mse(X,y)/np.var(y)
# Score: R^2
def score(self, X, y):
return self.R2(X,y)
class KNR(BaseEstimator):
    def __init__(self, num_neighbors=5, weight_function='uniform'):
        self.num_neighbors = num_neighbors # number of neighbors
        self.weight_function = weight_function # neighbor weighting scheme ('uniform' or 'distance')
    # Fit the model
    def fit(self, X, y):
        self.model = KNeighborsRegressor(n_neighbors=self.num_neighbors,
                                            weights=self.weight_function,
                                            n_jobs=N_JOBS)
self.model.fit(X,y)
    # Predict target values
def predict(self, X):
return self.model.predict(X)
    # Mean Squared Error
def mse(self, X, y):
return metrics.mean_squared_error(y,self.predict(X))
    # Mean Absolute Error
def mae(self, X, y):
return metrics.mean_absolute_error(y,self.predict(X))
    # Coefficient of determination (R^2)
def R2(self, X, y):
return self.model.score(X,y)
# Score: R^2
def score(self, X, y):
return self.R2(X,y)
if __name__ == "__main__":
X, y = readData(FILENAME)
    # Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=42)
    # Visualize the data with histograms and in a 2D space via PCA and t-SNE
if VISUALIZE_TRAIN_SET:
print("#################################################################")
print("########## VISUALIZACIÓN DEL CONJUNTO DE ENTRENAMIENTO ##########")
print("#################################################################\n")
print("Histograma con las temperaturas críticas y sus frec. absolutas")
plt.hist(y_train, bins=37, density=False, cumulative=False)
plt.xlabel("Temperatura crítica")
plt.ylabel("Frecuencia absoluta")
plt.title("Histograma con las temperaturas críticas y sus frec. absolutas")
plt.grid(True)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("Histograma con las temperaturas críticas y sus frec. relativas acum.")
plt.hist(y_train, bins=37, density=True, cumulative=True)
plt.xlabel("Temperatura crítica")
plt.ylabel("Frecuencia relativa acumulada")
plt.title("Histograma con las temperaturas críticas y sus frec. relativas acum.")
plt.grid(True)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
cmap='plasma'
alpha=0.2
X_train_95 = X_train[np.where(y_train<95.0)]
y_train_95 = y_train[np.where(y_train<95.0)]
print("Representación de los datos con reducción de dimensionalidad usando PCA")
X_PCA = PCA(n_components=2, random_state=42).fit_transform(X_train_95)
plt.scatter(X_PCA[:,0], X_PCA[:,1], c=y_train_95, cmap=cmap, alpha=alpha)
plt.colorbar()
plt.title("Representación de los datos en 2D usando PCA")
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("Representación de los datos con reducción de dimensionalidad usando t-SNE")
X_TSNE = TSNE(n_components=2, init=X_PCA).fit_transform(X_train_95)
plt.scatter(X_TSNE[:,0], X_TSNE[:,1], c=y_train_95, cmap=cmap, alpha=alpha)
plt.colorbar()
plt.title("Representación de los datos en 2D usando t-SNE")
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("##################################")
print("########## PREPROCESADO ##########")
print("##################################\n")
    # Pearson correlation coefficient matrix for the initial data
    # (constant features are removed beforehand)
correlation_matrix = np.corrcoef(np.transpose(VarianceThreshold().fit_transform(X_train)))
print("Matriz de coeficientes de correlación de Pearson (datos iniciales)")
plt.matshow(correlation_matrix, cmap='plasma')
plt.colorbar()
plt.title("Matriz de coef. de corr. de Pearson \n(datos iniciales)", pad=40.0)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
print("Evolución del número de características:")
print("\tDatos iniciales:", X_train.shape[1])
    # Removal of features with very low variance
variance_threshold = VarianceThreshold(VARIANCE_THRESHOLD)
X_train = variance_threshold.fit_transform(X_train)
X_test = variance_threshold.transform(X_test)
print("\tVarianceThreshold:",X_train.shape[1])
    # Expansion with non-linear features (polynomials of bounded degree)
    # It also adds the feature associated with the intercept term
polynomial_features = PolynomialFeatures(POL_DEGREE)
X_train = polynomial_features.fit_transform(X_train)
X_test = polynomial_features.transform(X_test)
print("\tPolynomialFeatures:",X_train.shape[1])
    # Standardization (features with mean 0 and variance 1)
standard_scaler = StandardScaler()
X_train = standard_scaler.fit_transform(X_train)
X_test = standard_scaler.transform(X_test)
print("\tStandardScaler:",X_train.shape[1])
    # Dimensionality reduction via Principal Component Analysis
pca = PCA(n_components=PCA_EXPLAINED_VARIANCE)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
print("\tPCA:",X_train.shape[1])
input("\n--- Pulsar tecla para continuar ---\n")
    # Pearson correlation coefficient matrix for the preprocessed data
correlation_matrix = np.corrcoef(np.transpose(X_train))
print("Matriz de coeficientes de correlación de Pearson (datos preprocesados)")
plt.matshow(correlation_matrix, cmap='plasma')
plt.colorbar()
plt.title("Matriz de coef. de corr. de Pearson \n(datos preprocesados)", pad=40.0)
plt.show()
input("\n--- Pulsar tecla para continuar ---\n")
    # Creation of the Linear Regression model that uses the pseudoinverse
plr = PseudoinverseLinearRegression(reg_param=REG_PARAM)
    # Add the intercept term to the training and test samples
X_train = np.hstack(( np.ones((X_train.shape[0],1)), X_train ))
X_test = np.hstack(( np.ones((X_test.shape[0],1)), X_test ))
if CROSS_VALIDATION:
print("######################################")
print("########## CROSS-VALIDATION ##########")
print("######################################\n")
param_grid = {'reg_param':REG_PARAM_VALUES1}
cv_searcher = GridSearchCV(plr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True)
cv_searcher.fit(X_train, y_train)
print()
tableCVResults(cv_searcher.cv_results_)
print()
print("Mejores hiperparámetros:",cv_searcher.best_params_)
print("E_in medio:",round(1-cv_searcher.cv_results_["mean_train_score"][np.where(cv_searcher.cv_results_["rank_test_score"]==1)[0][0]],5))
print("E_cv medio:",round(1-cv_searcher.best_score_,5))
print()
param_grid = {'reg_param':REG_PARAM_VALUES2}
cv_searcher = GridSearchCV(plr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True)
cv_searcher.fit(X_train, y_train)
print()
tableCVResults(cv_searcher.cv_results_)
print()
print("Mejores hiperparámetros:",cv_searcher.best_params_)
print("E_in medio:",round(1-cv_searcher.cv_results_["mean_train_score"][np.where(cv_searcher.cv_results_["rank_test_score"]==1)[0][0]],5))
print("E_cv medio:",round(1-cv_searcher.best_score_,5))
print()
plr.set_params(**(cv_searcher.best_params_))
input("\n--- Pulsar tecla para continuar ---\n")
print("##########################################################")
print("########## EVALUACIÓN DE LA HIPÓTESIS FINAL ##############")
print("##########################################################\n")
plr.fit(X_train, y_train)
print("\nE_in =",round(1-plr.R2(X_train,y_train),5))
print("R²_in =",round(plr.R2(X_train,y_train),5))
print("MAE_in =",round(plr.mae(X_train,y_train),5))
print("\nE_test =",round(1-plr.R2(X_test,y_test),5))
print("R²_test =",round(plr.R2(X_test,y_test),5))
print("MAE_test:",round(plr.mae(X_test,y_test),5))
input("\n--- Pulsar tecla para continuar ---\n")
    # Creation of the KNR model
    knr = KNR(num_neighbors=NUM_NEIGHBORS)
    # Remove the intercept term from the training and test samples
X_train = X_train[:,1:]
X_test = X_test[:,1:]
if CROSS_VALIDATION_KNR:
print("############################################")
print("########## CROSS-VALIDATION (KNR) ##########")
print("############################################\n")
param_grid = {'num_neighbors':NUM_NEIGHBORS_VALUES}
cv_searcher = GridSearchCV(knr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True)
cv_searcher.fit(X_train, y_train)
print()
tableCVResults(cv_searcher.cv_results_)
print()
print("Mejores hiperparámetros:",cv_searcher.best_params_)
print("E_in medio:",round(1-cv_searcher.cv_results_["mean_train_score"][np.where(cv_searcher.cv_results_["rank_test_score"]==1)[0][0]],5))
print("E_cv medio:",round(1-cv_searcher.best_score_,5))
print()
knr.set_params(**(cv_searcher.best_params_))
input("\n--- Pulsar tecla para continuar ---\n")
print("################################################################")
print("########## EVALUACIÓN DE LA HIPÓTESIS FINAL (KNR) ##############")
print("################################################################\n")
knr.fit(X_train,y_train)
print("\nE_in =",round(1-knr.R2(X_train,y_train),5))
print("R²_in =",round(knr.R2(X_train,y_train),5))
print("MAE_in =",round(knr.mae(X_train,y_train),5))
print("\nE_test =",round(1-knr.R2(X_test,y_test),5))
print("R²_test =",round(knr.R2(X_test,y_test),5))
print("MAE_test:",round(knr.mae(X_test,y_test),5))
|
[
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.hist",
"sklearn.feature_selection.VarianceThreshold",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sklearn.decomposition.PCA",
"numpy.where",
"matplotlib.pyplot.xlabel",
"sklearn.manifold.TSNE",
"numpy.dot",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.identity",
"numpy.ones",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.title",
"matplotlib.pyplot.matshow",
"numpy.transpose",
"matplotlib.pyplot.show",
"sklearn.neighbors.KNeighborsRegressor",
"matplotlib.pyplot.colorbar",
"sklearn.preprocessing.StandardScaler",
"numpy.var"
] |
[((491, 508), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (505, 508), True, 'import numpy as np\n'), ((1217, 1240), 'numpy.array', 'np.array', (['X', 'np.float64'], {}), '(X, np.float64)\n', (1225, 1240), True, 'import numpy as np\n'), ((1250, 1273), 'numpy.array', 'np.array', (['y', 'np.float64'], {}), '(y, np.float64)\n', (1258, 1273), True, 'import numpy as np\n'), ((3826, 3886), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'TEST_SIZE', 'random_state': '(42)'}), '(X, y, test_size=TEST_SIZE, random_state=42)\n', (3842, 3886), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((6819, 6865), 'matplotlib.pyplot.matshow', 'plt.matshow', (['correlation_matrix'], {'cmap': '"""plasma"""'}), "(correlation_matrix, cmap='plasma')\n", (6830, 6865), True, 'import matplotlib.pyplot as plt\n'), ((6871, 6885), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6883, 6885), True, 'import matplotlib.pyplot as plt\n'), ((6891, 6977), 'matplotlib.pyplot.title', 'plt.title', (['"""Matriz de coef. de corr. de Pearson \n(datos iniciales)"""'], {'pad': '(40.0)'}), '("""Matriz de coef. de corr. de Pearson \n(datos iniciales)""", pad\n =40.0)\n', (6900, 6977), True, 'import matplotlib.pyplot as plt\n'), ((6975, 6985), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6983, 6985), True, 'import matplotlib.pyplot as plt\n'), ((7256, 7293), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', (['VARIANCE_THRESHOLD'], {}), '(VARIANCE_THRESHOLD)\n', (7273, 7293), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((7641, 7671), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['POL_DEGREE'], {}), '(POL_DEGREE)\n', (7659, 7671), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), ((7930, 7946), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7944, 7946), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), ((8196, 8236), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'PCA_EXPLAINED_VARIANCE'}), '(n_components=PCA_EXPLAINED_VARIANCE)\n', (8199, 8236), False, 'from sklearn.decomposition import PCA\n'), ((8660, 8706), 'matplotlib.pyplot.matshow', 'plt.matshow', (['correlation_matrix'], {'cmap': '"""plasma"""'}), "(correlation_matrix, cmap='plasma')\n", (8671, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8712, 8726), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8724, 8726), True, 'import matplotlib.pyplot as plt\n'), ((8732, 8821), 'matplotlib.pyplot.title', 'plt.title', (['"""Matriz de coef. de corr. de Pearson \n(datos preprocesados)"""'], {'pad': '(40.0)'}), '("""Matriz de coef. de corr. 
de Pearson \n(datos preprocesados)""",\n pad=40.0)\n', (8741, 8821), True, 'import matplotlib.pyplot as plt\n'), ((8820, 8830), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8828, 8830), True, 'import matplotlib.pyplot as plt\n'), ((2228, 2245), 'numpy.dot', 'np.dot', (['X', 'self.w'], {}), '(X, self.w)\n', (2234, 2245), True, 'import numpy as np\n'), ((2935, 3024), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {'n_neighbors': 'self.num_neighbors', 'weights': '"""uniform"""', 'n_jobs': 'N_JOBS'}), "(n_neighbors=self.num_neighbors, weights='uniform',\n n_jobs=N_JOBS)\n", (2954, 3024), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((4395, 4454), 'matplotlib.pyplot.hist', 'plt.hist', (['y_train'], {'bins': '(37)', 'density': '(False)', 'cumulative': '(False)'}), '(y_train, bins=37, density=False, cumulative=False)\n', (4403, 4454), True, 'import matplotlib.pyplot as plt\n'), ((4464, 4497), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperatura crítica"""'], {}), "('Temperatura crítica')\n", (4474, 4497), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4540), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frecuencia absoluta"""'], {}), "('Frecuencia absoluta')\n", (4517, 4540), True, 'import matplotlib.pyplot as plt\n'), ((4550, 4625), 'matplotlib.pyplot.title', 'plt.title', (['"""Histograma con las temperaturas críticas y sus frec. absolutas"""'], {}), "('Histograma con las temperaturas críticas y sus frec. absolutas')\n", (4559, 4625), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4649), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4643, 4649), True, 'import matplotlib.pyplot as plt\n'), ((4659, 4669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4667, 4669), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4921), 'matplotlib.pyplot.hist', 'plt.hist', (['y_train'], {'bins': '(37)', 'density': '(True)', 'cumulative': '(True)'}), '(y_train, bins=37, density=True, cumulative=True)\n', (4872, 4921), True, 'import matplotlib.pyplot as plt\n'), ((4931, 4964), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperatura crítica"""'], {}), "('Temperatura crítica')\n", (4941, 4964), True, 'import matplotlib.pyplot as plt\n'), ((4974, 5017), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frecuencia relativa acumulada"""'], {}), "('Frecuencia relativa acumulada')\n", (4984, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5027, 5113), 'matplotlib.pyplot.title', 'plt.title', (['"""Histograma con las temperaturas críticas y sus frec. relativas acum."""'], {}), "(\n 'Histograma con las temperaturas críticas y sus frec. 
relativas acum.')\n", (5036, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5132), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5126, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5142, 5152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5150, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5675), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_PCA[:, 0]', 'X_PCA[:, 1]'], {'c': 'y_train_95', 'cmap': 'cmap', 'alpha': 'alpha'}), '(X_PCA[:, 0], X_PCA[:, 1], c=y_train_95, cmap=cmap, alpha=alpha)\n', (5611, 5675), True, 'import matplotlib.pyplot as plt\n'), ((5683, 5697), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5695, 5697), True, 'import matplotlib.pyplot as plt\n'), ((5707, 5764), 'matplotlib.pyplot.title', 'plt.title', (['"""Representación de los datos en 2D usando PCA"""'], {}), "('Representación de los datos en 2D usando PCA')\n", (5716, 5764), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5782, 5784), True, 'import matplotlib.pyplot as plt\n'), ((6061, 6138), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_TSNE[:, 0]', 'X_TSNE[:, 1]'], {'c': 'y_train_95', 'cmap': 'cmap', 'alpha': 'alpha'}), '(X_TSNE[:, 0], X_TSNE[:, 1], c=y_train_95, cmap=cmap, alpha=alpha)\n', (6072, 6138), True, 'import matplotlib.pyplot as plt\n'), ((6146, 6160), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6158, 6160), True, 'import matplotlib.pyplot as plt\n'), ((6170, 6229), 'matplotlib.pyplot.title', 'plt.title', (['"""Representación de los datos en 2D usando t-SNE"""'], {}), "('Representación de los datos en 2D usando t-SNE')\n", (6179, 6229), True, 'import matplotlib.pyplot as plt\n'), ((6239, 6249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6247, 6249), True, 'import matplotlib.pyplot as plt\n'), ((8547, 8568), 'numpy.transpose', 'np.transpose', (['X_train'], {}), '(X_train)\n', (8559, 8568), True, 'import numpy as np\n'), ((9579, 9664), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['plr', 'param_grid'], {'n_jobs': 'N_JOBS', 'verbose': '(1)', 'return_train_score': '(True)'}), '(plr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True\n )\n', (9591, 9664), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((10171, 10256), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['plr', 'param_grid'], {'n_jobs': 'N_JOBS', 'verbose': '(1)', 'return_train_score': '(True)'}), '(plr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True\n )\n', (10183, 10256), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((12077, 12162), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['knr', 'param_grid'], {'n_jobs': 'N_JOBS', 'verbose': '(1)', 'return_train_score': '(True)'}), '(knr, param_grid, n_jobs=N_JOBS, verbose=1, return_train_score=True\n )\n', (12089, 12162), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((2135, 2149), 'numpy.dot', 'np.dot', (['X.T', 'y'], {}), '(X.T, y)\n', (2141, 2149), True, 'import numpy as np\n'), ((5323, 5347), 'numpy.where', 'np.where', (['(y_train < 95.0)'], {}), '(y_train < 95.0)\n', (5331, 5347), True, 'import numpy as np\n'), ((5377, 5401), 'numpy.where', 'np.where', (['(y_train < 95.0)'], {}), '(y_train < 95.0)\n', (5385, 5401), True, 'import numpy as np\n'), ((9167, 9197), 'numpy.ones', 'np.ones', (['(X_train.shape[0], 1)'], {}), '((X_train.shape[0], 1))\n', (9174, 
9197), True, 'import numpy as np\n'), ((9235, 9264), 'numpy.ones', 'np.ones', (['(X_test.shape[0], 1)'], {}), '((X_test.shape[0], 1))\n', (9242, 9264), True, 'import numpy as np\n'), ((2597, 2606), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (2603, 2606), True, 'import numpy as np\n'), ((5528, 5564), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)', 'random_state': '(42)'}), '(n_components=2, random_state=42)\n', (5531, 5564), False, 'from sklearn.decomposition import PCA\n'), ((5993, 6025), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'init': 'X_PCA'}), '(n_components=2, init=X_PCA)\n', (5997, 6025), False, 'from sklearn.manifold import TSNE\n'), ((2075, 2098), 'numpy.identity', 'np.identity', (['X.shape[1]'], {}), '(X.shape[1])\n', (2086, 2098), True, 'import numpy as np\n'), ((6688, 6707), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {}), '()\n', (6705, 6707), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((9935, 9992), 'numpy.where', 'np.where', (["(cv_searcher.cv_results_['rank_test_score'] == 1)"], {}), "(cv_searcher.cv_results_['rank_test_score'] == 1)\n", (9943, 9992), True, 'import numpy as np\n'), ((10527, 10584), 'numpy.where', 'np.where', (["(cv_searcher.cv_results_['rank_test_score'] == 1)"], {}), "(cv_searcher.cv_results_['rank_test_score'] == 1)\n", (10535, 10584), True, 'import numpy as np\n'), ((12433, 12490), 'numpy.where', 'np.where', (["(cv_searcher.cv_results_['rank_test_score'] == 1)"], {}), "(cv_searcher.cv_results_['rank_test_score'] == 1)\n", (12441, 12490), True, 'import numpy as np\n')]
|
"""Test ImageNet pretrained DenseNet"""
import cv2
import numpy as np
from tensorflow.keras.optimizers import SGD
import tensorflow.keras.backend as K
# We only test DenseNet-121 in this script for demo purposes
from densenet121 import DenseNet
im = cv2.resize(cv2.imread('resources/cat.jpg'), (224, 224)).astype(np.float32)
#im = cv2.resize(cv2.imread('shark.jpg'), (224, 224)).astype(np.float32)
# Subtract mean pixel and multiply by scaling constant
# Reference: https://github.com/shicai/DenseNet-Caffe
im[:,:,0] = (im[:,:,0] - 103.94) * 0.017
im[:,:,1] = (im[:,:,1] - 116.78) * 0.017
im[:,:,2] = (im[:,:,2] - 123.68) * 0.017
print(K.image_data_format())
# Use pre-trained weights for Tensorflow backend
weights_path = 'imagenet_models/densenet121_weights_tf.h5'
# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)
# Test pretrained model
model = DenseNet(reduction=0.5, classes=1000, weights_path=weights_path)
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
out = model.predict(im)
# Load ImageNet classes file
classes = []
with open('resources/classes.txt', 'r') as list_:
for line in list_:
classes.append(line.rstrip('\n'))
print('Prediction: '+str(classes[np.argmax(out)]))
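# Hedged extension (illustrative): report the top-5 predictions instead of only the
# argmax; assumes `out` has shape (1, num_classes) as returned by model.predict above.
top5 = np.argsort(out[0])[::-1][:5]
for rank, idx in enumerate(top5, start=1):
    print('Top-{}: {} ({:.4f})'.format(rank, classes[idx], out[0][idx]))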
|
[
"numpy.argmax",
"tensorflow.keras.optimizers.SGD",
"densenet121.DenseNet",
"numpy.expand_dims",
"cv2.imread",
"tensorflow.keras.backend.image_data_format"
] |
[((822, 848), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (836, 848), True, 'import numpy as np\n'), ((882, 946), 'densenet121.DenseNet', 'DenseNet', ([], {'reduction': '(0.5)', 'classes': '(1000)', 'weights_path': 'weights_path'}), '(reduction=0.5, classes=1000, weights_path=weights_path)\n', (890, 946), False, 'from densenet121 import DenseNet\n'), ((954, 1008), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (957, 1008), False, 'from tensorflow.keras.optimizers import SGD\n'), ((641, 662), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (660, 662), True, 'import tensorflow.keras.backend as K\n'), ((264, 295), 'cv2.imread', 'cv2.imread', (['"""resources/cat.jpg"""'], {}), "('resources/cat.jpg')\n", (274, 295), False, 'import cv2\n'), ((1303, 1317), 'numpy.argmax', 'np.argmax', (['out'], {}), '(out)\n', (1312, 1317), True, 'import numpy as np\n')]
|
import cv2
import math
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy import ndimage
from utils import ValueInvert
# TO-DO: Refactor this with np.nonzero??
def find_center_image(img):
left = 0
right = img.shape[1] - 1
empty_left = True
empty_right = True
for col in range(int(img.shape[1])):
if empty_left == False and empty_right == False:
break
for row in range(img.shape[0] - 1):
if img[row, col] > 0 and empty_left == True:
empty_left = False
left = col
if img[row, img.shape[1] - col - 1] > 0 and empty_right == True:
empty_right = False
right = img.shape[1] - col
top = 0
bottom = img.shape[0] - 1
empty_top = True
empty_bottom = True
for row in range(int(img.shape[0])):
if empty_top == False and empty_bottom == False:
break
for col in range(img.shape[1] - 1):
if img[row, col] > 0 and empty_top == True:
empty_top = False
top = row
if img[img.shape[0] - row - 1, col] > 0 and empty_bottom == True:
empty_bottom = False
bottom = img.shape[0] - row
return top, right, bottom, left
def getBestShift(img):
cy, cx = ndimage.measurements.center_of_mass(img)
rows, cols = img.shape
shiftx = np.round(cols/2.0-cx).astype(int)
shifty = np.round(rows/2.0-cy).astype(int)
return shiftx, shifty
def shift(img, sx, sy):
rows, cols = img.shape
M = np.float32([[1, 0, sx], [0, 1, sy]])
shifted = cv2.warpAffine(img, M, (cols, rows))
return shifted
def process_image(img):
img = ValueInvert(img)
img = cv2.resize(img, (28, 28))
(thresh, gray) = cv2.threshold(img, 128,
255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
top, right, bottom, left = find_center_image(img)
cropped_img = img[top:bottom, left:right]
rows, cols = cropped_img.shape
# resize 20x20 keeping ratio
    if rows > cols:
        # compute the scaling factor from the original dimensions before overwriting them
        factor = 20.0 / rows
        rows = 20
        cols = int(round(cols * factor))
    else:
        factor = 20.0 / cols
        cols = 20
        rows = int(round(rows * factor))
gray = cv2.resize(cropped_img, (cols, rows))
# plt.imshow(gray)
# plt.show()
colsPadding = (int(math.ceil((28-cols)/2.0)),
int(math.floor((28-cols)/2.0)))
rowsPadding = (int(math.ceil((28-rows)/2.0)),
int(math.floor((28-rows)/2.0)))
gray = np.lib.pad(gray, (rowsPadding, colsPadding), 'constant')
shiftx, shifty = getBestShift(gray)
shifted = shift(gray, shiftx, shifty)
gray = shifted
return gray
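# Hedged usage sketch (the file name and display step are illustrative, not part of the
# original module); process_image expects a grayscale image and ValueInvert comes from
# the local utils module.
if __name__ == '__main__':
    sample = cv2.imread('digit_sample.png', cv2.IMREAD_GRAYSCALE)
    if sample is not None:
        centered = process_image(sample)
        plt.imshow(centered, cmap='gray')
        plt.show()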
|
[
"cv2.warpAffine",
"math.ceil",
"math.floor",
"cv2.threshold",
"utils.ValueInvert",
"numpy.lib.pad",
"scipy.ndimage.measurements.center_of_mass",
"cv2.resize",
"numpy.float32",
"numpy.round"
] |
[((1339, 1379), 'scipy.ndimage.measurements.center_of_mass', 'ndimage.measurements.center_of_mass', (['img'], {}), '(img)\n', (1374, 1379), False, 'from scipy import ndimage\n'), ((1590, 1626), 'numpy.float32', 'np.float32', (['[[1, 0, sx], [0, 1, sy]]'], {}), '([[1, 0, sx], [0, 1, sy]])\n', (1600, 1626), True, 'import numpy as np\n'), ((1641, 1677), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, rows))\n', (1655, 1677), False, 'import cv2\n'), ((1733, 1749), 'utils.ValueInvert', 'ValueInvert', (['img'], {}), '(img)\n', (1744, 1749), False, 'from utils import ValueInvert\n'), ((1760, 1785), 'cv2.resize', 'cv2.resize', (['img', '(28, 28)'], {}), '(img, (28, 28))\n', (1770, 1785), False, 'import cv2\n'), ((1807, 1872), 'cv2.threshold', 'cv2.threshold', (['img', '(128)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (1820, 1872), False, 'import cv2\n'), ((2288, 2325), 'cv2.resize', 'cv2.resize', (['cropped_img', '(cols, rows)'], {}), '(cropped_img, (cols, rows))\n', (2298, 2325), False, 'import cv2\n'), ((2579, 2635), 'numpy.lib.pad', 'np.lib.pad', (['gray', '(rowsPadding, colsPadding)', '"""constant"""'], {}), "(gray, (rowsPadding, colsPadding), 'constant')\n", (2589, 2635), True, 'import numpy as np\n'), ((1421, 1446), 'numpy.round', 'np.round', (['(cols / 2.0 - cx)'], {}), '(cols / 2.0 - cx)\n', (1429, 1446), True, 'import numpy as np\n'), ((1468, 1493), 'numpy.round', 'np.round', (['(rows / 2.0 - cy)'], {}), '(rows / 2.0 - cy)\n', (1476, 1493), True, 'import numpy as np\n'), ((2389, 2417), 'math.ceil', 'math.ceil', (['((28 - cols) / 2.0)'], {}), '((28 - cols) / 2.0)\n', (2398, 2417), False, 'import math\n'), ((2439, 2468), 'math.floor', 'math.floor', (['((28 - cols) / 2.0)'], {}), '((28 - cols) / 2.0)\n', (2449, 2468), False, 'import math\n'), ((2490, 2518), 'math.ceil', 'math.ceil', (['((28 - rows) / 2.0)'], {}), '((28 - rows) / 2.0)\n', (2499, 2518), False, 'import math\n'), ((2540, 2569), 'math.floor', 'math.floor', (['((28 - rows) / 2.0)'], {}), '((28 - rows) / 2.0)\n', (2550, 2569), False, 'import math\n')]
|
import numpy as np
from collections import namedtuple, deque
import random
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'not_done'))
class ReplayBuffer(object):
def __init__(self, capacity):
self.memory = deque([], maxlen=capacity)
def push(self, *args):
self.memory.append([*args])
def sample(self, batch_size):
batch = random.sample(self.memory, batch_size)
        # Each stored transition is [state, action, next_state, reward, not_done];
        # stacking every field with np.vstack avoids the ragged-nested-sequence
        # deprecation warning that np.asarray raised here.
        states, actions, next_states, rewards, not_done = (np.vstack(field) for field in zip(*batch))
        return states, actions, rewards, next_states, not_done
def sample_last(self):
batch = self.memory[-1]
return batch
def __len__(self):
return len(self.memory)
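# Hedged usage sketch (shapes and values are illustrative only): push a few transitions
# in the [state, action, next_state, reward, not_done] order and draw one batch.
if __name__ == '__main__':
    buffer = ReplayBuffer(capacity=100)
    for step in range(10):
        buffer.push(np.random.rand(4), step % 2, np.random.rand(4), 1.0, True)
    states, actions, rewards, next_states, not_done = buffer.sample(batch_size=4)
    print(states.shape, actions.shape, rewards.shape)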
|
[
"random.sample",
"collections.namedtuple",
"collections.deque",
"numpy.vstack"
] |
[((93, 178), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward', 'not_done')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward',\n 'not_done'))\n", (103, 178), False, 'from collections import namedtuple, deque\n'), ((268, 294), 'collections.deque', 'deque', (['[]'], {'maxlen': 'capacity'}), '([], maxlen=capacity)\n', (273, 294), False, 'from collections import namedtuple, deque\n'), ((416, 454), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (429, 454), False, 'import random\n'), ((857, 876), 'numpy.vstack', 'np.vstack', (['batch[0]'], {}), '(batch[0])\n', (866, 876), True, 'import numpy as np\n'), ((878, 897), 'numpy.vstack', 'np.vstack', (['batch[1]'], {}), '(batch[1])\n', (887, 897), True, 'import numpy as np\n'), ((899, 918), 'numpy.vstack', 'np.vstack', (['batch[2]'], {}), '(batch[2])\n', (908, 918), True, 'import numpy as np\n'), ((920, 939), 'numpy.vstack', 'np.vstack', (['batch[3]'], {}), '(batch[3])\n', (929, 939), True, 'import numpy as np\n'), ((998, 1017), 'numpy.vstack', 'np.vstack', (['batch[4]'], {}), '(batch[4])\n', (1007, 1017), True, 'import numpy as np\n')]
|
import os
import csv
import librosa
import numpy as np
import pandas as pd
from spider.featurization.audio_featurization import AudioFeaturization
# Read the test data csv
csv_file='data/testAudioData.csv'
df = pd.read_csv(csv_file)
# Read in the audio data specified by the csv
data = []
for idx, row in df.iterrows():
filename = os.path.join('data/raw_data', row['filename'])
datum, sampling_rate = librosa.load(filename)
data.append(datum)
# Optional audio featurization parameter specification
frame_length = 0.050
overlap = 0.025
# Request feature generation
print("Generating features...")
featurizer = AudioFeaturization(
sampling_rate=sampling_rate,
frame_length=frame_length,
overlap=overlap
)
features = featurizer.produce(data)
# Save features to disk
with open('features.csv', 'w+') as f:
for feature in features:
np.savetxt(f, feature)
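# Hedged check (illustrative): since np.savetxt writes one text row per frame, the file
# can be read back as a single stacked matrix as long as every frame has the same
# number of feature columns.
stacked = np.loadtxt('features.csv')
print('Stacked feature matrix shape:', stacked.shape)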
|
[
"pandas.read_csv",
"os.path.join",
"numpy.savetxt",
"spider.featurization.audio_featurization.AudioFeaturization",
"librosa.load"
] |
[((212, 233), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (223, 233), True, 'import pandas as pd\n'), ((616, 711), 'spider.featurization.audio_featurization.AudioFeaturization', 'AudioFeaturization', ([], {'sampling_rate': 'sampling_rate', 'frame_length': 'frame_length', 'overlap': 'overlap'}), '(sampling_rate=sampling_rate, frame_length=frame_length,\n overlap=overlap)\n', (634, 711), False, 'from spider.featurization.audio_featurization import AudioFeaturization\n'), ((334, 380), 'os.path.join', 'os.path.join', (['"""data/raw_data"""', "row['filename']"], {}), "('data/raw_data', row['filename'])\n", (346, 380), False, 'import os\n'), ((405, 427), 'librosa.load', 'librosa.load', (['filename'], {}), '(filename)\n', (417, 427), False, 'import librosa\n'), ((841, 863), 'numpy.savetxt', 'np.savetxt', (['f', 'feature'], {}), '(f, feature)\n', (851, 863), True, 'import numpy as np\n')]
|
import scipy.io as sio
import numpy as np
class MatWrapper(object):
def __init__(self,mat_file):
self.mat_fp = mat_file
self.data = None
class NeuroSurgMat(MatWrapper):
def __init__(self, mat_file):
self.mat_fp = mat_file
self.data = None
self._clfp = None
self._cmacro_lfp = None
self._metadata = None
@property
def CLFP(self):
# Lazy load CLFP files
if self.data is None:
self.data = sio.loadmat(self.mat_fp)
if self._clfp is None:
clfp = np.empty((3,self.data['CLFP_01'].shape[1]))
for i in np.arange(3):
clfp[i,:] = np.squeeze(self.data['CLFP_0'+str(i+1)])
self._clfp = clfp
return self._clfp
@property
def CMacro_LFP(self):
if self.data is None:
self.data = sio.loadmat(self.mat_fp)
if self._cmacro_lfp is None:
cmacro_lfp = np.empty((3,self.data['CMacro_LFP_01'].shape[1]))
for i in np.arange(3):
cmacro_lfp[i,:] = np.squeeze(self.data['CMacro_LFP_0'+str(i+1)])
self._cmacro_lfp = cmacro_lfp
return self._cmacro_lfp
@property
def metadata(self):
if self.data is None:
self.data = sio.loadmat(self.mat_fp)
if self._metadata is None:
self._metadata = {
'lfp':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
'mer':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
'eeg':{'sampFreqHz':None,'timeStart':None,'timeEnd':None},
}
for rec in list(self._metadata.keys()):
self._metadata[rec]['sampFreqHz']=self.data[rec][0][0][0][0][0]
self._metadata[rec]['timeStart']=np.squeeze(self.data[rec][0][0][1]).item()
self._metadata[rec]['timeEnd']=np.squeeze(self.data[rec][0][0][2]).item()
return self._metadata
class NeuroSurgDataset(object):
def __init__(self, data_dir):
self.data_dir = data_dir
# TODO Check if manifest file exists, if not create empty one
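# Hedged usage sketch (the .mat path is illustrative): the properties above are lazy,
# so nothing is read from disk until CLFP, CMacro_LFP or metadata is first accessed.
if __name__ == '__main__':
    recording = NeuroSurgMat('example_case.mat')
    print(recording.metadata['lfp']['sampFreqHz'])
    print(recording.CLFP.shape)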
|
[
"scipy.io.loadmat",
"numpy.empty",
"numpy.arange",
"numpy.squeeze"
] |
[((492, 516), 'scipy.io.loadmat', 'sio.loadmat', (['self.mat_fp'], {}), '(self.mat_fp)\n', (503, 516), True, 'import scipy.io as sio\n'), ((567, 611), 'numpy.empty', 'np.empty', (["(3, self.data['CLFP_01'].shape[1])"], {}), "((3, self.data['CLFP_01'].shape[1]))\n", (575, 611), True, 'import numpy as np\n'), ((632, 644), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (641, 644), True, 'import numpy as np\n'), ((866, 890), 'scipy.io.loadmat', 'sio.loadmat', (['self.mat_fp'], {}), '(self.mat_fp)\n', (877, 890), True, 'import scipy.io as sio\n'), ((953, 1003), 'numpy.empty', 'np.empty', (["(3, self.data['CMacro_LFP_01'].shape[1])"], {}), "((3, self.data['CMacro_LFP_01'].shape[1]))\n", (961, 1003), True, 'import numpy as np\n'), ((1024, 1036), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1033, 1036), True, 'import numpy as np\n'), ((1286, 1310), 'scipy.io.loadmat', 'sio.loadmat', (['self.mat_fp'], {}), '(self.mat_fp)\n', (1297, 1310), True, 'import scipy.io as sio\n'), ((1817, 1852), 'numpy.squeeze', 'np.squeeze', (['self.data[rec][0][0][1]'], {}), '(self.data[rec][0][0][1])\n', (1827, 1852), True, 'import numpy as np\n'), ((1907, 1942), 'numpy.squeeze', 'np.squeeze', (['self.data[rec][0][0][2]'], {}), '(self.data[rec][0][0][2])\n', (1917, 1942), True, 'import numpy as np\n')]
|
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
import numpy as np
np.random.seed(1)
class ResidualStack(object):
def __init__(self, in_channels, num_hidden, num_res_layers, rng=313):
self.in_channels = in_channels
self.num_hidden = num_hidden
self.num_res_layers = num_res_layers
self.rng = rng
def __call__(self, x, test):
out = x
for i in range(self.num_res_layers):
            out = self.res_block(out, scope_name='res_block_'+str(i), test=test)
return F.relu(out)
def res_block(self, x, scope_name='res_block', test=False):
with nn.parameter_scope(scope_name):
out = F.relu(x)
out = PF.convolution(out, self.num_hidden, (3, 3),
stride=(1, 1), pad=(1, 1), with_bias=False, name='conv_1', rng=self.rng)
out = PF.batch_normalization(out, name='bn_1', batch_stat=not test)
out = F.relu(out)
out = PF.convolution(out, self.num_hidden, (1, 1),
stride=(1, 1), with_bias=False, name='conv_2', rng=self.rng)
out = PF.batch_normalization(out, name='bn_2', batch_stat=not test)
return x + out
class VectorQuantizer(object):
def __init__(self, embedding_dim, num_embedding, commitment_cost, rng,
scope_name='vector_quantizer'):
self.embedding_dim = embedding_dim
self.num_embedding = num_embedding
self.commitment_cost = commitment_cost
self.rng = rng
self.scope_name = scope_name
with nn.parameter_scope(scope_name):
self.embedding_weight = nn.parameter.get_parameter_or_create('W', shape=(self.num_embedding, self.embedding_dim),
initializer=I.UniformInitializer((-1./self.num_embedding, 1./self.num_embedding), rng=self.rng), need_grad=True)
def __call__(self, x, return_encoding_indices=False):
x = F.transpose(x, (0, 2, 3, 1))
x_flat = x.reshape((-1, self.embedding_dim))
x_flat_squared = F.broadcast(
F.sum(x_flat**2, axis=1, keepdims=True), (x_flat.shape[0], self.num_embedding))
emb_wt_squared = F.transpose(
F.sum(self.embedding_weight**2, axis=1, keepdims=True), (1, 0))
distances = x_flat_squared + emb_wt_squared - 2 * \
F.affine(x_flat, F.transpose(self.embedding_weight, (1, 0)))
encoding_indices = F.min(
distances, only_index=True, axis=1, keepdims=True)
encoding_indices.need_grad = False
quantized = F.embed(encoding_indices.reshape(
encoding_indices.shape[:-1]), self.embedding_weight).reshape(x.shape)
if return_encoding_indices:
return encoding_indices, F.transpose(quantized, (0, 3, 1, 2))
encodings = F.one_hot(encoding_indices, (self.num_embedding,))
e_latent_loss = F.mean(F.squared_error(
quantized.get_unlinked_variable(need_grad=False), x))
q_latent_loss = F.mean(F.squared_error(
quantized, x.get_unlinked_variable(need_grad=False)))
loss = q_latent_loss + self.commitment_cost*e_latent_loss
quantized = x + (quantized - x).get_unlinked_variable(need_grad=False)
avg_probs = F.mean(encodings, axis=0)
perplexity = F.exp(-F.sum(avg_probs*F.log(avg_probs+1.0e-10)))
return loss, F.transpose(quantized, (0, 3, 1, 2)), perplexity, encodings
class VQVAE(object):
def __init__(self, config, training=True):
self.in_channels = config['model']['in_channels']
self.num_hidden = config['model']['num_hidden']
self.num_res_layers = config['model']['num_res_layers']
self.rng = np.random.RandomState(config['model']['rng'])
self.encoder_res_stack = ResidualStack(in_channels=self.num_hidden,
num_hidden=self.num_hidden, num_res_layers=self.num_res_layers,
rng=self.rng)
self.decoder_res_stack = ResidualStack(in_channels=self.num_hidden,
num_hidden=self.num_hidden, num_res_layers=self.num_res_layers,
rng=self.rng)
self.num_embedding = config['model']['num_embeddings']
self.embedding_dim = config['model']['embedding_dim']
self.commitment_cost = config['model']['commitment_cost']
self.decay = config['model']['decay']
self.training = training
self.vq = VectorQuantizer(
self.embedding_dim, self.num_embedding, self.commitment_cost, self.rng)
def encoder(self, x, test):
with nn.parameter_scope('encoder'):
out = PF.convolution(x, self.num_hidden, (4, 4), stride=(2, 2),
pad=(1, 1), name='conv_1', rng=self.rng)
out = PF.batch_normalization(out, batch_stat=not test)
out = F.relu(out)
out = PF.convolution(out, self.num_hidden, (4, 4), stride=(2, 2),
pad=(1, 1), name='conv_2', rng=self.rng)
out = self.encoder_res_stack(out, test=test)
return out
def decoder(self, x, test):
with nn.parameter_scope('decoder'):
out = self.decoder_res_stack(x, test=test)
out = F.relu(out)
out = PF.deconvolution(out, self.num_hidden, (4, 4), stride=(2, 2),
pad=(1, 1), name='deconv_1', rng=self.rng)
out = PF.batch_normalization(out, batch_stat=not test)
out = F.relu(out)
out = PF.deconvolution(out, self.in_channels, (4, 4), stride=(2, 2),
pad=(1, 1), name='deconv_2', rng=self.rng)
out = F.tanh(out)
return out
def __call__(self, img, return_encoding_indices=False, quantized_as_input=False, test=False):
with nn.parameter_scope('vq_vae'):
# import pdb; pdb.set_trace()
if quantized_as_input:
return self.decoder(img, test)
z = self.encoder(img, test)
z = PF.convolution(z, self.embedding_dim, (1, 1), stride=(1, 1))
if return_encoding_indices:
return self.vq(z, return_encoding_indices=True)
loss, quantized, perplexity, encodings = self.vq(z)
img_recon = self.decoder(quantized, test)
return loss, img_recon, perplexity
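# Hedged usage sketch (all config values are illustrative): build the graph for one
# forward pass; the keys mirror the ones read in VQVAE.__init__ above.
if __name__ == '__main__':
    demo_config = {'model': {'in_channels': 3, 'num_hidden': 128, 'num_res_layers': 2,
                             'rng': 313, 'num_embeddings': 512, 'embedding_dim': 64,
                             'commitment_cost': 0.25, 'decay': 0.99}}
    vqvae = VQVAE(demo_config)
    img = nn.Variable((8, 3, 32, 32))
    loss, img_recon, perplexity = vqvae(img)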
|
[
"nnabla.functions.transpose",
"nnabla.functions.one_hot",
"nnabla.parametric_functions.convolution",
"nnabla.functions.log",
"nnabla.parametric_functions.deconvolution",
"nnabla.parametric_functions.batch_normalization",
"nnabla.parameter_scope",
"nnabla.functions.sum",
"numpy.random.seed",
"nnabla.functions.tanh",
"nnabla.functions.mean",
"nnabla.initializer.UniformInitializer",
"nnabla.functions.relu",
"numpy.random.RandomState",
"nnabla.functions.min"
] |
[((774, 791), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (788, 791), True, 'import numpy as np\n'), ((1222, 1233), 'nnabla.functions.relu', 'F.relu', (['out'], {}), '(out)\n', (1228, 1233), True, 'import nnabla.functions as F\n'), ((2692, 2720), 'nnabla.functions.transpose', 'F.transpose', (['x', '(0, 2, 3, 1)'], {}), '(x, (0, 2, 3, 1))\n', (2703, 2720), True, 'import nnabla.functions as F\n'), ((3189, 3245), 'nnabla.functions.min', 'F.min', (['distances'], {'only_index': '(True)', 'axis': '(1)', 'keepdims': '(True)'}), '(distances, only_index=True, axis=1, keepdims=True)\n', (3194, 3245), True, 'import nnabla.functions as F\n'), ((3575, 3625), 'nnabla.functions.one_hot', 'F.one_hot', (['encoding_indices', '(self.num_embedding,)'], {}), '(encoding_indices, (self.num_embedding,))\n', (3584, 3625), True, 'import nnabla.functions as F\n'), ((4030, 4055), 'nnabla.functions.mean', 'F.mean', (['encodings'], {'axis': '(0)'}), '(encodings, axis=0)\n', (4036, 4055), True, 'import nnabla.functions as F\n'), ((4477, 4522), 'numpy.random.RandomState', 'np.random.RandomState', (["config['model']['rng']"], {}), "(config['model']['rng'])\n", (4498, 4522), True, 'import numpy as np\n'), ((1312, 1342), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope_name'], {}), '(scope_name)\n', (1330, 1342), True, 'import nnabla as nn\n'), ((1362, 1371), 'nnabla.functions.relu', 'F.relu', (['x'], {}), '(x)\n', (1368, 1371), True, 'import nnabla.functions as F\n'), ((1390, 1511), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['out', 'self.num_hidden', '(3, 3)'], {'stride': '(1, 1)', 'pad': '(1, 1)', 'with_bias': '(False)', 'name': '"""conv_1"""', 'rng': 'self.rng'}), "(out, self.num_hidden, (3, 3), stride=(1, 1), pad=(1, 1),\n with_bias=False, name='conv_1', rng=self.rng)\n", (1404, 1511), True, 'import nnabla.parametric_functions as PF\n'), ((1559, 1620), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['out'], {'name': '"""bn_1"""', 'batch_stat': '(not test)'}), "(out, name='bn_1', batch_stat=not test)\n", (1581, 1620), True, 'import nnabla.parametric_functions as PF\n'), ((1639, 1650), 'nnabla.functions.relu', 'F.relu', (['out'], {}), '(out)\n', (1645, 1650), True, 'import nnabla.functions as F\n'), ((1669, 1778), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['out', 'self.num_hidden', '(1, 1)'], {'stride': '(1, 1)', 'with_bias': '(False)', 'name': '"""conv_2"""', 'rng': 'self.rng'}), "(out, self.num_hidden, (1, 1), stride=(1, 1), with_bias=False,\n name='conv_2', rng=self.rng)\n", (1683, 1778), True, 'import nnabla.parametric_functions as PF\n'), ((1826, 1887), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['out'], {'name': '"""bn_2"""', 'batch_stat': '(not test)'}), "(out, name='bn_2', batch_stat=not test)\n", (1848, 1887), True, 'import nnabla.parametric_functions as PF\n'), ((2276, 2306), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope_name'], {}), '(scope_name)\n', (2294, 2306), True, 'import nnabla as nn\n'), ((2829, 2870), 'nnabla.functions.sum', 'F.sum', (['(x_flat ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(x_flat ** 2, axis=1, keepdims=True)\n', (2834, 2870), True, 'import nnabla.functions as F\n'), ((2963, 3019), 'nnabla.functions.sum', 'F.sum', (['(self.embedding_weight ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(self.embedding_weight ** 2, axis=1, keepdims=True)\n', (2968, 3019), True, 'import nnabla.functions as F\n'), ((4149, 4185), 
'nnabla.functions.transpose', 'F.transpose', (['quantized', '(0, 3, 1, 2)'], {}), '(quantized, (0, 3, 1, 2))\n', (4160, 4185), True, 'import nnabla.functions as F\n'), ((5458, 5487), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""encoder"""'], {}), "('encoder')\n", (5476, 5487), True, 'import nnabla as nn\n'), ((5507, 5610), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['x', 'self.num_hidden', '(4, 4)'], {'stride': '(2, 2)', 'pad': '(1, 1)', 'name': '"""conv_1"""', 'rng': 'self.rng'}), "(x, self.num_hidden, (4, 4), stride=(2, 2), pad=(1, 1), name=\n 'conv_1', rng=self.rng)\n", (5521, 5610), True, 'import nnabla.parametric_functions as PF\n'), ((5657, 5705), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['out'], {'batch_stat': '(not test)'}), '(out, batch_stat=not test)\n', (5679, 5705), True, 'import nnabla.parametric_functions as PF\n'), ((5724, 5735), 'nnabla.functions.relu', 'F.relu', (['out'], {}), '(out)\n', (5730, 5735), True, 'import nnabla.functions as F\n'), ((5754, 5858), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['out', 'self.num_hidden', '(4, 4)'], {'stride': '(2, 2)', 'pad': '(1, 1)', 'name': '"""conv_2"""', 'rng': 'self.rng'}), "(out, self.num_hidden, (4, 4), stride=(2, 2), pad=(1, 1),\n name='conv_2', rng=self.rng)\n", (5768, 5858), True, 'import nnabla.parametric_functions as PF\n'), ((6011, 6040), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""decoder"""'], {}), "('decoder')\n", (6029, 6040), True, 'import nnabla as nn\n'), ((6115, 6126), 'nnabla.functions.relu', 'F.relu', (['out'], {}), '(out)\n', (6121, 6126), True, 'import nnabla.functions as F\n'), ((6145, 6253), 'nnabla.parametric_functions.deconvolution', 'PF.deconvolution', (['out', 'self.num_hidden', '(4, 4)'], {'stride': '(2, 2)', 'pad': '(1, 1)', 'name': '"""deconv_1"""', 'rng': 'self.rng'}), "(out, self.num_hidden, (4, 4), stride=(2, 2), pad=(1, 1),\n name='deconv_1', rng=self.rng)\n", (6161, 6253), True, 'import nnabla.parametric_functions as PF\n'), ((6303, 6351), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['out'], {'batch_stat': '(not test)'}), '(out, batch_stat=not test)\n', (6325, 6351), True, 'import nnabla.parametric_functions as PF\n'), ((6370, 6381), 'nnabla.functions.relu', 'F.relu', (['out'], {}), '(out)\n', (6376, 6381), True, 'import nnabla.functions as F\n'), ((6400, 6509), 'nnabla.parametric_functions.deconvolution', 'PF.deconvolution', (['out', 'self.in_channels', '(4, 4)'], {'stride': '(2, 2)', 'pad': '(1, 1)', 'name': '"""deconv_2"""', 'rng': 'self.rng'}), "(out, self.in_channels, (4, 4), stride=(2, 2), pad=(1, 1),\n name='deconv_2', rng=self.rng)\n", (6416, 6509), True, 'import nnabla.parametric_functions as PF\n'), ((6559, 6570), 'nnabla.functions.tanh', 'F.tanh', (['out'], {}), '(out)\n', (6565, 6570), True, 'import nnabla.functions as F\n'), ((6704, 6732), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""vq_vae"""'], {}), "('vq_vae')\n", (6722, 6732), True, 'import nnabla as nn\n'), ((6914, 6974), 'nnabla.parametric_functions.convolution', 'PF.convolution', (['z', 'self.embedding_dim', '(1, 1)'], {'stride': '(1, 1)'}), '(z, self.embedding_dim, (1, 1), stride=(1, 1))\n', (6928, 6974), True, 'import nnabla.parametric_functions as PF\n'), ((3517, 3553), 'nnabla.functions.transpose', 'F.transpose', (['quantized', '(0, 3, 1, 2)'], {}), '(quantized, (0, 3, 1, 2))\n', (3528, 3553), True, 'import nnabla.functions as F\n'), ((2519, 2612), 'nnabla.initializer.UniformInitializer', 
'I.UniformInitializer', (['(-1.0 / self.num_embedding, 1.0 / self.num_embedding)'], {'rng': 'self.rng'}), '((-1.0 / self.num_embedding, 1.0 / self.num_embedding),\n rng=self.rng)\n', (2539, 2612), True, 'import nnabla.initializer as I\n'), ((3117, 3159), 'nnabla.functions.transpose', 'F.transpose', (['self.embedding_weight', '(1, 0)'], {}), '(self.embedding_weight, (1, 0))\n', (3128, 3159), True, 'import nnabla.functions as F\n'), ((4100, 4124), 'nnabla.functions.log', 'F.log', (['(avg_probs + 1e-10)'], {}), '(avg_probs + 1e-10)\n', (4105, 4124), True, 'import nnabla.functions as F\n')]
|
import time
import numpy as np
import torch
class Profiler:
def __init__(self, dummy=False, device=None):
self.events = []
self.dummy = dummy
self.device = device if device != torch.device('cpu') else None
self.log('start')
def log(self, name):
if self.dummy:
return
# Optionally synchronize cuda before logging time
if self.device is not None:
torch.cuda.synchronize(self.device)
self.events.append((name, time.time()))
def print_profile_summary(self, step, detailed=False):
event_names, event_times = zip(*self.events)
total_duration = event_times[-1] - event_times[0]
print(
"-------------- Step {} total duration: {:.3f} ms -------------------".format(step, total_duration * 1000))
event_durations = np.diff(event_times)
event_names = event_names[1:]
total_generate = sum(d for e, d in zip(event_names, event_durations) if "expansion" in e)
total_reduce = sum(d for e, d in zip(event_names, event_durations) if "reduced" in e)
print("Total generate expansions time {:.3f} ms".format(total_generate * 1000))
print("Total topk selection time {:.3f} ms".format(total_reduce * 1000))
print(
"Total rest time {:.3f} ms".format((total_duration - total_generate - total_reduce) * 1000))
maxlen = max(len(en) for en in event_names)
if detailed:
for i in np.argsort(-event_durations): # Sort descending by duration
print(("{:" + str(maxlen) + "s} {:.3f} ms").format(event_names[i], event_durations[i] * 1000))
def debug_memory(device=None):
print('*' * 20, "Memory Dump", '*' * 20)
if device is not None:
print(torch.cuda.memory_summary(device))
import gc
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
print(type(obj), obj.device, obj.dtype, obj.size())
except:
pass
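# Hedged usage sketch (event names are illustrative): log a few timestamps on CPU and
# print the per-event durations collected by the Profiler above.
if __name__ == '__main__':
    prof = Profiler()
    time.sleep(0.01)
    prof.log('expansion done')
    time.sleep(0.01)
    prof.log('reduced to topk')
    prof.print_profile_summary(step=0, detailed=True)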
|
[
"numpy.diff",
"torch.cuda.synchronize",
"numpy.argsort",
"torch.is_tensor",
"gc.get_objects",
"time.time",
"torch.cuda.memory_summary",
"torch.device"
] |
[((1863, 1879), 'gc.get_objects', 'gc.get_objects', ([], {}), '()\n', (1877, 1879), False, 'import gc\n'), ((853, 873), 'numpy.diff', 'np.diff', (['event_times'], {}), '(event_times)\n', (860, 873), True, 'import numpy as np\n'), ((437, 472), 'torch.cuda.synchronize', 'torch.cuda.synchronize', (['self.device'], {}), '(self.device)\n', (459, 472), False, 'import torch\n'), ((1508, 1536), 'numpy.argsort', 'np.argsort', (['(-event_durations)'], {}), '(-event_durations)\n', (1518, 1536), True, 'import numpy as np\n'), ((1799, 1832), 'torch.cuda.memory_summary', 'torch.cuda.memory_summary', (['device'], {}), '(device)\n', (1824, 1832), False, 'import torch\n'), ((207, 226), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (219, 226), False, 'import torch\n'), ((507, 518), 'time.time', 'time.time', ([], {}), '()\n', (516, 518), False, 'import time\n'), ((1909, 1929), 'torch.is_tensor', 'torch.is_tensor', (['obj'], {}), '(obj)\n', (1924, 1929), False, 'import torch\n'), ((1959, 1984), 'torch.is_tensor', 'torch.is_tensor', (['obj.data'], {}), '(obj.data)\n', (1974, 1984), False, 'import torch\n')]
|
"""
WLS filter: Edge-preserving smoothing based on the weighted least squares
optimization framework, as described in Farbman, Fattal, Lischinski, and
Szeliski, "Edge-Preserving Decompositions for Multi-Scale Tone and Detail
Manipulation", ACM Transactions on Graphics, 27(3), August 2008.
Given an input image IN, we seek a new image OUT, which, on the one hand,
is as close as possible to IN, and, at the same time, is as smooth as
possible everywhere, except across significant gradients in L.
"""
import cv2
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve, lsqr
def wlsFilter(IN, Lambda=1.0, Alpha=1.2):
"""
IN : Input image (2D grayscale image, type float)
Lambda : Balances between the data term and the smoothness term.
             Increasing Lambda will produce smoother images.
Default value is 1.0
Alpha : Gives a degree of control over the affinities by
non-lineary scaling the gradients. Increasing alpha
will result in sharper preserved edges. Default value: 1.2
"""
L = np.log(IN+1e-22) # Source image for the affinity matrix. log_e(IN)
smallNum = 1e-6
height, width = IN.shape
k = height * width
# Compute affinities between adjacent pixels based on gradients of L
dy = np.diff(L, n=1, axis=0) # axis=0 is vertical direction
dy = -Lambda/(np.abs(dy)**Alpha + smallNum)
dy = np.pad(dy, ((0,1),(0,0)), 'constant') # add zeros row
dy = dy.flatten(order='F')
dx = np.diff(L, n=1, axis=1)
dx = -Lambda/(np.abs(dx)**Alpha + smallNum)
dx = np.pad(dx, ((0,0),(0,1)), 'constant') # add zeros col
dx = dx.flatten(order='F')
# Construct a five-point spatially inhomogeneous Laplacian matrix
B = np.concatenate([[dx], [dy]], axis=0)
d = np.array([-height, -1])
A = spdiags(B, d, k, k)
e = dx
w = np.pad(dx, (height, 0), 'constant'); w = w[0:-height]
s = dy
n = np.pad(dy, (1, 0), 'constant'); n = n[0:-1]
D = 1.0 - (e + w + s + n)
A = A + A.transpose() + spdiags(D, 0, k, k)
# Solve
OUT = spsolve(A, IN.flatten(order='F'))
return np.reshape(OUT, (height, width), order='F')
# Unit test
if __name__ == '__main__':
image = cv2.imread('1.png')
if image.shape[2] == 4: # Format RGBA
image = image[:,:, 0:3] # Discard alpha channel
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image1 = 1.0*image / np.max(image)
result = wlsFilter(image1)
cv2.imshow('1', result)
cv2.waitKey(0)
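    # Hedged extension (illustrative): the smoothed output can serve as the base layer
    # of a two-scale decomposition; boosting the residual detail layer is the tone/detail
    # manipulation use case from the referenced paper.
    detail = image1 - result
    boosted = np.clip(result + 2.0 * detail, 0.0, 1.0)
    cv2.imshow('detail boosted', boosted)
    cv2.waitKey(0)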
|
[
"numpy.abs",
"numpy.reshape",
"numpy.log",
"numpy.diff",
"cv2.imshow",
"numpy.max",
"numpy.array",
"cv2.waitKey",
"numpy.concatenate",
"cv2.cvtColor",
"numpy.pad",
"scipy.sparse.spdiags",
"cv2.imread"
] |
[((1124, 1142), 'numpy.log', 'np.log', (['(IN + 1e-22)'], {}), '(IN + 1e-22)\n', (1130, 1142), True, 'import numpy as np\n'), ((1353, 1376), 'numpy.diff', 'np.diff', (['L'], {'n': '(1)', 'axis': '(0)'}), '(L, n=1, axis=0)\n', (1360, 1376), True, 'import numpy as np\n'), ((1468, 1508), 'numpy.pad', 'np.pad', (['dy', '((0, 1), (0, 0))', '"""constant"""'], {}), "(dy, ((0, 1), (0, 0)), 'constant')\n", (1474, 1508), True, 'import numpy as np\n'), ((1566, 1589), 'numpy.diff', 'np.diff', (['L'], {'n': '(1)', 'axis': '(1)'}), '(L, n=1, axis=1)\n', (1573, 1589), True, 'import numpy as np\n'), ((1648, 1688), 'numpy.pad', 'np.pad', (['dx', '((0, 0), (0, 1))', '"""constant"""'], {}), "(dx, ((0, 0), (0, 1)), 'constant')\n", (1654, 1688), True, 'import numpy as np\n'), ((1820, 1856), 'numpy.concatenate', 'np.concatenate', (['[[dx], [dy]]'], {'axis': '(0)'}), '([[dx], [dy]], axis=0)\n', (1834, 1856), True, 'import numpy as np\n'), ((1865, 1888), 'numpy.array', 'np.array', (['[-height, -1]'], {}), '([-height, -1])\n', (1873, 1888), True, 'import numpy as np\n'), ((1899, 1918), 'scipy.sparse.spdiags', 'spdiags', (['B', 'd', 'k', 'k'], {}), '(B, d, k, k)\n', (1906, 1918), False, 'from scipy.sparse import spdiags\n'), ((1941, 1976), 'numpy.pad', 'np.pad', (['dx', '(height, 0)', '"""constant"""'], {}), "(dx, (height, 0), 'constant')\n", (1947, 1976), True, 'import numpy as np\n'), ((2014, 2044), 'numpy.pad', 'np.pad', (['dy', '(1, 0)', '"""constant"""'], {}), "(dy, (1, 0), 'constant')\n", (2020, 2044), True, 'import numpy as np\n'), ((2206, 2249), 'numpy.reshape', 'np.reshape', (['OUT', '(height, width)'], {'order': '"""F"""'}), "(OUT, (height, width), order='F')\n", (2216, 2249), True, 'import numpy as np\n'), ((2304, 2323), 'cv2.imread', 'cv2.imread', (['"""1.png"""'], {}), "('1.png')\n", (2314, 2323), False, 'import cv2\n'), ((2445, 2484), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2457, 2484), False, 'import cv2\n'), ((2559, 2582), 'cv2.imshow', 'cv2.imshow', (['"""1"""', 'result'], {}), "('1', result)\n", (2569, 2582), False, 'import cv2\n'), ((2587, 2601), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2598, 2601), False, 'import cv2\n'), ((2118, 2137), 'scipy.sparse.spdiags', 'spdiags', (['D', '(0)', 'k', 'k'], {}), '(D, 0, k, k)\n', (2125, 2137), False, 'from scipy.sparse import spdiags\n'), ((2510, 2523), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (2516, 2523), True, 'import numpy as np\n'), ((1429, 1439), 'numpy.abs', 'np.abs', (['dy'], {}), '(dy)\n', (1435, 1439), True, 'import numpy as np\n'), ((1609, 1619), 'numpy.abs', 'np.abs', (['dx'], {}), '(dx)\n', (1615, 1619), True, 'import numpy as np\n')]
|
import numpy as np
def place_mirror(im, x1, x2, y1, y2, mr):
""" Place an image mr in specified locations of an image im. The edge locations in im where mr is to be placed are
(x1,y1) and (x2,y2)
Programmer
---------
<NAME> (JHU/APL, 10/12/05)
"""
nxa = np.zeros(2)
nya = np.zeros(2)
res = im[x1:x2 + 1, y1:y2 + 1].shape
nxa[0] = res[0]
nya[0] = res[1]
res = mr.shape
nxa[1] = res[0]
nya[1] = res[1]
    # cast to int so the values can be used as slice indices
    nx = int(np.min(nxa))
    ny = int(np.min(nya))
    im[x1:x1 + nx, y1:y1 + ny] = mr[0:nx, 0:ny]
return im
def expand_image(im, ext_x, ext_y, mirror=0):
"""Enlarge the linear dimensions of an image by a (ext_x,ext_y) and put the initial image at the center.
If the keyword /mirror is set, the additional space corresponds to a mirror image of the initial image.
Programmer
----------
<NAME> (JHU/APL, 09/30/05)
"""
res = im.shape
id1 = res[0]
id2 = res[1]
mim = np.zeros((int(id1 + ext_x), int(id2 + ext_y)))
    # int() keeps the offsets usable as slice indices; np.float is no longer available in NumPy
    stx = int(np.fix(float(ext_x) / 2. + 0.5))
    sty = int(np.fix(float(ext_y) / 2. + 0.5))
mim[int(stx):int(stx + id1), int(sty):int(sty + id2)] = im
if mirror != 0:
if stx <= id1:
xmr = int(stx)
else:
xmr = int(id1)
mr1 = im[0:xmr, :]
mr1 = np.flip(mr1, axis=0)
mr2 = im[id1 - xmr:id1, :]
mr2 = np.flip(mr2, axis=0)
mim = place_mirror(mim, 0, stx - 1, sty, sty + id2 - 1, mr1)
mim = place_mirror(mim, stx + id1, id1 + ext_x - 1, sty, sty + id2 - 1, mr2)
if sty <= id2:
ymr = int(sty)
else:
ymr = int(id2)
mr1 = mim[:, ymr:2 * ymr]
mr1 = np.flip(mr1, axis=1)
mr2 = mim[:, id2:ymr + id2]
mr2 = np.flip(mr2, axis=1)
mim = place_mirror(mim, 0, id1 + ext_x - 1, 0, sty - 1, mr1)
mim = place_mirror(mim, 0, id1 + ext_x - 1, sty + id2, id2 + ext_y - 1, mr2)
return mim, stx, sty
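# A minimal usage sketch, assuming the two routines above live in the same
# module: pad a small ramp image by 4 pixels per axis and mirror the borders
# instead of leaving them zero. The `demo` array and the printed values are
# purely illustrative.
if __name__ == "__main__":
    demo = np.arange(16, dtype=float).reshape(4, 4)
    padded, stx, sty = expand_image(demo, 4, 4, mirror=1)
    print(padded.shape)  # (8, 8)
    print(stx, sty)      # offset of the original image inside the padded frame
    print(np.array_equal(padded[stx:stx + 4, sty:sty + 4], demo))  # True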
|
[
"numpy.flip",
"numpy.zeros",
"numpy.float",
"numpy.min"
] |
[((290, 301), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (298, 301), True, 'import numpy as np\n'), ((312, 323), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (320, 323), True, 'import numpy as np\n'), ((474, 485), 'numpy.min', 'np.min', (['nxa'], {}), '(nxa)\n', (480, 485), True, 'import numpy as np\n'), ((495, 506), 'numpy.min', 'np.min', (['nya'], {}), '(nya)\n', (501, 506), True, 'import numpy as np\n'), ((1327, 1347), 'numpy.flip', 'np.flip', (['mr1'], {'axis': '(0)'}), '(mr1, axis=0)\n', (1334, 1347), True, 'import numpy as np\n'), ((1397, 1417), 'numpy.flip', 'np.flip', (['mr2'], {'axis': '(0)'}), '(mr2, axis=0)\n', (1404, 1417), True, 'import numpy as np\n'), ((1711, 1731), 'numpy.flip', 'np.flip', (['mr1'], {'axis': '(1)'}), '(mr1, axis=1)\n', (1718, 1731), True, 'import numpy as np\n'), ((1782, 1802), 'numpy.flip', 'np.flip', (['mr2'], {'axis': '(1)'}), '(mr2, axis=1)\n', (1789, 1802), True, 'import numpy as np\n'), ((1038, 1053), 'numpy.float', 'np.float', (['ext_x'], {}), '(ext_x)\n', (1046, 1053), True, 'import numpy as np\n'), ((1083, 1098), 'numpy.float', 'np.float', (['ext_y'], {}), '(ext_y)\n', (1091, 1098), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#
# Testing module for ACME's `ParallelMap` interface
#
# Builtin/3rd party package imports
from multiprocessing import Value
import os
import sys
import pickle
import shutil
import inspect
import subprocess
import getpass
import time
import itertools
import logging
from typing import Type
import h5py
import pytest
import signal as sys_signal
import numpy as np
import dask.distributed as dd
from glob import glob
from scipy import signal
# Import main actors here
from acme import ParallelMap, cluster_cleanup, esi_cluster_setup
from acme.shared import is_slurm_node
# Construct decorators for skipping certain tests
skip_in_win32 = pytest.mark.skipif(sys.platform == "win32", reason="Not running in Windows")
# Functions that act as stand-ins for user-funcs
def simple_func(x, y, z=3):
return (x + y) * z
def medium_func(x, y, z=3, w=np.ones((3, 3))):
return (sum(x) + y) * z * w.max()
def hard_func(x, y, z=3, w=np.zeros((3, 1)), **kwargs):
return sum(x) + y, z * w
def lowpass_simple(h5name, channel_no):
with h5py.File(h5name, "r") as h5f:
channel = h5f["data"][:, channel_no]
b = h5f["data"].attrs["b"]
a = h5f["data"].attrs["a"]
res = signal.filtfilt(b, a, channel, padlen=200)
return res
def lowpass_hard(arr_like, b, a, res_dir, res_base="lowpass_hard_", dset_name="custom_dset_name", padlen=200, taskID=None):
channel = arr_like[:, taskID]
res = signal.filtfilt(b, a, channel, padlen=padlen)
h5name = os.path.join(res_dir, res_base +"{}.h5".format(taskID))
with h5py.File(h5name, "w") as h5f:
h5f.create_dataset(dset_name, data=res)
return
def pickle_func(arr, b, a, channel_no, sabotage_hdf5=False):
res = signal.filtfilt(b, a, arr[:, channel_no], padlen=200)
if sabotage_hdf5:
if channel_no % 2 == 0:
return {"b" : b}
return res
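# A hedged illustration of how a stand-in such as `simple_func` is dispatched
# through ParallelMap outside the pytest harness; the helper name
# `_example_dispatch` is invented here and is never called by the tests below.
def _example_dispatch():
    with ParallelMap(simple_func, [2, 4, 6, 8], 4,
                     write_worker_results=False,
                     setup_interactive=False) as pmap:
        return pmap.compute()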
# Perform SLURM-specific tests only on cluster nodes
useSLURM = is_slurm_node()
# Main testing class
class TestParallelMap():
# Construct linear combination of low- and high-frequency sine waves
# and use an IIR filter to reconstruct the low-frequency component
nChannels = 32
nTrials = 8
fData = 2
fNoise = 64
fs = 1000
t = np.linspace(-1, 1, fs)
orig = np.sin(2 * np.pi * fData * t)
sig = orig + np.sin(2 * np.pi * fNoise * t)
cutoff = 50
b, a = signal.butter(8, 2 * cutoff / fs)
# Blow up the signal to have "channels" and "trials": even/odd channels have
# opposing periodicity; do the same to the low-freq component
sig = np.repeat(sig.reshape(-1, 1), axis=1, repeats=nChannels)
sig[:, ::2] *= -1
sig = np.tile(sig, (nTrials, 1))
orig = np.repeat(orig.reshape(-1, 1), axis=1, repeats=nChannels)
orig[:, ::2] *= -1
orig = np.tile(orig, (nTrials, 1))
# Error tolerance for low-pass filtered results
tol = 1e-3
# Test setup of `ParallelMap` w/different functions args/kwargs
def test_init(self):
        # Collect auto-generated output directories in a list for later cleanup
outDirs = []
# Basic functionality w/simplest conceivable user-func
pmap = ParallelMap(simple_func, [2, 4, 6, 8], 4, setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(simple_func, [2, 4, 6, 8], y=4, setup_interactive=False) # pos arg referenced via kwarg, cfg #2
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(simple_func, 0, 4, z=[3, 4, 5, 6], setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(simple_func, [2, 4, 6, 8], [2, 2], n_inputs=2, setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
# User func has `np.ndarray` as keyword
pmap = ParallelMap(medium_func, [2, 4, 6, 8], y=[2, 2], n_inputs=2, setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(medium_func, None, None, w=[np.ones((3, 3)), 2 * np.ones((3,3))], setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(medium_func, None, None, z=np.zeros((3,)), setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(medium_func, None, None, z=np.zeros((3, 1)), setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
# Lots of ways for this to go wrong...
pmap = ParallelMap(hard_func, [2, 4, 6, 8], 2, w=np.ones((3,)), setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(hard_func, [2, 4, 6, 8], y=22, w=np.ones((7, 1)), setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(hard_func, np.ones((3,)), 1, w=np.ones((7, 1)), setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(hard_func, [2, 4, 6, 8], [2, 2], z=np.array([1, 2]), w=np.ones((8, 1)), n_inputs=2, setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap = ParallelMap(hard_func, [2, 4, 6, 8], [2, 2], w=np.ones((8, 1)), n_inputs=4, setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
# Ensure erroneous/ambiguous setups trigger the appropriate errors:
# not enough positional args
with pytest.raises(ValueError) as valerr:
ParallelMap(simple_func, 4, setup_interactive=False)
assert "simple_func expects 2 positional arguments ('x', 'y'), found 1" in str(valerr.value)
# invalid kwargs
with pytest.raises(ValueError) as valerr:
ParallelMap(simple_func, 4, 4, z=3, w=4, setup_interactive=False)
assert "simple_func accepts at maximum 1 keyword arguments ('z'), found 2" in str(valerr.value)
# ill-posed parallelization: two candidate lists for input distribution
with pytest.raises(ValueError) as valerr:
ParallelMap(simple_func, [2, 4, 6, 8], [2, 2], setup_interactive=False)
assert "automatic input distribution failed: found 2 objects containing 2 to 4 elements" in str(valerr.value)
# ill-posed parallelization: two candidate lists for input distribution (`x` and `w`)
with pytest.raises(ValueError) as valerr:
ParallelMap(medium_func, [1, 2, 3], None, w=[np.ones((3,3)), 2 * np.ones((3,3))], setup_interactive=False)
assert "automatic input distribution failed: found 2 objects containing 2 to 3 elements." in str(valerr.value)
# invalid input spec
with pytest.raises(ValueError) as valerr:
ParallelMap(simple_func, [2, 4, 6, 8], [2, 2], n_inputs=3, setup_interactive=False)
assert "No object has required length of 3 matching `n_inputs`" in str(valerr.value)
# invalid input spec: `w` expects a NumPy array, thus it is not considered for input distribution
with pytest.raises(ValueError) as valerr:
ParallelMap(hard_func, [2, 4, 6, 8], [2, 2], w=np.ones((8, 1)), n_inputs=8, setup_interactive=False)
assert "No object has required length of 8 matching `n_inputs`" in str(valerr.value)
# Clean up testing folder and any running clients
cluster_cleanup()
for folder in outDirs:
shutil.rmtree(folder, ignore_errors=True)
# Functionality tests: perform channel-concurrent low-pass filtering
def test_filter_example(self):
# If called by `test_existing_cluster` use pre-allocated client for all computations
try:
dd.get_client()
existingClient = True
except ValueError:
existingClient = False
# Create tmp directory and create data-containers
tempDir = os.path.join(os.path.abspath(os.path.expanduser("~")), "acme_tmp")
if useSLURM:
tempDir = "/cs/home/{}/acme_tmp".format(getpass.getuser())
os.makedirs(tempDir, exist_ok=True)
sigName = os.path.join(tempDir, "sigdata.h5")
origName = os.path.join(tempDir, "origdata.h5")
with h5py.File(sigName, "w") as sigFile:
dset = sigFile.create_dataset("data", data=self.sig)
dset.attrs["b"] = self.b
dset.attrs["a"] = self.a
with h5py.File(origName, "w") as origFile:
origFile.create_dataset("data", data=self.orig)
        # Collect auto-generated output directories in a list for later cleanup
outDirs = []
# Parallelize across channels, write results to disk
with ParallelMap(lowpass_simple, sigName, range(self.nChannels), setup_interactive=False) as pmap:
resOnDisk = pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
assert len(pmap.kwargv["outFile"]) == pmap.n_calls
resFiles = [os.path.join(pmap.kwargv["outDir"][0], outFile) for outFile in pmap.kwargv["outFile"]]
assert resOnDisk == resFiles
assert all(os.path.isfile(fle) for fle in resOnDisk)
# Compare computed single-channel results to expected low-freq signal
for chNo, h5name in enumerate(resOnDisk):
with h5py.File(h5name, "r") as h5f:
assert np.mean(np.abs(h5f["result_0"][()] - self.orig[:, chNo])) < self.tol
# Same, but collect results in memory: ensure nothing freaky happens
with ParallelMap(lowpass_simple,
sigName,
range(self.nChannels),
write_worker_results=False,
setup_interactive=False) as pmap:
resInMem = pmap.compute()
for chNo in range(self.nChannels):
assert np.mean(np.abs(resInMem[chNo] - self.orig[:, chNo])) < self.tol
# Be double-paranoid: ensure on-disk and in-memory results match up
for chNo, h5name in enumerate(resOnDisk):
with h5py.File(h5name, "r") as h5f:
assert np.array_equal(h5f["result_0"][()], resInMem[chNo])
# Simulate user-defined results-directory
tempDir2 = os.path.join(os.path.abspath(os.path.expanduser("~")), "acme_tmp_lowpass_hard")
if useSLURM:
tempDir2 = "/cs/home/{}/acme_tmp_lowpass_hard".format(getpass.getuser())
shutil.rmtree(tempDir2, ignore_errors=True)
os.makedirs(tempDir2, exist_ok=True)
# Same task, different function: simulate user-defined saving scheme and "weird" inputs
sigData = h5py.File(sigName, "r")["data"]
res_base = "lowpass_hard_"
dset_name = "custom_dset_name"
with ParallelMap(lowpass_hard,
sigData,
self.b,
self.a,
res_dir=tempDir2,
res_base=res_base,
dset_name=dset_name,
padlen=[200] * self.nChannels,
n_inputs=self.nChannels,
write_worker_results=False,
setup_interactive=False) as pmap:
pmap.compute()
resFiles = glob(os.path.join(tempDir2, res_base + "*"))
assert len(resFiles) == pmap.n_calls
# Compare computed single-channel results to expected low-freq signal
for chNo in range(self.nChannels):
h5name = res_base + "{}.h5".format(chNo)
with h5py.File(os.path.join(tempDir2, h5name), "r") as h5f:
assert np.mean(np.abs(h5f[dset_name][()] - self.orig[:, chNo])) < self.tol
# Ensure log-file generation produces a non-empty log-file at the expected location
# Bonus: leave computing client alive and vet default SLURM settings
if not existingClient:
cluster_cleanup(pmap.client)
for handler in pmap.log.handlers:
if isinstance(handler, logging.FileHandler):
pmap.log.handlers.remove(handler)
with ParallelMap(lowpass_simple,
sigName,
range(self.nChannels),
logfile=True,
stop_client=False,
setup_interactive=False) as pmap:
pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
logFileList = [handler.baseFilename for handler in pmap.log.handlers if isinstance(handler, logging.FileHandler)]
assert len(logFileList) == 1
logFile = logFileList[0]
assert os.path.dirname(os.path.realpath(__file__)) in logFile
with open(logFile, "r") as fl:
assert len(fl.readlines()) > 1
# Ensure client has not been killed; perform post-hoc check of default SLURM settings
assert dd.get_client()
client = dd.get_client()
if useSLURM and not existingClient:
assert pmap.n_calls == pmap.n_jobs
assert len(client.cluster.workers) == pmap.n_jobs
partition = client.cluster.job_header.split("-p ")[1].split("\n")[0]
assert "8GB" in partition
memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
assert memory.size == 1
assert round(memory[0] / 1000**3) == [int(s) for s in partition if s.isdigit()][0]
# Same, but use custom log-file
for handler in pmap.log.handlers:
if isinstance(handler, logging.FileHandler):
pmap.log.handlers.remove(handler)
customLog = os.path.join(tempDir, "acme_log.txt")
with ParallelMap(lowpass_simple,
sigName,
range(self.nChannels),
logfile=customLog,
verbose=True,
stop_client=True,
setup_interactive=False) as pmap:
pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
assert os.path.isfile(customLog)
with open(customLog, "r") as fl:
assert len(fl.readlines()) > 1
# Ensure client has been stopped
with pytest.raises(ValueError):
dd.get_client()
# Underbook SLURM (more calls than jobs)
partition = "8GBXS"
n_jobs = int(self.nChannels / 2)
mem_per_job = "2GB"
with ParallelMap(lowpass_simple,
sigName,
range(self.nChannels),
partition=partition,
n_jobs=n_jobs,
mem_per_job=mem_per_job,
stop_client=False,
setup_interactive=False) as pmap:
pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
# Post-hoc check of client to ensure custom settings were respected
client = pmap.client
assert pmap.n_calls == self.nChannels
if useSLURM:
assert pmap.n_jobs == n_jobs
assert len(client.cluster.workers) == pmap.n_jobs
actualPartition = client.cluster.job_header.split("-p ")[1].split("\n")[0]
assert actualPartition == partition
memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
assert memory.size == 1
assert round(memory[0] / 1000**3) == int(mem_per_job.replace("GB", ""))
# Let `cluster_cleanup` murder the custom setup and ensure it did its job
if not existingClient:
cluster_cleanup(pmap.client)
with pytest.raises(ValueError):
dd.get_client()
# Overbook SLURM (more jobs than calls)
partition = "8GBXS"
n_jobs = self.nChannels + 2
mem_per_job = "3000MB"
with ParallelMap(lowpass_simple,
sigName,
range(self.nChannels),
partition=partition,
n_jobs=n_jobs,
mem_per_job=mem_per_job,
stop_client=False,
setup_interactive=False) as pmap:
pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
# Post-hoc check of client to ensure custom settings were respected
client = pmap.client
assert pmap.n_calls == self.nChannels
if useSLURM:
assert pmap.n_jobs == n_jobs
assert len(client.cluster.workers) == pmap.n_jobs
actualPartition = client.cluster.job_header.split("-p ")[1].split("\n")[0]
assert actualPartition == partition
memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
assert memory.size == 1
assert round(memory[0] / 1000**3) * 1000 == int(mem_per_job.replace("MB", ""))
if not existingClient:
cluster_cleanup(pmap.client)
# Close any open HDF5 files to not trigger any `OSError`s, close running clusters
# and clean up tmp dirs and created directories/log-files
sigData.file.close()
try:
os.unlink(logFile)
except PermissionError:
pass
shutil.rmtree(tempDir, ignore_errors=True)
shutil.rmtree(tempDir2, ignore_errors=True)
for folder in outDirs:
shutil.rmtree(folder, ignore_errors=True)
# Wait a second (literally) so that no new parallel jobs started by
# `test_existing_cluster` erroneously use existing HDF files
time.sleep(1.0)
# Test if pickling/emergency pickling and I/O in general works as intended
def test_pickling(self):
        # Collect auto-generated output directories in a list for later cleanup
outDirs = []
# Execute `pickle_func` w/regular HDF5 saving
with ParallelMap(pickle_func,
self.sig,
self.b,
self.a,
range(self.nChannels),
sabotage_hdf5=False,
n_inputs=self.nChannels,
setup_interactive=False) as pmap:
hdfResults = pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
# Execute `pickle_func` w/pickling
with ParallelMap(pickle_func,
self.sig,
self.b,
self.a,
range(self.nChannels),
n_inputs=self.nChannels,
write_pickle=True,
setup_interactive=False) as pmap:
pklResults = pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
# Ensure HDF5 and pickle match up
for chNo, h5name in enumerate(hdfResults):
with open(pklResults[chNo], "rb") as pkf:
pklRes = pickle.load(pkf)
with h5py.File(h5name, "r") as h5f:
assert np.array_equal(pklRes, h5f["result_0"][()])
# Test emergency pickling
with ParallelMap(pickle_func,
self.sig,
self.b,
self.a,
range(self.nChannels),
sabotage_hdf5=True,
n_inputs=self.nChannels,
setup_interactive=False) as pmap:
mixedResults = pmap.compute()
outDirs.append(pmap.kwargv["outDir"][0])
# Ensure non-compliant dicts were pickled, rest is in HDF5
for chNo, fname in enumerate(mixedResults):
if chNo % 2 == 0:
assert fname.endswith(".pickle")
with open(fname, "rb") as pkf:
assert np.array_equal(self.b, pickle.load(pkf)["b"])
else:
assert fname.endswith(".h5")
with h5py.File(fname, "r") as h5f:
with h5py.File(hdfResults[chNo], "r") as h5ref:
assert np.array_equal(h5f["result_0"][()], h5ref["result_0"][()])
# Test write breakdown (both for HDF5 saving and pickling)
pmap = ParallelMap(pickle_func,
self.sig,
self.b,
self.a,
range(self.nChannels),
sabotage_hdf5=True,
n_inputs=self.nChannels,
setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap.kwargv["outDir"][0] = "/path/to/nowhere"
with pytest.raises(RuntimeError) as runerr:
pmap.compute()
assert "<ACMEdaemon> Parallel computation failed" in str(runerr.value)
pmap = ParallelMap(pickle_func,
self.sig,
self.b,
self.a,
range(self.nChannels),
sabotage_hdf5=True,
n_inputs=self.nChannels,
write_pickle=True,
setup_interactive=False)
outDirs.append(pmap.kwargv["outDir"][0])
pmap.kwargv["outDir"][0] = "/path/to/nowhere"
with pytest.raises(RuntimeError) as runerr:
pmap.compute()
assert "<ACMEdaemon> Parallel computation failed" in str(runerr.value)
# Clean up testing folder
for folder in outDirs:
shutil.rmtree(folder, ignore_errors=True)
# test if KeyboardInterrupts are handled correctly
@skip_in_win32
def test_cancel(self):
# Setup temp-directory layout for subprocess-scripts and prepare interpreters
tempDir = os.path.join(os.path.abspath(os.path.expanduser("~")), "acme_tmp")
os.makedirs(tempDir, exist_ok=True)
pshells = [os.path.join(os.path.split(sys.executable)[0], pyExec) for pyExec in ["python", "ipython"]]
# Prepare ad-hoc script for execution in new process
scriptName = os.path.join(tempDir, "dummy.py")
scriptContents = \
"from acme import ParallelMap\n" +\
"import time\n" +\
"def long_running(dummy):\n" +\
" time.sleep(10)\n" +\
" return\n" +\
"if __name__ == '__main__':\n" +\
" with ParallelMap(long_running, [None]*2, setup_interactive=False, write_worker_results=False) as pmap: \n" +\
" pmap.compute()\n" +\
" print('ALL DONE')\n"
with open(scriptName, "w") as f:
f.write(scriptContents)
# Execute the above script both in Python and iPython to ensure global functionality
for pshell in pshells:
# Launch new process in background (`stdbuf` prevents buffering of stdout)
proc = subprocess.Popen("stdbuf -o0 " + pshell + " " + scriptName,
shell=True, start_new_session=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
# Wait for ACME to start up (as soon as logging info is shown, `pmap.compute()` is running)
# However: don't wait indefinitely - if `pmap.compute` is not started within 30s, abort
logStr = "<ParallelMap> INFO: Log information available at"
buffer = bytearray()
timeout = 30
t0 = time.time()
for line in itertools.takewhile(lambda x: time.time() - t0 < timeout, iter(proc.stdout.readline, b"")):
buffer.extend(line)
if logStr in line.decode("utf8"):
break
assert logStr in buffer.decode("utf8")
# Wait a bit, then simulate CTRL+C in sub-process; make sure the above
# impromptu script did not run to completion *but* the created client was
# shut down with CTRL + C
time.sleep(2)
os.killpg(proc.pid, sys_signal.SIGINT)
time.sleep(1)
out = proc.stdout.read().decode()
assert "ALL DONE" not in out
assert "INFO: <cluster_cleanup> Successfully shut down" in out
# Almost identical script, this time use an externally started client
scriptName = os.path.join(tempDir, "dummy2.py")
scriptContents = \
"from acme import ParallelMap, esi_cluster_setup\n" +\
"import time\n" +\
"def long_running(dummy):\n" +\
" time.sleep(10)\n" +\
" return\n" +\
"if __name__ == '__main__':\n" +\
" client = esi_cluster_setup(partition='8GBDEV',n_jobs=1, interactive=False)\n" +\
" with ParallelMap(long_running, [None]*2, setup_interactive=False, write_worker_results=False) as pmap: \n" +\
" pmap.compute()\n" +\
" print('ALL DONE')\n"
with open(scriptName, "w") as f:
f.write(scriptContents)
# Test script functionality in both Python and iPython
for pshell in pshells:
            proc = subprocess.Popen("stdbuf -o0 " + pshell + " " + scriptName,
shell=True, start_new_session=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
logStr = "<ParallelMap> INFO: Log information available at"
buffer = bytearray()
timeout = 30
t0 = time.time()
for line in itertools.takewhile(lambda x: time.time() - t0 < timeout, iter(proc.stdout.readline, b"")):
buffer.extend(line)
if logStr in line.decode("utf8"):
break
assert logStr in buffer.decode("utf8")
time.sleep(2)
os.killpg(proc.pid, sys_signal.SIGINT)
time.sleep(2)
out = proc.stdout.read().decode()
assert "ALL DONE" not in out
assert "<ParallelMap> INFO: <ACME> CTRL + C acknowledged, client and workers successfully killed" in out
# Ensure random exception does not immediately kill an active client
scriptName = os.path.join(tempDir, "dummy3.py")
scriptContents = \
"from acme import esi_cluster_setup\n" +\
"import time\n" +\
"if __name__ == '__main__':\n" +\
" esi_cluster_setup(partition='8GBDEV',n_jobs=1, interactive=False)\n" +\
" time.sleep(60)\n"
with open(scriptName, "w") as f:
f.write(scriptContents)
proc = subprocess.Popen("stdbuf -o0 " + sys.executable + " " + scriptName,
shell=True, start_new_session=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
# Give the client time to start up, then send a floating-point exception
        # (equivalent to a `ZeroDivisionError`) to the child process
time.sleep(5)
assert proc.poll() is None
proc.send_signal(sys_signal.SIGFPE)
        # Ensure the `ZeroDivisionError` did not kill the process. Then terminate it
        # and confirm that the floating-point exception was propagated correctly
assert proc.poll() is None
proc.terminate()
proc.wait()
assert proc.returncode in [-sys_signal.SIGFPE.value, -sys_signal.SIGTERM.value]
# Clean up tmp folder
shutil.rmtree(tempDir, ignore_errors=True)
# test esi-cluster-setup called separately before pmap
def test_existing_cluster(self):
# Test custom SLURM cluster setup
if useSLURM:
# Ensure invalid partition/memory specifications are caught
with pytest.raises(ValueError):
esi_cluster_setup(partition="invalid", interactive=False)
cluster_cleanup()
with pytest.raises(ValueError):
esi_cluster_setup(mem_per_job="invalidGB", interactive=False)
cluster_cleanup()
with pytest.raises(ValueError):
esi_cluster_setup(mem_per_job="-20MB", interactive=False)
cluster_cleanup()
# Over-allocation of memory should default to partition max
client = esi_cluster_setup(partition="8GBDEV", n_jobs=1, mem_per_job="9000MB", interactive=False)
memory = np.unique([w["memory_limit"] for w in client.cluster.scheduler_info["workers"].values()])
assert memory.size == 1
assert np.round(memory / 1000**3)[0] == 8
cluster_cleanup(client)
# Test if invalid extra args are caught
slurmOut = "/cs/home/{}/acme_out".format(getpass.getuser())
with pytest.raises(TypeError):
esi_cluster_setup(job_extra="--output={}".format(slurmOut), interactive=False)
cluster_cleanup()
with pytest.raises(ValueError):
esi_cluster_setup(job_extra=["output={}".format(slurmOut)], interactive=False)
cluster_cleanup()
with pytest.raises(ValueError):
esi_cluster_setup(job_extra=["--output=/path/to/nowhere"], interactive=False)
cluster_cleanup()
# Supply extra args to start client for actual tests
client = esi_cluster_setup(partition="8GBXS", job_extra=["--output={}".format(slurmOut)], interactive=False)
assert "--output={}".format(slurmOut) in client.cluster.job_header
else:
client = esi_cluster_setup(n_jobs=6, interactive=False)
        # Re-run the other tests with the pre-allocated client (skipping `test_cancel` and this test itself)
skipTests = ["test_existing_cluster", "test_cancel"]
all_tests = [attr for attr in self.__dir__()
if (inspect.ismethod(getattr(self, attr)) and attr not in skipTests)]
for test in all_tests:
getattr(self, test)()
client.close()
client.cluster.close()
if useSLURM:
shutil.rmtree(slurmOut, ignore_errors=True)
|
[
"acme.esi_cluster_setup",
"scipy.signal.filtfilt",
"acme.cluster_cleanup",
"time.sleep",
"numpy.array",
"numpy.sin",
"getpass.getuser",
"acme.shared.is_slurm_node",
"subprocess.Popen",
"os.path.split",
"numpy.linspace",
"os.unlink",
"pytest.mark.skipif",
"os.path.expanduser",
"numpy.round",
"numpy.tile",
"numpy.abs",
"numpy.ones",
"pickle.load",
"h5py.File",
"os.path.isfile",
"pytest.raises",
"os.killpg",
"time.time",
"os.makedirs",
"dask.distributed.get_client",
"scipy.signal.butter",
"os.path.join",
"os.path.realpath",
"numpy.zeros",
"numpy.array_equal",
"shutil.rmtree",
"acme.ParallelMap"
] |
[((663, 739), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform == 'win32')"], {'reason': '"""Not running in Windows"""'}), "(sys.platform == 'win32', reason='Not running in Windows')\n", (681, 739), False, 'import pytest\n'), ((1951, 1966), 'acme.shared.is_slurm_node', 'is_slurm_node', ([], {}), '()\n', (1964, 1966), False, 'from acme.shared import is_slurm_node\n'), ((871, 886), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (878, 886), True, 'import numpy as np\n'), ((955, 971), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (963, 971), True, 'import numpy as np\n'), ((1220, 1262), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'channel'], {'padlen': '(200)'}), '(b, a, channel, padlen=200)\n', (1235, 1262), False, 'from scipy import signal\n'), ((1447, 1492), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'channel'], {'padlen': 'padlen'}), '(b, a, channel, padlen=padlen)\n', (1462, 1492), False, 'from scipy import signal\n'), ((1733, 1786), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'arr[:, channel_no]'], {'padlen': '(200)'}), '(b, a, arr[:, channel_no], padlen=200)\n', (1748, 1786), False, 'from scipy import signal\n'), ((2246, 2268), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'fs'], {}), '(-1, 1, fs)\n', (2257, 2268), True, 'import numpy as np\n'), ((2280, 2309), 'numpy.sin', 'np.sin', (['(2 * np.pi * fData * t)'], {}), '(2 * np.pi * fData * t)\n', (2286, 2309), True, 'import numpy as np\n'), ((2385, 2418), 'scipy.signal.butter', 'signal.butter', (['(8)', '(2 * cutoff / fs)'], {}), '(8, 2 * cutoff / fs)\n', (2398, 2418), False, 'from scipy import signal\n'), ((2666, 2692), 'numpy.tile', 'np.tile', (['sig', '(nTrials, 1)'], {}), '(sig, (nTrials, 1))\n', (2673, 2692), True, 'import numpy as np\n'), ((2796, 2823), 'numpy.tile', 'np.tile', (['orig', '(nTrials, 1)'], {}), '(orig, (nTrials, 1))\n', (2803, 2823), True, 'import numpy as np\n'), ((1064, 1086), 'h5py.File', 'h5py.File', (['h5name', '"""r"""'], {}), "(h5name, 'r')\n", (1073, 1086), False, 'import h5py\n'), ((1571, 1593), 'h5py.File', 'h5py.File', (['h5name', '"""w"""'], {}), "(h5name, 'w')\n", (1580, 1593), False, 'import h5py\n'), ((2327, 2357), 'numpy.sin', 'np.sin', (['(2 * np.pi * fNoise * t)'], {}), '(2 * np.pi * fNoise * t)\n', (2333, 2357), True, 'import numpy as np\n'), ((3167, 3233), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '[2, 4, 6, 8]', '(4)'], {'setup_interactive': '(False)'}), '(simple_func, [2, 4, 6, 8], 4, setup_interactive=False)\n', (3178, 3233), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((3298, 3366), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '[2, 4, 6, 8]'], {'y': '(4)', 'setup_interactive': '(False)'}), '(simple_func, [2, 4, 6, 8], y=4, setup_interactive=False)\n', (3309, 3366), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((3471, 3542), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '(0)', '(4)'], {'z': '[3, 4, 5, 6]', 'setup_interactive': '(False)'}), '(simple_func, 0, 4, z=[3, 4, 5, 6], setup_interactive=False)\n', (3482, 3542), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((3607, 3694), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '[2, 4, 6, 8]', '[2, 2]'], {'n_inputs': '(2)', 'setup_interactive': '(False)'}), '(simple_func, [2, 4, 6, 8], [2, 2], n_inputs=2,\n setup_interactive=False)\n', (3618, 3694), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((3804, 3893), 
'acme.ParallelMap', 'ParallelMap', (['medium_func', '[2, 4, 6, 8]'], {'y': '[2, 2]', 'n_inputs': '(2)', 'setup_interactive': '(False)'}), '(medium_func, [2, 4, 6, 8], y=[2, 2], n_inputs=2,\n setup_interactive=False)\n', (3815, 3893), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((7262, 7279), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (7277, 7279), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((7949, 7984), 'os.makedirs', 'os.makedirs', (['tempDir'], {'exist_ok': '(True)'}), '(tempDir, exist_ok=True)\n', (7960, 7984), False, 'import os\n'), ((8003, 8038), 'os.path.join', 'os.path.join', (['tempDir', '"""sigdata.h5"""'], {}), "(tempDir, 'sigdata.h5')\n", (8015, 8038), False, 'import os\n'), ((8058, 8094), 'os.path.join', 'os.path.join', (['tempDir', '"""origdata.h5"""'], {}), "(tempDir, 'origdata.h5')\n", (8070, 8094), False, 'import os\n'), ((10277, 10320), 'shutil.rmtree', 'shutil.rmtree', (['tempDir2'], {'ignore_errors': '(True)'}), '(tempDir2, ignore_errors=True)\n', (10290, 10320), False, 'import shutil\n'), ((10329, 10365), 'os.makedirs', 'os.makedirs', (['tempDir2'], {'exist_ok': '(True)'}), '(tempDir2, exist_ok=True)\n', (10340, 10365), False, 'import os\n'), ((12737, 12752), 'dask.distributed.get_client', 'dd.get_client', ([], {}), '()\n', (12750, 12752), True, 'import dask.distributed as dd\n'), ((12770, 12785), 'dask.distributed.get_client', 'dd.get_client', ([], {}), '()\n', (12783, 12785), True, 'import dask.distributed as dd\n'), ((13510, 13547), 'os.path.join', 'os.path.join', (['tempDir', '"""acme_log.txt"""'], {}), "(tempDir, 'acme_log.txt')\n", (13522, 13547), False, 'import os\n'), ((13947, 13972), 'os.path.isfile', 'os.path.isfile', (['customLog'], {}), '(customLog)\n', (13961, 13972), False, 'import os\n'), ((17215, 17257), 'shutil.rmtree', 'shutil.rmtree', (['tempDir'], {'ignore_errors': '(True)'}), '(tempDir, ignore_errors=True)\n', (17228, 17257), False, 'import shutil\n'), ((17266, 17309), 'shutil.rmtree', 'shutil.rmtree', (['tempDir2'], {'ignore_errors': '(True)'}), '(tempDir2, ignore_errors=True)\n', (17279, 17309), False, 'import shutil\n'), ((17549, 17564), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (17559, 17564), False, 'import time\n'), ((21840, 21875), 'os.makedirs', 'os.makedirs', (['tempDir'], {'exist_ok': '(True)'}), '(tempDir, exist_ok=True)\n', (21851, 21875), False, 'import os\n'), ((22070, 22103), 'os.path.join', 'os.path.join', (['tempDir', '"""dummy.py"""'], {}), "(tempDir, 'dummy.py')\n", (22082, 22103), False, 'import os\n'), ((24324, 24358), 'os.path.join', 'os.path.join', (['tempDir', '"""dummy2.py"""'], {}), "(tempDir, 'dummy2.py')\n", (24336, 24358), False, 'import os\n'), ((26213, 26247), 'os.path.join', 'os.path.join', (['tempDir', '"""dummy3.py"""'], {}), "(tempDir, 'dummy3.py')\n", (26225, 26247), False, 'import os\n'), ((26620, 26794), 'subprocess.Popen', 'subprocess.Popen', (["('stdbuf -o0 ' + sys.executable + ' ' + scriptName)"], {'shell': '(True)', 'start_new_session': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'bufsize': '(0)'}), "('stdbuf -o0 ' + sys.executable + ' ' + scriptName, shell=\n True, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess\n .STDOUT, bufsize=0)\n", (26636, 26794), False, 'import subprocess\n'), ((27007, 27020), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (27017, 27020), False, 'import time\n'), ((27467, 27509), 'shutil.rmtree', 'shutil.rmtree', (['tempDir'], 
{'ignore_errors': '(True)'}), '(tempDir, ignore_errors=True)\n', (27480, 27509), False, 'import shutil\n'), ((4779, 4792), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (4786, 4792), True, 'import numpy as np\n'), ((5367, 5392), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5380, 5392), False, 'import pytest\n'), ((5416, 5468), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '(4)'], {'setup_interactive': '(False)'}), '(simple_func, 4, setup_interactive=False)\n', (5427, 5468), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((5612, 5637), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5625, 5637), False, 'import pytest\n'), ((5661, 5726), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '(4)', '(4)'], {'z': '(3)', 'w': '(4)', 'setup_interactive': '(False)'}), '(simple_func, 4, 4, z=3, w=4, setup_interactive=False)\n', (5672, 5726), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((5928, 5953), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5941, 5953), False, 'import pytest\n'), ((5977, 6048), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '[2, 4, 6, 8]', '[2, 2]'], {'setup_interactive': '(False)'}), '(simple_func, [2, 4, 6, 8], [2, 2], setup_interactive=False)\n', (5988, 6048), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((6278, 6303), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6291, 6303), False, 'import pytest\n'), ((6599, 6624), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6612, 6624), False, 'import pytest\n'), ((6648, 6735), 'acme.ParallelMap', 'ParallelMap', (['simple_func', '[2, 4, 6, 8]', '[2, 2]'], {'n_inputs': '(3)', 'setup_interactive': '(False)'}), '(simple_func, [2, 4, 6, 8], [2, 2], n_inputs=3,\n setup_interactive=False)\n', (6659, 6735), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((6948, 6973), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6961, 6973), False, 'import pytest\n'), ((7323, 7364), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {'ignore_errors': '(True)'}), '(folder, ignore_errors=True)\n', (7336, 7364), False, 'import shutil\n'), ((7593, 7608), 'dask.distributed.get_client', 'dd.get_client', ([], {}), '()\n', (7606, 7608), True, 'import dask.distributed as dd\n'), ((8108, 8131), 'h5py.File', 'h5py.File', (['sigName', '"""w"""'], {}), "(sigName, 'w')\n", (8117, 8131), False, 'import h5py\n'), ((8296, 8320), 'h5py.File', 'h5py.File', (['origName', '"""w"""'], {}), "(origName, 'w')\n", (8305, 8320), False, 'import h5py\n'), ((8832, 8879), 'os.path.join', 'os.path.join', (["pmap.kwargv['outDir'][0]", 'outFile'], {}), "(pmap.kwargv['outDir'][0], outFile)\n", (8844, 8879), False, 'import os\n'), ((10481, 10504), 'h5py.File', 'h5py.File', (['sigName', '"""r"""'], {}), "(sigName, 'r')\n", (10490, 10504), False, 'import h5py\n'), ((10600, 10830), 'acme.ParallelMap', 'ParallelMap', (['lowpass_hard', 'sigData', 'self.b', 'self.a'], {'res_dir': 'tempDir2', 'res_base': 'res_base', 'dset_name': 'dset_name', 'padlen': '([200] * self.nChannels)', 'n_inputs': 'self.nChannels', 'write_worker_results': '(False)', 'setup_interactive': '(False)'}), '(lowpass_hard, sigData, self.b, self.a, res_dir=tempDir2,\n res_base=res_base, dset_name=dset_name, padlen=[200] * self.nChannels,\n n_inputs=self.nChannels, write_worker_results=False, setup_interactive=\n False)\n', 
(10611, 10830), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((11128, 11166), 'os.path.join', 'os.path.join', (['tempDir2', "(res_base + '*')"], {}), "(tempDir2, res_base + '*')\n", (11140, 11166), False, 'import os\n'), ((11764, 11792), 'acme.cluster_cleanup', 'cluster_cleanup', (['pmap.client'], {}), '(pmap.client)\n', (11779, 11792), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((14112, 14137), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14125, 14137), False, 'import pytest\n'), ((14151, 14166), 'dask.distributed.get_client', 'dd.get_client', ([], {}), '()\n', (14164, 14166), True, 'import dask.distributed as dd\n'), ((15520, 15548), 'acme.cluster_cleanup', 'cluster_cleanup', (['pmap.client'], {}), '(pmap.client)\n', (15535, 15548), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((16899, 16927), 'acme.cluster_cleanup', 'cluster_cleanup', (['pmap.client'], {}), '(pmap.client)\n', (16914, 16927), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((17139, 17157), 'os.unlink', 'os.unlink', (['logFile'], {}), '(logFile)\n', (17148, 17157), False, 'import os\n'), ((17353, 17394), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {'ignore_errors': '(True)'}), '(folder, ignore_errors=True)\n', (17366, 17394), False, 'import shutil\n'), ((20630, 20657), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (20643, 20657), False, 'import pytest\n'), ((21289, 21316), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (21302, 21316), False, 'import pytest\n'), ((21516, 21557), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {'ignore_errors': '(True)'}), '(folder, ignore_errors=True)\n', (21529, 21557), False, 'import shutil\n'), ((22879, 23044), 'subprocess.Popen', 'subprocess.Popen', (["('stdbuf -o0 ' + pshell + ' ' + scriptName)"], {'shell': '(True)', 'start_new_session': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'bufsize': '(0)'}), "('stdbuf -o0 ' + pshell + ' ' + scriptName, shell=True,\n start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess.\n STDOUT, bufsize=0)\n", (22895, 23044), False, 'import subprocess\n'), ((23460, 23471), 'time.time', 'time.time', ([], {}), '()\n', (23469, 23471), False, 'import time\n'), ((23971, 23984), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (23981, 23984), False, 'import time\n'), ((23997, 24035), 'os.killpg', 'os.killpg', (['proc.pid', 'sys_signal.SIGINT'], {}), '(proc.pid, sys_signal.SIGINT)\n', (24006, 24035), False, 'import os\n'), ((24048, 24061), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (24058, 24061), False, 'import time\n'), ((25132, 25306), 'subprocess.Popen', 'subprocess.Popen', (["('stdbuf -o0 ' + sys.executable + ' ' + scriptName)"], {'shell': '(True)', 'start_new_session': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'bufsize': '(0)'}), "('stdbuf -o0 ' + sys.executable + ' ' + scriptName, shell=\n True, start_new_session=True, stdout=subprocess.PIPE, stderr=subprocess\n .STDOUT, bufsize=0)\n", (25148, 25306), False, 'import subprocess\n'), ((25516, 25527), 'time.time', 'time.time', ([], {}), '()\n', (25525, 25527), False, 'import time\n'), ((25819, 25832), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (25829, 25832), False, 'import time\n'), ((25845, 25883), 'os.killpg', 'os.killpg', (['proc.pid', 'sys_signal.SIGINT'], {}), '(proc.pid, sys_signal.SIGINT)\n', 
(25854, 25883), False, 'import os\n'), ((25896, 25909), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (25906, 25909), False, 'import time\n'), ((27874, 27891), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (27889, 27891), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28026, 28043), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (28041, 28043), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28174, 28191), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (28189, 28191), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28286, 28378), 'acme.esi_cluster_setup', 'esi_cluster_setup', ([], {'partition': '"""8GBDEV"""', 'n_jobs': '(1)', 'mem_per_job': '"""9000MB"""', 'interactive': '(False)'}), "(partition='8GBDEV', n_jobs=1, mem_per_job='9000MB',\n interactive=False)\n", (28303, 28378), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28588, 28611), 'acme.cluster_cleanup', 'cluster_cleanup', (['client'], {}), '(client)\n', (28603, 28611), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28887, 28904), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (28902, 28904), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((29056, 29073), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (29071, 29073), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((29224, 29241), 'acme.cluster_cleanup', 'cluster_cleanup', ([], {}), '()\n', (29239, 29241), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((29544, 29590), 'acme.esi_cluster_setup', 'esi_cluster_setup', ([], {'n_jobs': '(6)', 'interactive': '(False)'}), '(n_jobs=6, interactive=False)\n', (29561, 29590), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((30025, 30068), 'shutil.rmtree', 'shutil.rmtree', (['slurmOut'], {'ignore_errors': '(True)'}), '(slurmOut, ignore_errors=True)\n', (30038, 30068), False, 'import shutil\n'), ((4160, 4174), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (4168, 4174), True, 'import numpy as np\n'), ((4304, 4320), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4312, 4320), True, 'import numpy as np\n'), ((4501, 4514), 'numpy.ones', 'np.ones', (['(3,)'], {}), '((3,))\n', (4508, 4514), True, 'import numpy as np\n'), ((4650, 4665), 'numpy.ones', 'np.ones', (['(7, 1)'], {}), '((7, 1))\n', (4657, 4665), True, 'import numpy as np\n'), ((4799, 4814), 'numpy.ones', 'np.ones', (['(7, 1)'], {}), '((7, 1))\n', (4806, 4814), True, 'import numpy as np\n'), ((4952, 4968), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (4960, 4968), True, 'import numpy as np\n'), ((4972, 4987), 'numpy.ones', 'np.ones', (['(8, 1)'], {}), '((8, 1))\n', (4979, 4987), True, 'import numpy as np\n'), ((5137, 5152), 'numpy.ones', 'np.ones', (['(8, 1)'], {}), '((8, 1))\n', (5144, 5152), True, 'import numpy as np\n'), ((7811, 7834), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (7829, 7834), False, 'import os\n'), ((7922, 7939), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (7937, 7939), False, 'import getpass\n'), ((8975, 8994), 'os.path.isfile', 'os.path.isfile', (['fle'], {}), '(fle)\n', (8989, 8994), False, 'import os\n'), ((9163, 9185), 'h5py.File', 'h5py.File', (['h5name', '"""r"""'], {}), "(h5name, 'r')\n", (9172, 9185), False, 'import h5py\n'), 
((9907, 9929), 'h5py.File', 'h5py.File', (['h5name', '"""r"""'], {}), "(h5name, 'r')\n", (9916, 9929), False, 'import h5py\n'), ((9961, 10012), 'numpy.array_equal', 'np.array_equal', (["h5f['result_0'][()]", 'resInMem[chNo]'], {}), "(h5f['result_0'][()], resInMem[chNo])\n", (9975, 10012), True, 'import numpy as np\n'), ((10112, 10135), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (10130, 10135), False, 'import os\n'), ((10250, 10267), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (10265, 10267), False, 'import getpass\n'), ((12506, 12532), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12522, 12532), False, 'import os\n'), ((15566, 15591), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15579, 15591), False, 'import pytest\n'), ((15609, 15624), 'dask.distributed.get_client', 'dd.get_client', ([], {}), '()\n', (15622, 15624), True, 'import dask.distributed as dd\n'), ((18908, 18924), 'pickle.load', 'pickle.load', (['pkf'], {}), '(pkf)\n', (18919, 18924), False, 'import pickle\n'), ((18942, 18964), 'h5py.File', 'h5py.File', (['h5name', '"""r"""'], {}), "(h5name, 'r')\n", (18951, 18964), False, 'import h5py\n'), ((18996, 19039), 'numpy.array_equal', 'np.array_equal', (['pklRes', "h5f['result_0'][()]"], {}), "(pklRes, h5f['result_0'][()])\n", (19010, 19039), True, 'import numpy as np\n'), ((21794, 21817), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (21812, 21817), False, 'import os\n'), ((27761, 27786), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (27774, 27786), False, 'import pytest\n'), ((27804, 27861), 'acme.esi_cluster_setup', 'esi_cluster_setup', ([], {'partition': '"""invalid"""', 'interactive': '(False)'}), "(partition='invalid', interactive=False)\n", (27821, 27861), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((27909, 27934), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (27922, 27934), False, 'import pytest\n'), ((27952, 28013), 'acme.esi_cluster_setup', 'esi_cluster_setup', ([], {'mem_per_job': '"""invalidGB"""', 'interactive': '(False)'}), "(mem_per_job='invalidGB', interactive=False)\n", (27969, 28013), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28061, 28086), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (28074, 28086), False, 'import pytest\n'), ((28104, 28161), 'acme.esi_cluster_setup', 'esi_cluster_setup', ([], {'mem_per_job': '"""-20MB"""', 'interactive': '(False)'}), "(mem_per_job='-20MB', interactive=False)\n", (28121, 28161), False, 'from acme import ParallelMap, cluster_cleanup, esi_cluster_setup\n'), ((28718, 28735), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (28733, 28735), False, 'import getpass\n'), ((28754, 28778), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (28767, 28778), False, 'import pytest\n'), ((28922, 28947), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (28935, 28947), False, 'import pytest\n'), ((29091, 29116), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (29104, 29116), False, 'import pytest\n'), ((29134, 29211), 'acme.esi_cluster_setup', 'esi_cluster_setup', ([], {'job_extra': "['--output=/path/to/nowhere']", 'interactive': '(False)'}), "(job_extra=['--output=/path/to/nowhere'], interactive=False)\n", (29151, 29211), False, 'from acme import ParallelMap, cluster_cleanup, 
esi_cluster_setup\n'), ((3994, 4009), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (4001, 4009), True, 'import numpy as np\n'), ((7044, 7059), 'numpy.ones', 'np.ones', (['(8, 1)'], {}), '((8, 1))\n', (7051, 7059), True, 'import numpy as np\n'), ((9707, 9750), 'numpy.abs', 'np.abs', (['(resInMem[chNo] - self.orig[:, chNo])'], {}), '(resInMem[chNo] - self.orig[:, chNo])\n', (9713, 9750), True, 'import numpy as np\n'), ((11415, 11445), 'os.path.join', 'os.path.join', (['tempDir2', 'h5name'], {}), '(tempDir2, h5name)\n', (11427, 11445), False, 'import os\n'), ((19910, 19931), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (19919, 19931), False, 'import h5py\n'), ((21908, 21937), 'os.path.split', 'os.path.split', (['sys.executable'], {}), '(sys.executable)\n', (21921, 21937), False, 'import os\n'), ((28541, 28569), 'numpy.round', 'np.round', (['(memory / 1000 ** 3)'], {}), '(memory / 1000 ** 3)\n', (28549, 28569), True, 'import numpy as np\n'), ((4015, 4030), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (4022, 4030), True, 'import numpy as np\n'), ((6372, 6387), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (6379, 6387), True, 'import numpy as np\n'), ((9225, 9273), 'numpy.abs', 'np.abs', (["(h5f['result_0'][()] - self.orig[:, chNo])"], {}), "(h5f['result_0'][()] - self.orig[:, chNo])\n", (9231, 9273), True, 'import numpy as np\n'), ((11491, 11538), 'numpy.abs', 'np.abs', (['(h5f[dset_name][()] - self.orig[:, chNo])'], {}), '(h5f[dset_name][()] - self.orig[:, chNo])\n', (11497, 11538), True, 'import numpy as np\n'), ((19965, 19997), 'h5py.File', 'h5py.File', (['hdfResults[chNo]', '"""r"""'], {}), "(hdfResults[chNo], 'r')\n", (19974, 19997), False, 'import h5py\n'), ((20039, 20097), 'numpy.array_equal', 'np.array_equal', (["h5f['result_0'][()]", "h5ref['result_0'][()]"], {}), "(h5f['result_0'][()], h5ref['result_0'][()])\n", (20053, 20097), True, 'import numpy as np\n'), ((6392, 6407), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (6399, 6407), True, 'import numpy as np\n'), ((19803, 19819), 'pickle.load', 'pickle.load', (['pkf'], {}), '(pkf)\n', (19814, 19819), False, 'import pickle\n'), ((23526, 23537), 'time.time', 'time.time', ([], {}), '()\n', (23535, 23537), False, 'import time\n'), ((25582, 25593), 'time.time', 'time.time', ([], {}), '()\n', (25591, 25593), False, 'import time\n')]
|
import numpy as np
from numpy import log
# Define the function to integrate
def f(x):
    return 1 / log(x)
# Implementation of Simpson's rule
# Parameters:
# f is the function to integrate
# a is the lower limit of the integral
# b is the upper limit of the integral
# n is the number of intervals (must be even)
def simpson(f, a, b, n):
    h = (b - a) / n
    g = f(a) + f(b)
    # Sum of the areas
    for i in range(1, n // 2):
        g = g + 2 * f(a + 2 * i * h)
    for i in range(0, n // 2):
        g = g + 4 * f(a + (2 * i + 1) * h)
    return h * g / 3
def main():
    li = simpson(f, 2, 3, 16)
print("Li(3): ", li)
main()
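# Optional sanity check, a sketch assuming SciPy is available (it is not a
# dependency of the script above): compare the composite-Simpson estimate
# with SciPy's adaptive quadrature of the same integrand.
def check_against_quad():
    from scipy.integrate import quad
    reference, _ = quad(f, 2, 3)
    estimate = simpson(f, 2, 3, 16)
    print("quad:", reference, "simpson:", estimate, "abs error:", abs(reference - estimate))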
|
[
"numpy.log"
] |
[((105, 111), 'numpy.log', 'log', (['x'], {}), '(x)\n', (108, 111), False, 'from numpy import log\n')]
|
from typing import List, Tuple
import mlflow
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor
from ..OEA_model import OEAModelInterface, ModelType, ExplanationType
from ..modeling_utils import log_metrics
class mlflow_pyfunc_wrapper(mlflow.pyfunc.PythonModel):
"""
Wrapper class that allows us to use generic predictors in the mlflow pyfunc format.
Used to wrap predictor types that are not already in the mlflow.* setup.
    In order to work with this class, the wrapped object needs to provide generic predict, fit, score, and predict_proba functions.
"""
def __init__(self, model):
"""
Initialized Wrapped python model
Parameters
----------
model: Python Object
A model that implements the predict, fit, score, and predict_proba functions
"""
self.model = model
def predict(self, *args):
"""
Use Predict function of wrapped model
Parameters
----------
*args :
Arguments needed for wrapped predict function
Returns
-------
predictions: pandas.DataFrame or numpy.ndarray
Predictions of wrapped model on passed arguments
"""
predictions = self.model.predict(*args)
return predictions
def fit(self, *args):
"""
Train/Fit Wrapped model on passed arguments
Parameters
----------
*args :
Arguments needed for wrapped fit(train) function
Returns
-------
Wrapped model after being fit on passed arguments
"""
return self.model.fit(*args)
def score(self, *args):
"""
Predicts and Scores the wrapped model on passed arguments
Parameters
----------
*args :
Arguments needed for wrapped score function
Returns
-------
score: Float
Resulting score of wrapped model score function. (Generally accuracy)
"""
score = self.model.score(*args)
return score
def predict_proba(self,*args):
"""
Generate prediction probabilities of the wrapped model on passed arguments
Parameters
----------
*args :
            Arguments needed for wrapped prediction probability function
Returns
-------
probabilities: pandas.DataFrame or numpy.ndarray
Predicted output probabilities
"""
probabilities = self.model.predict_proba(*args)
return probabilities
class wrapped_basic(OEAModelInterface):
def __init__(self, modelname):
"""
Initialize Basic Wrapped Pyfunc Model utilities (base class)
Parameters
----------
modelname: String
Name of the model for registration and saving purposes
"""
self.predictor = None
self.modelname = modelname
def load_split_data(self, X, Y, A, key, split=.4, stratify=None):
"""
Splits Data into training, validation, and test sets
Parameters
----------
X: pandas.DataFrame
Feature data
Y: pandas.DataFrame
Label data
A: pandas.DataFrame
            Sensitive Feature data (may or may not overlap with X)
key: String or List[Strings]
Columns to identify as Keys for all three dataframes. Dropped at loading time.
split: Float
Percentage of data to exclude for testing set
stratify: pandas.DataFrame
Dataframe used to stratify split of data. I.e. if labels are provided, will ensure equal label distribution in train / test sets.
Returns
-------
X_train: pandas.DataFrame
Feature data for training set
X_val: pandas.DataFrame
Feature data for validation set
X_test: pandas.DataFrame
Feature data for test set
y_train: pandas.DataFrame
Label data for training set
y_val: pandas.DataFrame
Label data for validation set
y_test: pandas.DataFrame
Label data for test set
A_train: pandas.DataFrame
            Sensitive Feature data for training set
        A_val: pandas.DataFrame
            Sensitive Feature data for validation set
        A_test: pandas.DataFrame
            Sensitive Feature data for test set
classes: List[str]
List of classes for classification problem outcomes
"""
if not (A is None):
(
X_train,
X_val_test,
y_train,
y_val_test,
A_train,
A_val_test,
) = train_test_split(
X,
Y,
A,
test_size=split,
random_state=12345,
stratify=stratify,
)
(X_val, X_test, y_val, y_test, A_val, A_test) = train_test_split(
X_val_test, y_val_test, A_val_test, test_size=0.5, random_state=12345
)
else:
(X_train, X_val_test, y_train, y_val_test) = train_test_split(
X,
Y,
test_size=split,
random_state=12345,
stratify=stratify,
)
(X_val, X_test, y_val, y_test) = train_test_split(
X_val_test, y_val_test, test_size=0.5, random_state=12345
)
X_train = X_train.drop(key, axis='columns').reset_index(drop=True)
X_val = X_val.drop(key, axis='columns').reset_index(drop=True)
X_test = X_test.drop(key, axis='columns').reset_index(drop=True)
y_train = y_train.drop(key, axis='columns')
y_train = y_train[y_train.columns[:1]].reset_index(drop=True)
y_val = y_val.drop(key, axis='columns').reset_index(drop=True)
y_val = y_val[y_val.columns[:1]].reset_index(drop=True)
y_test = y_test.drop(key, axis='columns').reset_index(drop=True)
y_test = y_test[y_test.columns[:1]].reset_index(drop=True)
classes = None
self.X_train = X_train
self.X_val = X_val
self.X_test = X_test
self.y_train = y_train.values.reshape(-1)
self.y_val = y_val.values.reshape(-1)
self.y_test = y_test.values.reshape(-1)
self.classes = classes
if not(A is None):
A_train = A_train.drop(key, axis='columns').reset_index(drop=True)
A_val = A_val.drop(key, axis='columns').reset_index(drop=True)
A_test = A_test.drop(key, axis='columns').reset_index(drop=True)
self.A_train = A_train
self.A_val = A_val
self.A_test = A_test
else:
A_train = None
A_val = None
A_test = None
self.A_train = A_train
self.A_val = A_val
self.A_test = A_test
return (
X_train,
X_val,
X_test,
y_train,
y_val,
y_test,
A_train,
A_val,
A_test,
classes,
)
def infer(self, data):
"""
Infer using model
Parameters
----------
data: pandas.DataFrame OR numpy array
Feature data
Returns
-------
predictions: pandas.DataFrame OR numpy array
Results of running inference of the predictor
"""
return self.predictor.predict(data)
def train(self):
"""
Trains model based on data originally loaded using load_split_data. Logs training metrics.
Returns
-------
self.predictor: sklearn Predictor
Trained predictor model object
"""
X_train_val = pd.concat([self.X_train, self.X_val], axis=0)
y_train_val = np.concatenate([self.y_train, self.y_val], axis=0)
self.predictor.fit(X_train_val, y_train_val)
log_metrics(self, dataset="training_val")
return self.predictor
def test(self):
"""
Evaluates model on the test set originally loaded using load_split_data. Logs testing metrics and returns predictions on test set.
Returns
-------
preds: pandas.DataFrame OR numpy array
Results of running inference of the predictor
"""
preds = log_metrics(self, dataset="test")
return preds
def save_model(self, foldername):
"""
Save Wrapped Pyfunc Model to a Path
Parameters
----------
foldername: String
Name of intermediate folder to save model to using mlflow utilities.
"""
mlflow.pyfunc.save_model(foldername, python_model=self.predictor)
def register_model(self, foldername):
"""
Register Model to repository attached to mlflow instance
Parameters
----------
foldername: String
Path of folder to upload to model repository
"""
mlflow.pyfunc.log_model(foldername, python_model=self.predictor, registered_model_name=self.modelname)
def load_model(self, modelname, version):
"""
Load Model from a registered endpoint
Parameters
----------
modelname: String
name of model to load from remote repository
version: String
            version of the model to load from the mlflow model repository.
Returns
-------
self.predictor: Wrapped PyFunc Predictor
Returns the predictor loaded from the registered endpoint
"""
model_version_uri = "models:/{model_name}/{version}".format(model_name=modelname,version=version)
self.predictor = mlflow.pyfunc.load_model(model_version_uri)
return self.predictor
class classification_EBM(wrapped_basic):
"""
    Model Class for EBM used for Binary Classification. Inherits from base wrapped model class (OEA Interface Type)
    Classification type with Explainable Boosting Machine special explanation type
"""
model_type = ModelType.binary_classification
explanation_type = ExplanationType.ebm
def init_model(self, seed=5):
"""Initialize Model"""
self.predictor = mlflow_pyfunc_wrapper(ExplainableBoostingClassifier(random_state=seed))
class multi_classification_EBM(wrapped_basic):
"""
    Model Class for EBM used for Multiclass Classification. Inherits from base wrapped model class (OEA Interface Type)
    Multiclass Classification type with Explainable Boosting Machine special explanation type
"""
model_type = ModelType.multiclass_classification
explanation_type = ExplanationType.ebm
def init_model(self, seed=5):
"""Initialize Model"""
self.predictor = mlflow_pyfunc_wrapper(ExplainableBoostingClassifier(random_state=seed))
class regression_EBM(wrapped_basic):
"""
Model Class for EBM used for Regression. Inherits from base wrapped model class (OEA Interface Type)
    Regression type with Explainable Boosting Machine special explanation type
"""
model_type = ModelType.regression
explanation_type = ExplanationType.ebm
def init_model(self, seed=5):
"""Initialize Model"""
self.predictor = mlflow_pyfunc_wrapper(ExplainableBoostingRegressor(random_state=seed))
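# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# The constructor arguments of wrapped_basic and the exact signature of
# load_split_data are not shown here, so the calls below are assumptions; only
# init_model/train/test/save_model are taken verbatim from the classes above.
#
# model = classification_EBM(...)                 # constructor args are assumed
# model.init_model(seed=5)                        # wraps an ExplainableBoostingClassifier
# model.load_split_data(X, Y, A, key, split=0.3, stratify=Y)  # hypothetical argument order
# model.train()                                   # fits on train+val and logs metrics
# preds = model.test()                            # logs test metrics, returns predictions
# model.save_model("ebm_model")                   # saves the wrapped pyfunc model via mlflow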
|
[
"interpret.glassbox.ExplainableBoostingRegressor",
"sklearn.model_selection.train_test_split",
"interpret.glassbox.ExplainableBoostingClassifier",
"numpy.concatenate",
"pandas.concat",
"mlflow.pyfunc.load_model",
"mlflow.pyfunc.save_model",
"mlflow.pyfunc.log_model"
] |
[((8051, 8096), 'pandas.concat', 'pd.concat', (['[self.X_train, self.X_val]'], {'axis': '(0)'}), '([self.X_train, self.X_val], axis=0)\n', (8060, 8096), True, 'import pandas as pd\n'), ((8119, 8169), 'numpy.concatenate', 'np.concatenate', (['[self.y_train, self.y_val]'], {'axis': '(0)'}), '([self.y_train, self.y_val], axis=0)\n', (8133, 8169), True, 'import numpy as np\n'), ((9003, 9068), 'mlflow.pyfunc.save_model', 'mlflow.pyfunc.save_model', (['foldername'], {'python_model': 'self.predictor'}), '(foldername, python_model=self.predictor)\n', (9027, 9068), False, 'import mlflow\n'), ((9341, 9447), 'mlflow.pyfunc.log_model', 'mlflow.pyfunc.log_model', (['foldername'], {'python_model': 'self.predictor', 'registered_model_name': 'self.modelname'}), '(foldername, python_model=self.predictor,\n registered_model_name=self.modelname)\n', (9364, 9447), False, 'import mlflow\n'), ((10056, 10099), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', (['model_version_uri'], {}), '(model_version_uri)\n', (10080, 10099), False, 'import mlflow\n'), ((4886, 4972), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y', 'A'], {'test_size': 'split', 'random_state': '(12345)', 'stratify': 'stratify'}), '(X, Y, A, test_size=split, random_state=12345, stratify=\n stratify)\n', (4902, 4972), False, 'from sklearn.model_selection import train_test_split\n'), ((5140, 5231), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_val_test', 'y_val_test', 'A_val_test'], {'test_size': '(0.5)', 'random_state': '(12345)'}), '(X_val_test, y_val_test, A_val_test, test_size=0.5,\n random_state=12345)\n', (5156, 5231), False, 'from sklearn.model_selection import train_test_split\n'), ((5330, 5408), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': 'split', 'random_state': '(12345)', 'stratify': 'stratify'}), '(X, Y, test_size=split, random_state=12345, stratify=stratify)\n', (5346, 5408), False, 'from sklearn.model_selection import train_test_split\n'), ((5550, 5625), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_val_test', 'y_val_test'], {'test_size': '(0.5)', 'random_state': '(12345)'}), '(X_val_test, y_val_test, test_size=0.5, random_state=12345)\n', (5566, 5625), False, 'from sklearn.model_selection import train_test_split\n'), ((10593, 10641), 'interpret.glassbox.ExplainableBoostingClassifier', 'ExplainableBoostingClassifier', ([], {'random_state': 'seed'}), '(random_state=seed)\n', (10622, 10641), False, 'from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n'), ((11130, 11178), 'interpret.glassbox.ExplainableBoostingClassifier', 'ExplainableBoostingClassifier', ([], {'random_state': 'seed'}), '(random_state=seed)\n', (11159, 11178), False, 'from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n'), ((11612, 11659), 'interpret.glassbox.ExplainableBoostingRegressor', 'ExplainableBoostingRegressor', ([], {'random_state': 'seed'}), '(random_state=seed)\n', (11640, 11659), False, 'from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n')]
|
"""
This module contains utility functions used in the example scripts. They are
implemented separately because they use scipy and numpy and we want to remove
external dependencies from within the core library.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from math import sqrt
from scipy.stats import sem
from scipy.stats import t
from scipy import linalg
import numpy as np
from concept_formation.utils import mean
def moving_average(a, n=3):
"""A function for computing the moving average, so that we can smooth out the
curves on a graph.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def lowess(x, y, f=1./3., iter=3, confidence=0.95):
"""
Performs Lowess smoothing
Code adapted from: https://gist.github.com/agramfort/850437
lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
.. todo:: double check that the confidence bounds are correct
"""
n = len(x)
r = int(np.ceil(f*n))
h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
w = (1 - w**3)**3
yest = np.zeros(n)
delta = np.ones(n)
for iteration in range(iter):
for i in range(n):
weights = delta * w[:, i]
b = np.array([np.sum(weights*y), np.sum(weights*y*x)])
A = np.array([[np.sum(weights), np.sum(weights*x)],
[np.sum(weights*x), np.sum(weights*x*x)]])
beta = linalg.solve(A, b)
yest[i] = beta[0] + beta[1]*x[i]
residuals = y - yest
s = np.median(np.abs(residuals))
delta = np.clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta**2)**2
h = np.zeros(n)
for x_idx, x_val in enumerate(x):
r2 = np.array([v*v for i, v in enumerate(residuals) if x[i] == x_val])
n = len(r2)
se = sqrt(mean(r2)) / sqrt(len(r2))
h[x_idx] = se * t._ppf((1+confidence)/2., n-1)
return yest, yest-h, yest+h
def avg_lines(x, y, confidence=0.95):
n = len(x)
mean = np.zeros(n)
lower = np.zeros(n)
upper = np.zeros(n)
for x_idx, x_val in enumerate(x):
ys = np.array([v for i, v in enumerate(y) if x[i] == x_val])
m, l, u = mean_confidence_interval(ys)
mean[x_idx] = m
lower[x_idx] = l
upper[x_idx] = u
return mean, lower, upper
def mean_confidence_interval(data, confidence=0.95):
"""
Given a list or vector of data, this returns the mean, lower, and upper
confidence intervals to the level of confidence specified (default = 95%
confidence interval).
"""
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), sem(a)
h = se * t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
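# --- Illustrative usage sketch (editor addition; the data values are made up) ---
if __name__ == "__main__":
    data = [1.0, 2.0, 4.0, 3.0, 5.0, 7.0, 6.0]
    # Smooth the series with a 3-point moving average.
    print(moving_average(data, n=3))
    # Mean with lower/upper 95% confidence bounds.
    m, low, high = mean_confidence_interval(data)
    print(m, low, high)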
|
[
"numpy.clip",
"numpy.abs",
"numpy.ceil",
"numpy.mean",
"scipy.stats.t._ppf",
"numpy.ones",
"scipy.linalg.solve",
"numpy.array",
"numpy.zeros",
"concept_formation.utils.mean",
"numpy.sum",
"scipy.stats.sem",
"numpy.cumsum"
] |
[((703, 728), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (712, 728), True, 'import numpy as np\n'), ((1840, 1851), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1848, 1851), True, 'import numpy as np\n'), ((1865, 1875), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1872, 1875), True, 'import numpy as np\n'), ((2441, 2452), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2449, 2452), True, 'import numpy as np\n'), ((2800, 2811), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2808, 2811), True, 'import numpy as np\n'), ((2825, 2836), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2833, 2836), True, 'import numpy as np\n'), ((2850, 2861), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2858, 2861), True, 'import numpy as np\n'), ((1667, 1681), 'numpy.ceil', 'np.ceil', (['(f * n)'], {}), '(f * n)\n', (1674, 1681), True, 'import numpy as np\n'), ((1756, 1793), 'numpy.abs', 'np.abs', (['((x[:, None] - x[None, :]) / h)'], {}), '((x[:, None] - x[None, :]) / h)\n', (1762, 1793), True, 'import numpy as np\n'), ((2357, 2394), 'numpy.clip', 'np.clip', (['(residuals / (6.0 * s))', '(-1)', '(1)'], {}), '(residuals / (6.0 * s), -1, 1)\n', (2364, 2394), True, 'import numpy as np\n'), ((3402, 3416), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3410, 3416), True, 'import numpy as np\n'), ((3446, 3456), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (3453, 3456), True, 'import numpy as np\n'), ((3458, 3464), 'scipy.stats.sem', 'sem', (['a'], {}), '(a)\n', (3461, 3464), False, 'from scipy.stats import sem\n'), ((3479, 3516), 'scipy.stats.t._ppf', 't._ppf', (['((1 + confidence) / 2.0)', '(n - 1)'], {}), '((1 + confidence) / 2.0, n - 1)\n', (3485, 3516), False, 'from scipy.stats import t\n'), ((2201, 2219), 'scipy.linalg.solve', 'linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2213, 2219), False, 'from scipy import linalg\n'), ((2321, 2338), 'numpy.abs', 'np.abs', (['residuals'], {}), '(residuals)\n', (2327, 2338), True, 'import numpy as np\n'), ((2663, 2700), 'scipy.stats.t._ppf', 't._ppf', (['((1 + confidence) / 2.0)', '(n - 1)'], {}), '((1 + confidence) / 2.0, n - 1)\n', (2669, 2700), False, 'from scipy.stats import t\n'), ((1699, 1715), 'numpy.abs', 'np.abs', (['(x - x[i])'], {}), '(x - x[i])\n', (1705, 1715), True, 'import numpy as np\n'), ((2612, 2620), 'concept_formation.utils.mean', 'mean', (['r2'], {}), '(r2)\n', (2616, 2620), False, 'from concept_formation.utils import mean\n'), ((2005, 2024), 'numpy.sum', 'np.sum', (['(weights * y)'], {}), '(weights * y)\n', (2011, 2024), True, 'import numpy as np\n'), ((2024, 2047), 'numpy.sum', 'np.sum', (['(weights * y * x)'], {}), '(weights * y * x)\n', (2030, 2047), True, 'import numpy as np\n'), ((2074, 2089), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (2080, 2089), True, 'import numpy as np\n'), ((2091, 2110), 'numpy.sum', 'np.sum', (['(weights * x)'], {}), '(weights * x)\n', (2097, 2110), True, 'import numpy as np\n'), ((2139, 2158), 'numpy.sum', 'np.sum', (['(weights * x)'], {}), '(weights * x)\n', (2145, 2158), True, 'import numpy as np\n'), ((2158, 2181), 'numpy.sum', 'np.sum', (['(weights * x * x)'], {}), '(weights * x * x)\n', (2164, 2181), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from spectral_clustering.spectral_embedding_ import spectral_embedding
def assert_first_col_equal(maps):
constant_vec = [1] * maps.shape[0]
assert_array_almost_equal(maps[:, 0] / maps[0, 0], constant_vec)
def test_spectral_embedding():
"""
    By the definition of spectral embedding, the first column of the embedding is constant.
"""
adjacency = np.array([
[0., 0.8, 0.9, 0.],
[0.8, 0., 0., 0.],
[0.9, 0., 0., 1.],
[0., 0., 1., 0.]])
maps = spectral_embedding(
adjacency, n_components=2, drop_first=False, eigen_solver="arpack")
assert_first_col_equal(maps)
maps_1 = spectral_embedding(
adjacency, n_components=2, drop_first=False, eigen_solver="lobpcg")
assert_first_col_equal(maps_1)
|
[
"spectral_clustering.spectral_embedding_.spectral_embedding",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
] |
[((245, 309), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['(maps[:, 0] / maps[0, 0])', 'constant_vec'], {}), '(maps[:, 0] / maps[0, 0], constant_vec)\n', (270, 309), False, 'from numpy.testing import assert_array_almost_equal\n'), ((414, 516), 'numpy.array', 'np.array', (['[[0.0, 0.8, 0.9, 0.0], [0.8, 0.0, 0.0, 0.0], [0.9, 0.0, 0.0, 1.0], [0.0, \n 0.0, 1.0, 0.0]]'], {}), '([[0.0, 0.8, 0.9, 0.0], [0.8, 0.0, 0.0, 0.0], [0.9, 0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0]])\n', (422, 516), True, 'import numpy as np\n'), ((545, 635), 'spectral_clustering.spectral_embedding_.spectral_embedding', 'spectral_embedding', (['adjacency'], {'n_components': '(2)', 'drop_first': '(False)', 'eigen_solver': '"""arpack"""'}), "(adjacency, n_components=2, drop_first=False,\n eigen_solver='arpack')\n", (563, 635), False, 'from spectral_clustering.spectral_embedding_ import spectral_embedding\n'), ((687, 777), 'spectral_clustering.spectral_embedding_.spectral_embedding', 'spectral_embedding', (['adjacency'], {'n_components': '(2)', 'drop_first': '(False)', 'eigen_solver': '"""lobpcg"""'}), "(adjacency, n_components=2, drop_first=False,\n eigen_solver='lobpcg')\n", (705, 777), False, 'from spectral_clustering.spectral_embedding_ import spectral_embedding\n')]
|
"""Transformer from 'Attention is all you need' (Vaswani et al., 2017)"""
# Reference: https://www.tensorflow.org/text/tutorials/transformer
# Reference: https://keras.io/examples/nlp/text_classification_with_transformer/
import numpy as np
import tensorflow as tf
class Transformer(tf.keras.Model):
def __init__(
self,
num_layers,
d_model,
num_heads,
dff,
input_vocab_size,
target_vocab_size,
pe_input,
pe_target,
rate=0.1,
):
super().__init__()
self.encoder = Encoder(
num_layers, d_model, num_heads, dff, input_vocab_size, pe_input, rate
)
self.decoder = Decoder(
num_layers, d_model, num_heads, dff, target_vocab_size, pe_target, rate
)
self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def call(self, inputs, training):
# Keras models prefer if you pass all your inputs in the first argument
inp, tar = inputs
enc_padding_mask, look_ahead_mask, dec_padding_mask = self.create_masks(
inp, tar
)
enc_output = self.encoder(
inp, training, enc_padding_mask
) # (batch_size, inp_seq_len, d_model)
# dec_output.shape == (batch_size, tar_seq_len, d_model)
dec_output, attention_weights = self.decoder(
tar, enc_output, training, look_ahead_mask, dec_padding_mask
)
final_output = self.final_layer(
dec_output
) # (batch_size, tar_seq_len, target_vocab_size)
return final_output, attention_weights
def create_masks(self, inp, tar):
# Encoder padding mask
enc_padding_mask = _create_padding_mask(inp)
# Used in the 2nd attention block in the decoder
# This padding mask is used to mask the encoder outputs.
dec_padding_mask = _create_padding_mask(inp)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = _create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = _create_padding_mask(tar)
look_ahead_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, look_ahead_mask, dec_padding_mask
class Encoder(tf.keras.layers.Layer):
"""Transformer encoder from 'Attention is all you need' (Vaswani et al., 2017)
Contains:
1. Input Embedding
2. Positional Encoding
3. N encoder layers
"""
def __init__(
self,
num_layers,
d_model,
num_heads,
dff,
input_vocab_size,
maximum_position_encoding,
rate=0.1,
):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = _positional_encoding(
maximum_position_encoding, self.d_model
)
self.enc_layers = [
EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)
]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
seq_len = tf.shape(x)[1]
# adding embedding and position encoding
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
def __init__(
self,
num_layers,
d_model,
num_heads,
dff,
target_vocab_size,
maximum_position_encoding,
rate=0.1,
):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
self.pos_encoding = _positional_encoding(maximum_position_encoding, d_model)
self.dec_layers = [
DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)
]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](
x, enc_output, training, look_ahead_mask, padding_mask
)
attention_weights[f"decoder_layer{i+1}_block1"] = block1
attention_weights[f"decoder_layer{i+1}_block2"] = block2
# x.shape == (batch_size, target_seq_len, d_model)
return x, attention_weights
class EncoderLayer(tf.keras.layers.Layer):
"""Transformer encoder layer from 'Attention is all you need' (Vaswani et al., 2017)
    One of the main differences between the transformer encoder and decoder is
    the self-attention. The reasons for it are detailed in Section 4 and can
    be summarized as a way to reduce the path length between long-range dependencies
in the network.
"""
def __init__(self, d_model=512, num_heads=8, dff=2048, rate=0.1):
"""Initializer a Transformer Encoder Layer
Attributes
----------
d_model : int
Model dimension used on all sub-layers and embedding.
num_heads : int
            Number of heads, denoted $h$ in Vaswani et al., 2017.
dff : int
FeedForward dimension.
rate : float
Dropout rate parameter applied after self-attention and
FeedForward.
"""
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
self.ffn = _point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(
out1 + ffn_output
) # (batch_size, input_seq_len, d_model)
class DecoderLayer(tf.keras.layers.Layer):
"""Transformer decoder layer from 'Attention is all you need' (Vaswani et al., 2017)
    The decoder layer is similar to the encoder layer but has a third sub-layer performing
    multi-head attention over the encoder stack. The self-attention sub-layer
    is modified to prevent positions from attending to subsequent positions.
Embeddings are also offset by one position, forcing predictions of
position i to depend on the known outputs at positions less than i.
"""
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.mha1 = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
self.mha2 = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
self.ffn = _point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
# enc_output.shape == (batch_size, input_seq_len, d_model)
attn1, attn_weights_block1 = self.mha1(
x, x, x, look_ahead_mask
) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2(
enc_output, enc_output, out1, padding_mask
) # (batch_size, target_seq_len, d_model)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(
ffn_output + out2
) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
def _point_wise_feed_forward_network(d_model, dff):
"""Position-wise Feed-Forward Network
    It's a fully connected feed-forward network applied to each position
    separately and identically, represented by:
    ```
    FFN(x) = max(0, x W_1 + b_1) W_2 + b_2
    ```
    It contains two linear transformations with a ReLU activation in between.
"""
return tf.keras.Sequential(
[
tf.keras.layers.Dense(dff, activation="relu"), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model), # (batch_size, seq_len, d_model)
]
)
def _create_padding_mask(seq):
"""Mask all the pad tokens in the batch of sequence"""
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def _create_look_ahead_mask(size):
"""Mask the future tokens in a sequence"""
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
def _positional_encoding(position, d_model):
"""Position Encoding (PE)
    Because the model contains no recurrence and no convolution, positional
    encoding is injected to add information about the absolute position of the
    tokens in the sequence. It can be fixed or learned; however, fixed
    has proven to be as efficient as learned.
    This is the fixed positional encoding, derived from sine and
    cosine functions of different frequencies:
    PE(pos, 2i)     = sin(pos / 10000^(2i / d_model))
    PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_model))
    where pos is the absolute position of a token in the sequence and i
is the dimension.
"""
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
angle_rads = get_angles(
np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model
)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
    The mask has different shapes depending on its type (padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
# (..., seq_len_q, seq_len_k)
matmul_qk = tf.matmul(q, k, transpose_b=True)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += mask * -1e9
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(
scaled_attention_logits, axis=-1
) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask
)
scaled_attention = tf.transpose(
scaled_attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(
scaled_attention, (batch_size, -1, self.d_model)
) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def optimizer(d_model):
"""Adam optimizer as of Section 5.3"""
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
learning_rate = CustomSchedule(d_model)
return tf.keras.optimizers.Adam(
learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9
)
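# --- Illustrative smoke test (editor addition; the hyperparameters are arbitrary) ---
if __name__ == "__main__":
    sample_transformer = Transformer(
        num_layers=2,
        d_model=32,
        num_heads=4,
        dff=64,
        input_vocab_size=1000,
        target_vocab_size=1200,
        pe_input=128,
        pe_target=128,
    )
    inp = tf.random.uniform((2, 10), minval=1, maxval=1000, dtype=tf.int64)
    tar = tf.random.uniform((2, 8), minval=1, maxval=1200, dtype=tf.int64)
    logits, attention_weights = sample_transformer((inp, tar), training=False)
    # (batch_size, tar_seq_len, target_vocab_size) -> (2, 8, 1200)
    print(logits.shape)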
|
[
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.keras.layers.Dense",
"tensorflow.nn.softmax",
"numpy.sin",
"tensorflow.cast",
"numpy.arange",
"tensorflow.math.minimum",
"tensorflow.math.sqrt",
"tensorflow.matmul",
"tensorflow.math.equal",
"tensorflow.maximum",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Embedding",
"tensorflow.math.rsqrt",
"numpy.cos",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.keras.optimizers.Adam",
"numpy.float32",
"tensorflow.keras.layers.LayerNormalization"
] |
[((11454, 11481), 'numpy.sin', 'np.sin', (['angle_rads[:, 0::2]'], {}), '(angle_rads[:, 0::2])\n', (11460, 11481), True, 'import numpy as np\n'), ((11558, 11585), 'numpy.cos', 'np.cos', (['angle_rads[:, 1::2]'], {}), '(angle_rads[:, 1::2])\n', (11564, 11585), True, 'import numpy as np\n'), ((11645, 11684), 'tensorflow.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (11652, 11684), True, 'import tensorflow as tf\n'), ((12404, 12437), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (12413, 12437), True, 'import tensorflow as tf\n'), ((12800, 12847), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scaled_attention_logits'], {'axis': '(-1)'}), '(scaled_attention_logits, axis=-1)\n', (12813, 12847), True, 'import tensorflow as tf\n'), ((12907, 12938), 'tensorflow.matmul', 'tf.matmul', (['attention_weights', 'v'], {}), '(attention_weights, v)\n', (12916, 12938), True, 'import tensorflow as tf\n'), ((15706, 15785), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['learning_rate'], {'beta_1': '(0.9)', 'beta_2': '(0.98)', 'epsilon': '(1e-09)'}), '(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-09)\n', (15730, 15785), True, 'import tensorflow as tf\n'), ((825, 865), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['target_vocab_size'], {}), '(target_vocab_size)\n', (846, 865), True, 'import tensorflow as tf\n'), ((2236, 2288), 'tensorflow.maximum', 'tf.maximum', (['dec_target_padding_mask', 'look_ahead_mask'], {}), '(dec_target_padding_mask, look_ahead_mask)\n', (2246, 2288), True, 'import tensorflow as tf\n'), ((2911, 2963), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {}), '(input_vocab_size, d_model)\n', (2936, 2963), True, 'import tensorflow as tf\n'), ((3222, 3251), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (3245, 3251), True, 'import tensorflow as tf\n'), ((4118, 4171), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['target_vocab_size', 'd_model'], {}), '(target_vocab_size, d_model)\n', (4143, 4171), True, 'import tensorflow as tf\n'), ((4402, 4431), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (4425, 4431), True, 'import tensorflow as tf\n'), ((6346, 6395), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (6380, 6395), True, 'import tensorflow as tf\n'), ((6421, 6470), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (6455, 6470), True, 'import tensorflow as tf\n'), ((6495, 6524), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (6518, 6524), True, 'import tensorflow as tf\n'), ((6549, 6578), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (6572, 6578), True, 'import tensorflow as tf\n'), ((7992, 8041), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (8026, 8041), True, 'import tensorflow as tf\n'), ((8067, 8116), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (8101, 8116), True, 'import tensorflow as tf\n'), ((8142, 8191), 'tensorflow.keras.layers.LayerNormalization', 
'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (8176, 8191), True, 'import tensorflow as tf\n'), ((8216, 8245), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8239, 8245), True, 'import tensorflow as tf\n'), ((8270, 8299), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8293, 8299), True, 'import tensorflow as tf\n'), ((8324, 8353), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (8347, 8353), True, 'import tensorflow as tf\n'), ((10058, 10079), 'tensorflow.math.equal', 'tf.math.equal', (['seq', '(0)'], {}), '(seq, 0)\n', (10071, 10079), True, 'import tensorflow as tf\n'), ((12549, 12565), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), '(dk)\n', (12561, 12565), True, 'import tensorflow as tf\n'), ((13331, 13361), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13352, 13361), True, 'import tensorflow as tf\n'), ((13380, 13410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13401, 13410), True, 'import tensorflow as tf\n'), ((13429, 13459), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13450, 13459), True, 'import tensorflow as tf\n'), ((13482, 13512), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (13503, 13512), True, 'import tensorflow as tf\n'), ((13733, 13792), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.num_heads, self.depth)'], {}), '(x, (batch_size, -1, self.num_heads, self.depth))\n', (13743, 13792), True, 'import tensorflow as tf\n'), ((13808, 13842), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), '(x, perm=[0, 2, 1, 3])\n', (13820, 13842), True, 'import tensorflow as tf\n'), ((14657, 14706), 'tensorflow.transpose', 'tf.transpose', (['scaled_attention'], {'perm': '[0, 2, 1, 3]'}), '(scaled_attention, perm=[0, 2, 1, 3])\n', (14669, 14706), True, 'import tensorflow as tf\n'), ((14802, 14862), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention', '(batch_size, -1, self.d_model)'], {}), '(scaled_attention, (batch_size, -1, self.d_model))\n', (14812, 14862), True, 'import tensorflow as tf\n'), ((3310, 3321), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3318, 3321), True, 'import tensorflow as tf\n'), ((3471, 3504), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (3478, 3504), True, 'import tensorflow as tf\n'), ((4528, 4539), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4536, 4539), True, 'import tensorflow as tf\n'), ((4672, 4705), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (4679, 4705), True, 'import tensorflow as tf\n'), ((9776, 9821), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dff'], {'activation': '"""relu"""'}), "(dff, activation='relu')\n", (9797, 9821), True, 'import tensorflow as tf\n'), ((9865, 9895), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (9886, 9895), True, 'import tensorflow as tf\n'), ((10366, 10387), 'tensorflow.ones', 'tf.ones', (['(size, size)'], {}), '((size, size))\n', (10373, 10387), True, 'import tensorflow as tf\n'), ((11293, 11312), 'numpy.arange', 'np.arange', (['position'], {}), '(position)\n', (11302, 11312), True, 'import numpy as np\n'), ((11329, 11347), 
'numpy.arange', 'np.arange', (['d_model'], {}), '(d_model)\n', (11338, 11347), True, 'import numpy as np\n'), ((12478, 12489), 'tensorflow.shape', 'tf.shape', (['k'], {}), '(k)\n', (12486, 12489), True, 'import tensorflow as tf\n'), ((13900, 13911), 'tensorflow.shape', 'tf.shape', (['q'], {}), '(q)\n', (13908, 13911), True, 'import tensorflow as tf\n'), ((15364, 15397), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (15371, 15397), True, 'import tensorflow as tf\n'), ((15498, 15517), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['step'], {}), '(step)\n', (15511, 15517), True, 'import tensorflow as tf\n'), ((2132, 2145), 'tensorflow.shape', 'tf.shape', (['tar'], {}), '(tar)\n', (2140, 2145), True, 'import tensorflow as tf\n'), ((15592, 15619), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['self.d_model'], {}), '(self.d_model)\n', (15605, 15619), True, 'import tensorflow as tf\n'), ((15622, 15649), 'tensorflow.math.minimum', 'tf.math.minimum', (['arg1', 'arg2'], {}), '(arg1, arg2)\n', (15637, 15649), True, 'import tensorflow as tf\n'), ((11201, 11220), 'numpy.float32', 'np.float32', (['d_model'], {}), '(d_model)\n', (11211, 11220), True, 'import numpy as np\n')]
|
"""
Script for analyzing model's performance
"""
import argparse
import sys
import collections
import yaml
import tensorflow as tf
import tqdm
import numpy as np
import net.data
import net.ml
import net.utilities
def report_iou_results(categories_intersections_counts_map, categories_unions_counts_map):
"""
Reports iou analysis results
:param categories_intersections_counts_map: dictionary mapping categories to a list of intersection counts
for different images for that category
:param categories_unions_counts_map: dictionary mapping categories to a list of unions counts
for different images for that category
"""
categories = sorted(categories_intersections_counts_map.keys())
categories_means = []
for category in categories:
category_intersections_counts = categories_intersections_counts_map[category]
category_unions_counts = categories_unions_counts_map[category]
category_mean = np.sum(category_intersections_counts) / np.sum(category_unions_counts)
print("{} mean iou -> {:.5f}".format(category, category_mean))
categories_means.append(category_mean)
print("\nMean iou across all categories: {:.5f}".format(np.mean(categories_means)))
def get_segmentation_cubes_generator(samples_generator, model, indices_to_colors_map, void_color):
"""
Get a generator that uses samples_generator to obtain (image, segmentation) tuple and yields a tuple
(ground_truth_segmentation_cube, predicted_segmentation_cube)
:param samples_generator: generator that yields (image, segmentation) tuple
:param model: net.ml.Model instance
:param indices_to_colors_map: dictionary mapping categories indices to their colors in segmentation images
:param void_color: 3-elements tuple that represents color of pixels without a category
:return: generator that yields (ground_truth_segmentation_cube, predicted_segmentation_cube) tuples
"""
while True:
image, segmentation = next(samples_generator)
ground_truth_segmentation_cube = net.data.get_segmentation_cube(segmentation, indices_to_colors_map)
# Raw predictions are floats before thresholding
raw_predicted_segmentation_cube = model.predict(image)
predicted_segmentation_image = net.data.get_segmentation_image(
raw_predicted_segmentation_cube, indices_to_colors_map, void_color)
predicted_segmentation_cube = net.data.get_segmentation_cube(
predicted_segmentation_image, indices_to_colors_map)
yield ground_truth_segmentation_cube, predicted_segmentation_cube
def analyze_iou(model, generator_factory, config):
"""
Analyses intersection over union of model predictions with ground truth using VOC validation dataset
:param model: net.ml.Model instance
:param generator_factory: VOCSamplesGeneratorFactory instance
:param config: object with configuration details
"""
indices_to_colors_map, void_color = net.data.get_colors_info(len(config["categories"]))
segmentation_cubes_generator = get_segmentation_cubes_generator(
generator_factory.get_generator(), model, indices_to_colors_map, void_color)
categories_intersections_counts_map = collections.defaultdict(list)
categories_unions_counts_map = collections.defaultdict(list)
# for _ in tqdm.tqdm(range(10)):
for _ in tqdm.tqdm(range(generator_factory.get_size())):
ground_truth_segmentation_cube, predicted_segmentation_cube = next(segmentation_cubes_generator)
# Get iou for each category that is present in ground truth cube
for index, category in enumerate(config["categories"]):
intersection_pixels = np.logical_and(
ground_truth_segmentation_cube[:, :, index], predicted_segmentation_cube[:, :, index])
categories_intersections_counts_map[category].append(np.sum(intersection_pixels))
union_pixels = np.logical_or(
ground_truth_segmentation_cube[:, :, index], predicted_segmentation_cube[:, :, index])
categories_unions_counts_map[category].append(np.sum(union_pixels))
report_iou_results(categories_intersections_counts_map, categories_unions_counts_map)
def main():
"""
Script entry point
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', action="store", required=True)
arguments = parser.parse_args(sys.argv[1:])
with open(arguments.config) as file:
config = yaml.safe_load(file)
network = net.ml.FullyConvolutionalNetwork(categories_count=len(config["categories"]))
session = tf.keras.backend.get_session()
model = net.ml.Model(session, network, config["categories"])
model.load(config["model_checkpoint_path"])
generator_factory = net.data.VOCSamplesGeneratorFactory(
config["voc"]["data_directory"], config["voc"]["validation_set_path"], config["size_factor"])
analyze_iou(model, generator_factory, config)
if __name__ == "__main__":
main()
|
[
"numpy.mean",
"argparse.ArgumentParser",
"numpy.logical_and",
"tensorflow.keras.backend.get_session",
"numpy.logical_or",
"yaml.safe_load",
"numpy.sum",
"collections.defaultdict"
] |
[((3250, 3279), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3273, 3279), False, 'import collections\n'), ((3315, 3344), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3338, 3344), False, 'import collections\n'), ((4322, 4347), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4345, 4347), False, 'import argparse\n'), ((4650, 4680), 'tensorflow.keras.backend.get_session', 'tf.keras.backend.get_session', ([], {}), '()\n', (4678, 4680), True, 'import tensorflow as tf\n'), ((4523, 4543), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (4537, 4543), False, 'import yaml\n'), ((967, 1004), 'numpy.sum', 'np.sum', (['category_intersections_counts'], {}), '(category_intersections_counts)\n', (973, 1004), True, 'import numpy as np\n'), ((1007, 1037), 'numpy.sum', 'np.sum', (['category_unions_counts'], {}), '(category_unions_counts)\n', (1013, 1037), True, 'import numpy as np\n'), ((1218, 1243), 'numpy.mean', 'np.mean', (['categories_means'], {}), '(categories_means)\n', (1225, 1243), True, 'import numpy as np\n'), ((3723, 3828), 'numpy.logical_and', 'np.logical_and', (['ground_truth_segmentation_cube[:, :, index]', 'predicted_segmentation_cube[:, :, index]'], {}), '(ground_truth_segmentation_cube[:, :, index],\n predicted_segmentation_cube[:, :, index])\n', (3737, 3828), True, 'import numpy as np\n'), ((3965, 4069), 'numpy.logical_or', 'np.logical_or', (['ground_truth_segmentation_cube[:, :, index]', 'predicted_segmentation_cube[:, :, index]'], {}), '(ground_truth_segmentation_cube[:, :, index],\n predicted_segmentation_cube[:, :, index])\n', (3978, 4069), True, 'import numpy as np\n'), ((3908, 3935), 'numpy.sum', 'np.sum', (['intersection_pixels'], {}), '(intersection_pixels)\n', (3914, 3935), True, 'import numpy as np\n'), ((4142, 4162), 'numpy.sum', 'np.sum', (['union_pixels'], {}), '(union_pixels)\n', (4148, 4162), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
from PIL import Image
def modcrop(im, modulo):
if len(im.shape) == 3:
size = np.array(im.shape)
size = size - (size % modulo)
im = im[0 : size[0], 0 : size[1], :]
elif len(im.shape) == 2:
size = np.array(im.shape)
size = size - (size % modulo)
im = im[0 : size[0], 0 : size[1]]
else: raise AttributeError
return im
def shave(im, border):
if len(im.shape) == 3:
return im[border[0] : -border[0],
border[1] : -border[1], :]
elif len(im.shape) == 2:
return im[border[0] : -border[0],
border[1] : -border[1]]
else: raise AttributeError
def compute_psnr(im1, im2):
if im1.shape != im2.shape:
raise Exception('the shapes of two images are not equal')
rmse = np.sqrt(((np.asfarray(im1) - np.asfarray(im2)) ** 2).mean())
psnr = 20 * np.log10(255.0 / rmse)
return psnr
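def _compute_psnr_example():
    """Illustrative check added by the editor; the arrays below are made up."""
    im1 = np.zeros((8, 8), dtype=np.uint8)
    im2 = np.full((8, 8), 4, dtype=np.uint8)
    # rmse = 4, so psnr = 20 * log10(255 / 4) ~ 36.09 dB
    return compute_psnr(im1, im2)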
def main():
# folder path
folder = '../datas/Set60/ISO6400'
# generate the file list
filepath = os.listdir(folder)
filepath.sort()
im_input = tf.placeholder('float', [1, None, None, 3], name='im_input')
# create a session for running operations in the graph
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with tf.device('/gpu:0'):
with open('./graph.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
output = tf.import_graph_def(graph_def, input_map={'im_input:0': im_input}, return_elements=['output:0'])
record_psnr = []
for i in np.arange(1, 20+1, 1):
for p in np.arange(1, 3+1, 1):
psnrs = []
im = np.array(Image.open(os.path.join(folder, '%03d/%03dMP%d.PNG' % (i, i, p))))
#Image.fromarray(im).show()
for g in np.arange(1, 10+1, 1):
im_n = np.array(Image.open(os.path.join(folder, '%03d/%03dN%02dP%d.PNG' % (i, i, g, p))))
#Image.fromarray(im_n).show()
im_n = im_n.astype(np.float32) / 255.0
im_n = np.expand_dims(im_n, axis=0)
im_dn = sess.run(output, feed_dict={im_input: im_n})
im_dn = np.squeeze(im_dn) * 255.0
im_dn = np.maximum(im_dn, 0)
im_dn = np.minimum(im_dn, 255)
#Image.fromarray(np.asarray(im_dn, dtype=np.uint8)).show()
psnr = compute_psnr(im, np.asarray(im_dn, dtype=np.uint8))
print('i%03d p%d g%02d: %.2f dB' % (i, p, g, psnr))
psnrs.append(psnr)
record_psnr.append(psnrs)
print('%.2f+-%.3f dB' % (np.mean(record_psnr), np.mean(np.std(record_psnr, 1))))
if __name__ == '__main__':
main()
|
[
"numpy.log10",
"numpy.asfarray",
"numpy.array",
"numpy.arange",
"numpy.mean",
"os.listdir",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.GraphDef",
"tensorflow.ConfigProto",
"numpy.maximum",
"tensorflow.device",
"numpy.squeeze",
"tensorflow.import_graph_def",
"numpy.std",
"numpy.minimum",
"os.path.join",
"numpy.expand_dims"
] |
[((971, 989), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (981, 989), False, 'import os\n'), ((1021, 1081), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[1, None, None, 3]'], {'name': '"""im_input"""'}), "('float', [1, None, None, 3], name='im_input')\n", (1035, 1081), True, 'import tensorflow as tf\n'), ((1149, 1190), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1163, 1190), True, 'import tensorflow as tf\n'), ((1239, 1264), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1249, 1264), True, 'import tensorflow as tf\n'), ((1540, 1563), 'numpy.arange', 'np.arange', (['(1)', '(20 + 1)', '(1)'], {}), '(1, 20 + 1, 1)\n', (1549, 1563), True, 'import numpy as np\n'), ((135, 153), 'numpy.array', 'np.array', (['im.shape'], {}), '(im.shape)\n', (143, 153), True, 'import numpy as np\n'), ((832, 854), 'numpy.log10', 'np.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (840, 854), True, 'import numpy as np\n'), ((1273, 1292), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1282, 1292), True, 'import tensorflow as tf\n'), ((1574, 1596), 'numpy.arange', 'np.arange', (['(1)', '(3 + 1)', '(1)'], {}), '(1, 3 + 1, 1)\n', (1583, 1596), True, 'import numpy as np\n'), ((260, 278), 'numpy.array', 'np.array', (['im.shape'], {}), '(im.shape)\n', (268, 278), True, 'import numpy as np\n'), ((1348, 1361), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1359, 1361), True, 'import tensorflow as tf\n'), ((1413, 1513), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'input_map': "{'im_input:0': im_input}", 'return_elements': "['output:0']"}), "(graph_def, input_map={'im_input:0': im_input},\n return_elements=['output:0'])\n", (1432, 1513), True, 'import tensorflow as tf\n'), ((1737, 1760), 'numpy.arange', 'np.arange', (['(1)', '(10 + 1)', '(1)'], {}), '(1, 10 + 1, 1)\n', (1746, 1760), True, 'import numpy as np\n'), ((1943, 1971), 'numpy.expand_dims', 'np.expand_dims', (['im_n'], {'axis': '(0)'}), '(im_n, axis=0)\n', (1957, 1971), True, 'import numpy as np\n'), ((2081, 2101), 'numpy.maximum', 'np.maximum', (['im_dn', '(0)'], {}), '(im_dn, 0)\n', (2091, 2101), True, 'import numpy as np\n'), ((2114, 2136), 'numpy.minimum', 'np.minimum', (['im_dn', '(255)'], {}), '(im_dn, 255)\n', (2124, 2136), True, 'import numpy as np\n'), ((2400, 2420), 'numpy.mean', 'np.mean', (['record_psnr'], {}), '(record_psnr)\n', (2407, 2420), True, 'import numpy as np\n'), ((1638, 1691), 'os.path.join', 'os.path.join', (['folder', "('%03d/%03dMP%d.PNG' % (i, i, p))"], {}), "(folder, '%03d/%03dMP%d.PNG' % (i, i, p))\n", (1650, 1691), False, 'import os\n'), ((2042, 2059), 'numpy.squeeze', 'np.squeeze', (['im_dn'], {}), '(im_dn)\n', (2052, 2059), True, 'import numpy as np\n'), ((2229, 2262), 'numpy.asarray', 'np.asarray', (['im_dn'], {'dtype': 'np.uint8'}), '(im_dn, dtype=np.uint8)\n', (2239, 2262), True, 'import numpy as np\n'), ((2430, 2452), 'numpy.std', 'np.std', (['record_psnr', '(1)'], {}), '(record_psnr, 1)\n', (2436, 2452), True, 'import numpy as np\n'), ((768, 784), 'numpy.asfarray', 'np.asfarray', (['im1'], {}), '(im1)\n', (779, 784), True, 'import numpy as np\n'), ((787, 803), 'numpy.asfarray', 'np.asfarray', (['im2'], {}), '(im2)\n', (798, 803), True, 'import numpy as np\n'), ((1791, 1851), 'os.path.join', 'os.path.join', (['folder', "('%03d/%03dN%02dP%d.PNG' % (i, i, g, p))"], {}), "(folder, '%03d/%03dN%02dP%d.PNG' % (i, i, g, 
p))\n", (1803, 1851), False, 'import os\n')]
|
import sql as sql
import streamlit as st
from streamlit_folium import folium_static
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import json
import sys
import folium
import requests
from bs4 import BeautifulSoup
import csv
from tqdm import tqdm
import webbrowser
import os.path as osp
import os
from folium.plugins import MarkerCluster
import numpy as np
from numpy import genfromtxt
import sqlite3
with st.echo(code_location='below'):
import zipfile
zipFile = zipfile.ZipFile("2019-20-fullyr-data_sa_crime.csv.zip", 'r')
zipFile.extract('2019-20-fullyr-data_sa_crime.csv')
df1 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
# from sqlite3 import Error
#
#
# def create_connection(path):
# connection = None
# try:
# connection = sqlite3.connect(path)
# print("Connection to SQLite DB successful")
# except Error as e:
# print(f"The error '{e}' occurred")
#
# return connection
st.title("Различные данные по правонарушениям в Южной Австралии за 2018-2020гг.")
xx = df1.copy()
xx.drop(columns = ['Reported Date', 'Postcode - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
xx.sort_values(by='Suburb - Incident', ascending=False)
groups = xx.groupby('Suburb - Incident', as_index=False).sum()
group1 = groups.sort_values('Offence count', ascending=False).head(15)
    st.write('Suburbs with the highest offence counts, 2019-2020')
fig2, ax2 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group1, x='Suburb - Incident', y='Offence count', palette='magma')
plt.xlabel('Suburb', size=20)
plt.ylabel('Offence count in the suburb', size=20)
plt.title('Total offence count of crimes in the suburbs (top 15) 2019/2020', size=36)
st.pyplot(fig2)
    if st.button('Show the suburbs with the highest offence counts for 2019-2020 as a table'):
st.dataframe(group1)
xx1 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx1.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 2 Description', 'Offence Level 3 Description'])
xx1.sort_values(by='Offence Level 1 Description', ascending=False)
groups1 = xx1.groupby('Offence Level 1 Description', as_index=False).sum()
group12 = groups1.sort_values('Offence count', ascending=False)
    st.write('Offence counts by level 1 offence classification, 2019-2020')
fig3, ax3 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group12, x='Offence Level 1 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev1)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev1) 2019/2020', size=36)
st.pyplot(fig3)
    if st.button('Show offence counts by level 1 classification for 2019-2020 as a table'):
st.dataframe(group12)
xx2 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx2.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 3 Description'])
xx2.sort_values(by='Offence Level 2 Description', ascending=False)
groups1_2 = xx2.groupby('Offence Level 2 Description', as_index=False).sum()
group123 = groups1_2.sort_values('Offence count', ascending=False)
    st.write('Offence counts by level 2 offence classification, 2019-2020')
fig4, ax4 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group123, x='Offence Level 2 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev2)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev2) 2019/2020', size=36)
st.pyplot(fig4)
    if st.button('Show offence counts by level 2 classification for 2019-2020 as a table'):
st.dataframe(group123)
xx3 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx3.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description'])
xx3.sort_values(by='Offence Level 3 Description', ascending=False)
groups1_2_3 = xx3.groupby('Offence Level 3 Description', as_index=False).sum()
group1234 = groups1_2_3.sort_values('Offence count', ascending=False)
    st.write('Offence counts by level 3 offence classification, 2019-2020')
fig5, ax5 = plt.subplots(figsize=(60, 20))
sns.barplot(data=group1234, x='Offence Level 3 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev3)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev3)', size=36)
st.pyplot(fig5)
    if st.button('Show offence counts by level 3 classification for 2019-2020 as a table'):
st.dataframe(data=group1234)
xx4 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
xx4.drop(columns=['Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
xx4.sort_values(by='Reported Date')
groups1_2_3_4 = xx4.groupby('Reported Date', as_index=False).sum()
group12345 = groups1_2_3_4.sort_values('Offence count', ascending=False)
    st.write('Offence counts by date, 2019-2020')
fig6, ax6 = plt.subplots(figsize=(60, 20))
sns.lineplot(data=group12345, x='Reported Date', y='Offence count', color='red')
plt.xlabel('Date', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count by date 01.07.19-30.06.20', size=36)
st.pyplot(fig6)
    if st.button('Show offence counts by date for 2019-2020 as a table'):
st.dataframe(data=group12345)
x_18_19=pd.read_csv ('2018-19-data_sa_crime.csv')
x_18_19.drop(columns=['Reported Date', 'Postcode - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
x_18_19.sort_values(by='Suburb - Incident', ascending=False)
groups_18_19 = x_18_19.groupby('Suburb - Incident', as_index=False).sum()
group_18_19_1 = groups_18_19.sort_values('Offence count', ascending=False).head(15)
    st.write('Suburbs with the highest offence counts, 2018-2019')
fig7, ax7 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_1, x='Suburb - Incident', y='Offence count', palette='magma')
plt.xlabel('Suburb', size=20)
plt.ylabel('Offence count in the suburb', size=20)
plt.title('Total offence count of crimes in the suburbs (top 15) 2018/2019', size=36)
st.pyplot(fig7)
    if st.button('Show the suburbs with the highest offence counts for 2018-2019 as a table'):
st.dataframe(group_18_19_1)
x_18_19_2 = pd.read_csv('2018-19-data_sa_crime.csv')
x_18_19_2.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 2 Description', 'Offence Level 3 Description'])
x_18_19_2.sort_values(by='Offence Level 1 Description', ascending=False)
groups_18_19_2 = x_18_19_2.groupby('Offence Level 1 Description', as_index=False).sum()
group_18_19_2 = groups_18_19_2.sort_values('Offence count', ascending=False)
    st.write('Offence counts by level 1 offence classification, 2018-2019')
fig8, ax8 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_2, x='Offence Level 1 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev1)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev1) 2018/2019', size=36)
st.pyplot(fig8)
    if st.button('Show offence counts by level 1 classification for 2018-2019 as a table'):
st.dataframe(group_18_19_2)
x_18_19_4 = pd.read_csv('2018-19-data_sa_crime.csv')
    x_18_19_4 = x_18_19_4.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 3 Description'])
    x_18_19_4 = x_18_19_4.sort_values(by='Offence Level 2 Description', ascending=False)
groups_18_19_4 = x_18_19_4.groupby('Offence Level 2 Description', as_index=False).sum()
group_18_19_4 = groups_18_19_4.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по второй классификации за 2018-2019гг.')
fig10, ax10 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_4, x='Offence Level 2 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev2)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev2) 2018/2019', size=36)
st.pyplot(fig10)
if st.button('Показать статистику по количеству правонврушений по второй классификации за 2018-2019гг. в виде таблицы'):
st.dataframe(group_18_19_4)
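    # 2018-19 offences grouped by the third-level offence classification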
x_18_19_3 = pd.read_csv('2018-19-data_sa_crime.csv')
    x_18_19_3 = x_18_19_3.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description'])
    x_18_19_3 = x_18_19_3.sort_values(by='Offence Level 3 Description', ascending=False)
groups_18_19_3 = x_18_19_3.groupby('Offence Level 3 Description', as_index=False).sum()
group_18_19_3 = groups_18_19_3.sort_values('Offence count', ascending=False)
st.write('Статистика по количеству правонарушений по третьей классификации за 2018-2019гг.')
fig9, ax9 = plt.subplots(figsize=(60, 20))
sns.barplot(data=group_18_19_3, x='Offence Level 3 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev3)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev3) 2018/2019', size=36)
st.pyplot(fig9)
if st.button('Показать статистику по количеству правонарушений по третьей классификации за 2018-2019гг. в виде таблицы'):
st.dataframe(group_18_19_3)
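    # Offender numbers per year (2009-2019) from the public order offences file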
din=pd.read_csv("Offenders, principal offence of public order offences.csv")
#din_data = genfromtxt('Offenders, principal offence of public order offences.csv', delimiter=',')
print(din)
#din.columns=["Years", 'Offenders']
#print(din)
st.write('Статистика по количеству правонарушителей 2009-2019гг.')
fig10, ax10 = plt.subplots(figsize=(40, 20))
sns.lineplot(data=din, x="Years", y='Offenders', color='red')
plt.xlabel('Year', size=40)
plt.ylabel('Offenders', size=40)
plt.title('Offenders dinamics', size=50)
st.pyplot(fig10)
if st.button('Показать статистику по количеству правонарушителей 2009-2019гг. в виде таблицы'):
st.dataframe(din)
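    # Build an interactive folium map of the busiest locations for each yearly file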
years = np.array([2019, 2020])
st.write("(Придётся немного подождать, программа обрабатывает примерно 95тыс. результатов для каждого года)")
files = ['2019-20-fullyr-data_sa_crime.csv',
             '2018-19-data_sa_crime.csv'] # run the whole map-with-markers block below for each of these files
for file in files:
locations = []
entrypoint1 = "https://nominatim.openstreetmap.org/search"
query1 = {'q': 'MORPHETT VALE australia', 'format': 'xml'}
r1 = requests.get(entrypoint1, params=query1)
soup = BeautifulSoup(r1.text, 'xml')
st.write("Визуализация количества правонарушений по пригородам на карте " + str(years[0]) + "-" + str(years[1]) + "гг.")
years = years-1
print(years)
        with open(osp.join(os.environ['HOME'], 'Downloads/first_project 2', file), newline='') as f: # if you get path errors, just put the full path to the folder with the csv files here
reader = csv.reader(f)
for row in reader:
                place = row[1] + ' ' + row[2] # take the suburb name and the postcode
locations.append(place)
        locations.pop(0) # drop the first row (the column headers)
        new_dict = {i: locations.count(i) for i in tqdm(locations)} # build a dict {location: offence count}
        sorted_values = sorted(new_dict.values(), reverse=True) # sort the dict values from largest to smallest
sorted_dict = {}
        for i in sorted_values: # rebuild the dict ordered by value
for k in new_dict.keys():
if new_dict[k] == i:
sorted_dict[k] = new_dict[k]
break
        # slice the dict via lists
        lst_slice_key = list(sorted_dict.keys())[:27] # take the first 27 entries (keys)
        lst_slice_val = list(sorted_dict.values())[:27] # take the first 27 entries (values)
        new_sorted_dict = dict(zip(lst_slice_key, lst_slice_val)) # assemble the new sliced dict
print(new_sorted_dict)
lat_19_20 = []
lon_19_20 = []
lst_number = []
lst_place = []
        # query the geocoder and fill the lists with the data we need
for name, number in tqdm(new_sorted_dict.items()):
entrypoint2 = "https://nominatim.openstreetmap.org/search"
query2 = {'q': str(name), 'format': 'xml'}
r2 = requests.get(entrypoint2, params=query2)
soup1 = BeautifulSoup(r2.text, 'xml')
for place1 in soup1.find_all("place"):
lst_place.append(place1['display_name'])
lat_19_20.append(float(place1['lat']))
lon_19_20.append(float(place1['lon']))
lst_number.append(number)
break
coord_19_20 = dict(zip(lat_19_20, lon_19_20))
a = list(coord_19_20.keys())[0]
b = coord_19_20[a]
        def color_change(count): # choose the marker colour based on the crime count at the point
if (count < 800):
return ('green')
elif (800 <= count < 1100):
return ('orange')
else:
return ('red')
        def radius_change(count): # choose the marker radius based on the crime count at the point
if (count < 800):
rad = 7
return rad
elif (800 <= count < 1100):
rad = 14
return rad
else:
rad = 21
return rad
        map = folium.Map(location=[a, b], zoom_start=8) # create the map with a default location
        marker_cluster = folium.plugins.MarkerCluster().add_to(map) # cluster the markers on the map
        for lat, lon, place, number in tqdm(zip(lat_19_20, lon_19_20, lst_place, lst_number)): # add the markers to the map one by one
place_splited = place.split(',')
folium.CircleMarker(location=[lat, lon], radius=radius_change(int(number)),
                                # location - marker coordinates, radius - taken from radius_change()
popup=f'Place: {place_splited[0]}, {place_splited[1]}, {place_splited[2]}\nCrimes: {str(number)}',
                                # popup - marker text
fill_color=color_change(int(number)), color="black", fill_opacity=0.9).add_to(
            marker_cluster) # fill_color - taken from color_change()
        map.save(f"map_{file[:-4]}.html") # save the map as an html file
print(f'DONE with {file}')
url = f"map_{file[:-4]}.html"
folium_static(map)
|
[
"pandas.read_csv",
"zipfile.ZipFile",
"streamlit.echo",
"matplotlib.pyplot.ylabel",
"streamlit.button",
"numpy.array",
"streamlit.title",
"matplotlib.pyplot.xlabel",
"folium.Map",
"folium.plugins.MarkerCluster",
"csv.reader",
"streamlit.write",
"requests.get",
"seaborn.lineplot",
"streamlit.dataframe",
"matplotlib.pyplot.title",
"streamlit_folium.folium_static",
"streamlit.pyplot",
"tqdm.tqdm",
"os.path.join",
"bs4.BeautifulSoup",
"seaborn.barplot",
"matplotlib.pyplot.subplots"
] |
[((438, 468), 'streamlit.echo', 'st.echo', ([], {'code_location': '"""below"""'}), "(code_location='below')\n", (445, 468), True, 'import streamlit as st\n'), ((503, 563), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""2019-20-fullyr-data_sa_crime.csv.zip"""', '"""r"""'], {}), "('2019-20-fullyr-data_sa_crime.csv.zip', 'r')\n", (518, 563), False, 'import zipfile\n'), ((630, 677), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (641, 677), True, 'import pandas as pd\n'), ((1026, 1112), 'streamlit.title', 'st.title', (['"""Различные данные по правонарушениям в Южной Австралии за 2018-2020гг."""'], {}), "(\n 'Различные данные по правонарушениям в Южной Австралии за 2018-2020гг.')\n", (1034, 1112), True, 'import streamlit as st\n'), ((1492, 1594), 'streamlit.write', 'st.write', (['"""Статистика по пригородам с наибольшим количествам правонарушений за 2019-2020гг."""'], {}), "(\n 'Статистика по пригородам с наибольшим количествам правонарушений за 2019-2020гг.'\n )\n", (1500, 1594), True, 'import streamlit as st\n'), ((1602, 1632), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (1614, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1725), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group1', 'x': '"""Suburb - Incident"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group1, x='Suburb - Incident', y='Offence count', palette=\n 'magma')\n", (1648, 1725), True, 'import seaborn as sns\n'), ((1725, 1754), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Suburb"""'], {'size': '(20)'}), "('Suburb', size=20)\n", (1735, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1809), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count in the suburb"""'], {'size': '(20)'}), "('Offence count in the suburb', size=20)\n", (1769, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1814, 1903), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of crimes in the suburbs (top 15) 2019/2020"""'], {'size': '(36)'}), "('Total offence count of crimes in the suburbs (top 15) 2019/2020',\n size=36)\n", (1823, 1903), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1919), 'streamlit.pyplot', 'st.pyplot', (['fig2'], {}), '(fig2)\n', (1913, 1919), True, 'import streamlit as st\n'), ((1927, 2054), 'streamlit.button', 'st.button', (['"""Показать статистику по пригородам с наибольшим количествам правонарушений за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по пригородам с наибольшим количествам правонарушений за 2019-2020гг. 
в виде таблицы'\n )\n", (1936, 2054), True, 'import streamlit as st\n'), ((2086, 2133), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (2097, 2133), True, 'import pandas as pd\n'), ((2503, 2604), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по первой классификации за 2019-2020гг."""'], {}), "(\n 'Статистика по количеству правонарушений по первой классификации за 2019-2020гг.'\n )\n", (2511, 2604), True, 'import streamlit as st\n'), ((2611, 2641), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (2623, 2641), True, 'import matplotlib.pyplot as plt\n'), ((2646, 2745), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group12', 'x': '"""Offence Level 1 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group12, x='Offence Level 1 Description', y=\n 'Offence count', palette='magma')\n", (2657, 2745), True, 'import seaborn as sns\n'), ((2745, 2788), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev1)"""'], {'size': '(20)'}), "('Type of crime (lev1)', size=20)\n", (2755, 2788), True, 'import matplotlib.pyplot as plt\n'), ((2793, 2829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (2803, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2912), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev1) 2019/2020"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev1) 2019/2020', size=36)\n", (2843, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2917, 2932), 'streamlit.pyplot', 'st.pyplot', (['fig3'], {}), '(fig3)\n', (2926, 2932), True, 'import streamlit as st\n'), ((2940, 3066), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по первой классификации за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по первой классификации за 2019-2020гг. 
в виде таблицы'\n )\n", (2949, 3066), True, 'import streamlit as st\n'), ((3099, 3146), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (3110, 3146), True, 'import pandas as pd\n'), ((3521, 3622), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по второй классификации за 2019-2020гг."""'], {}), "(\n 'Статистика по количеству правонарушений по второй классификации за 2019-2020гг.'\n )\n", (3529, 3622), True, 'import streamlit as st\n'), ((3629, 3659), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (3641, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3764), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group123', 'x': '"""Offence Level 2 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group123, x='Offence Level 2 Description', y=\n 'Offence count', palette='magma')\n", (3675, 3764), True, 'import seaborn as sns\n'), ((3764, 3807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev2)"""'], {'size': '(20)'}), "('Type of crime (lev2)', size=20)\n", (3774, 3807), True, 'import matplotlib.pyplot as plt\n'), ((3812, 3848), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (3822, 3848), True, 'import matplotlib.pyplot as plt\n'), ((3853, 3931), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev2) 2019/2020"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev2) 2019/2020', size=36)\n", (3862, 3931), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3951), 'streamlit.pyplot', 'st.pyplot', (['fig4'], {}), '(fig4)\n', (3945, 3951), True, 'import streamlit as st\n'), ((3959, 4085), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по второй классификации за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по второй классификации за 2019-2020гг. 
в виде таблицы'\n )\n", (3968, 4085), True, 'import streamlit as st\n'), ((4119, 4166), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (4130, 4166), True, 'import pandas as pd\n'), ((4546, 4648), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по третьей классификации за 2019-2020гг."""'], {}), "(\n 'Статистика по количеству правонарушений по третьей классификации за 2019-2020гг.'\n )\n", (4554, 4648), True, 'import streamlit as st\n'), ((4655, 4685), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 20)'}), '(figsize=(60, 20))\n', (4667, 4685), True, 'import matplotlib.pyplot as plt\n'), ((4690, 4791), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group1234', 'x': '"""Offence Level 3 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group1234, x='Offence Level 3 Description', y=\n 'Offence count', palette='magma')\n", (4701, 4791), True, 'import seaborn as sns\n'), ((4791, 4834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev3)"""'], {'size': '(20)'}), "('Type of crime (lev3)', size=20)\n", (4801, 4834), True, 'import matplotlib.pyplot as plt\n'), ((4839, 4875), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (4849, 4875), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4948), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev3)"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev3)', size=36)\n", (4889, 4948), True, 'import matplotlib.pyplot as plt\n'), ((4953, 4968), 'streamlit.pyplot', 'st.pyplot', (['fig5'], {}), '(fig5)\n', (4962, 4968), True, 'import streamlit as st\n'), ((4976, 5103), 'streamlit.button', 'st.button', (['"""Показать cтатистику по количеству правонарушений по третьей классификации за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать cтатистику по количеству правонарушений по третьей классификации за 2019-2020гг. 
в виде таблицы'\n )\n", (4985, 5103), True, 'import streamlit as st\n'), ((5143, 5190), 'pandas.read_csv', 'pd.read_csv', (['"""2019-20-fullyr-data_sa_crime.csv"""'], {}), "('2019-20-fullyr-data_sa_crime.csv')\n", (5154, 5190), True, 'import pandas as pd\n'), ((5544, 5620), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по датам за 2019-2020гг."""'], {}), "('Статистика по количеству правонарушений по датам за 2019-2020гг.')\n", (5552, 5620), True, 'import streamlit as st\n'), ((5637, 5667), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 20)'}), '(figsize=(60, 20))\n', (5649, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5757), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'group12345', 'x': '"""Reported Date"""', 'y': '"""Offence count"""', 'color': '"""red"""'}), "(data=group12345, x='Reported Date', y='Offence count', color='red'\n )\n", (5684, 5757), True, 'import seaborn as sns\n'), ((5757, 5784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {'size': '(20)'}), "('Date', size=20)\n", (5767, 5784), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5825), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (5799, 5825), True, 'import matplotlib.pyplot as plt\n'), ((5830, 5897), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count by date 01.07.19-30.06.20"""'], {'size': '(36)'}), "('Total offence count by date 01.07.19-30.06.20', size=36)\n", (5839, 5897), True, 'import matplotlib.pyplot as plt\n'), ((5902, 5917), 'streamlit.pyplot', 'st.pyplot', (['fig6'], {}), '(fig6)\n', (5911, 5917), True, 'import streamlit as st\n'), ((5925, 6036), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по датам за 2019-2020гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по датам за 2019-2020гг. 
в виде таблицы'\n )\n", (5934, 6036), True, 'import streamlit as st\n'), ((6080, 6120), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (6091, 6120), True, 'import pandas as pd\n'), ((6519, 6621), 'streamlit.write', 'st.write', (['"""Статистика по пригородам с наибольшим количествам правонарушений за 2018-2019гг."""'], {}), "(\n 'Статистика по пригородам с наибольшим количествам правонарушений за 2018-2019гг.'\n )\n", (6527, 6621), True, 'import streamlit as st\n'), ((6628, 6658), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (6640, 6658), True, 'import matplotlib.pyplot as plt\n'), ((6663, 6757), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_1', 'x': '"""Suburb - Incident"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_1, x='Suburb - Incident', y='Offence count',\n palette='magma')\n", (6674, 6757), True, 'import seaborn as sns\n'), ((6758, 6787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Suburb"""'], {'size': '(20)'}), "('Suburb', size=20)\n", (6768, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6792, 6842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count in the suburb"""'], {'size': '(20)'}), "('Offence count in the suburb', size=20)\n", (6802, 6842), True, 'import matplotlib.pyplot as plt\n'), ((6847, 6936), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of crimes in the suburbs (top 15) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of crimes in the suburbs (top 15) 2018/2019',\n size=36)\n", (6856, 6936), True, 'import matplotlib.pyplot as plt\n'), ((6937, 6952), 'streamlit.pyplot', 'st.pyplot', (['fig7'], {}), '(fig7)\n', (6946, 6952), True, 'import streamlit as st\n'), ((6960, 7087), 'streamlit.button', 'st.button', (['"""Показать статистику по пригородам с наибольшим количествам правонарушений за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по пригородам с наибольшим количествам правонарушений за 2018-2019гг. 
в виде таблицы'\n )\n", (6969, 7087), True, 'import streamlit as st\n'), ((7132, 7172), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (7143, 7172), True, 'import pandas as pd\n'), ((7580, 7681), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по первой классификации за 2018-2019гг."""'], {}), "(\n 'Статистика по количеству правонарушений по первой классификации за 2018-2019гг.'\n )\n", (7588, 7681), True, 'import streamlit as st\n'), ((7688, 7718), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (7700, 7718), True, 'import matplotlib.pyplot as plt\n'), ((7723, 7828), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_2', 'x': '"""Offence Level 1 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_2, x='Offence Level 1 Description', y=\n 'Offence count', palette='magma')\n", (7734, 7828), True, 'import seaborn as sns\n'), ((7828, 7871), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev1)"""'], {'size': '(20)'}), "('Type of crime (lev1)', size=20)\n", (7838, 7871), True, 'import matplotlib.pyplot as plt\n'), ((7876, 7912), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (7886, 7912), True, 'import matplotlib.pyplot as plt\n'), ((7917, 7995), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev1) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev1) 2018/2019', size=36)\n", (7926, 7995), True, 'import matplotlib.pyplot as plt\n'), ((8000, 8015), 'streamlit.pyplot', 'st.pyplot', (['fig8'], {}), '(fig8)\n', (8009, 8015), True, 'import streamlit as st\n'), ((8023, 8149), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по первой классификации за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по первой классификации за 2018-2019гг. 
в виде таблицы'\n )\n", (8032, 8149), True, 'import streamlit as st\n'), ((8194, 8234), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (8205, 8234), True, 'import pandas as pd\n'), ((8642, 8743), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по второй классификации за 2018-2019гг."""'], {}), "(\n 'Статистика по количеству правонарушений по второй классификации за 2018-2019гг.'\n )\n", (8650, 8743), True, 'import streamlit as st\n'), ((8752, 8782), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (8764, 8782), True, 'import matplotlib.pyplot as plt\n'), ((8787, 8892), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_4', 'x': '"""Offence Level 2 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_4, x='Offence Level 2 Description', y=\n 'Offence count', palette='magma')\n", (8798, 8892), True, 'import seaborn as sns\n'), ((8892, 8935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev2)"""'], {'size': '(20)'}), "('Type of crime (lev2)', size=20)\n", (8902, 8935), True, 'import matplotlib.pyplot as plt\n'), ((8940, 8976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (8950, 8976), True, 'import matplotlib.pyplot as plt\n'), ((8981, 9059), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev2) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev2) 2018/2019', size=36)\n", (8990, 9059), True, 'import matplotlib.pyplot as plt\n'), ((9064, 9080), 'streamlit.pyplot', 'st.pyplot', (['fig10'], {}), '(fig10)\n', (9073, 9080), True, 'import streamlit as st\n'), ((9088, 9214), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонврушений по второй классификации за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонврушений по второй классификации за 2018-2019гг. 
в виде таблицы'\n )\n", (9097, 9214), True, 'import streamlit as st\n'), ((9259, 9299), 'pandas.read_csv', 'pd.read_csv', (['"""2018-19-data_sa_crime.csv"""'], {}), "('2018-19-data_sa_crime.csv')\n", (9270, 9299), True, 'import pandas as pd\n'), ((9707, 9809), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушений по третьей классификации за 2018-2019гг."""'], {}), "(\n 'Статистика по количеству правонарушений по третьей классификации за 2018-2019гг.'\n )\n", (9715, 9809), True, 'import streamlit as st\n'), ((9816, 9846), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 20)'}), '(figsize=(60, 20))\n', (9828, 9846), True, 'import matplotlib.pyplot as plt\n'), ((9851, 9956), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'group_18_19_3', 'x': '"""Offence Level 3 Description"""', 'y': '"""Offence count"""', 'palette': '"""magma"""'}), "(data=group_18_19_3, x='Offence Level 3 Description', y=\n 'Offence count', palette='magma')\n", (9862, 9956), True, 'import seaborn as sns\n'), ((9956, 9999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Type of crime (lev3)"""'], {'size': '(20)'}), "('Type of crime (lev3)', size=20)\n", (9966, 9999), True, 'import matplotlib.pyplot as plt\n'), ((10004, 10040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offence count"""'], {'size': '(20)'}), "('Offence count', size=20)\n", (10014, 10040), True, 'import matplotlib.pyplot as plt\n'), ((10045, 10123), 'matplotlib.pyplot.title', 'plt.title', (['"""Total offence count of different crimes (lev3) 2018/2019"""'], {'size': '(36)'}), "('Total offence count of different crimes (lev3) 2018/2019', size=36)\n", (10054, 10123), True, 'import matplotlib.pyplot as plt\n'), ((10128, 10143), 'streamlit.pyplot', 'st.pyplot', (['fig9'], {}), '(fig9)\n', (10137, 10143), True, 'import streamlit as st\n'), ((10151, 10278), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушений по третьей классификации за 2018-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушений по третьей классификации за 2018-2019гг. 
в виде таблицы'\n )\n", (10160, 10278), True, 'import streamlit as st\n'), ((10316, 10388), 'pandas.read_csv', 'pd.read_csv', (['"""Offenders, principal offence of public order offences.csv"""'], {}), "('Offenders, principal offence of public order offences.csv')\n", (10327, 10388), True, 'import pandas as pd\n'), ((10567, 10633), 'streamlit.write', 'st.write', (['"""Статистика по количеству правонарушителей 2009-2019гг."""'], {}), "('Статистика по количеству правонарушителей 2009-2019гг.')\n", (10575, 10633), True, 'import streamlit as st\n'), ((10652, 10682), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 20)'}), '(figsize=(40, 20))\n', (10664, 10682), True, 'import matplotlib.pyplot as plt\n'), ((10687, 10748), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'din', 'x': '"""Years"""', 'y': '"""Offenders"""', 'color': '"""red"""'}), "(data=din, x='Years', y='Offenders', color='red')\n", (10699, 10748), True, 'import seaborn as sns\n'), ((10753, 10780), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {'size': '(40)'}), "('Year', size=40)\n", (10763, 10780), True, 'import matplotlib.pyplot as plt\n'), ((10785, 10817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Offenders"""'], {'size': '(40)'}), "('Offenders', size=40)\n", (10795, 10817), True, 'import matplotlib.pyplot as plt\n'), ((10822, 10862), 'matplotlib.pyplot.title', 'plt.title', (['"""Offenders dinamics"""'], {'size': '(50)'}), "('Offenders dinamics', size=50)\n", (10831, 10862), True, 'import matplotlib.pyplot as plt\n'), ((10867, 10883), 'streamlit.pyplot', 'st.pyplot', (['fig10'], {}), '(fig10)\n', (10876, 10883), True, 'import streamlit as st\n'), ((10892, 10993), 'streamlit.button', 'st.button', (['"""Показать статистику по количеству правонарушителей 2009-2019гг. в виде таблицы"""'], {}), "(\n 'Показать статистику по количеству правонарушителей 2009-2019гг. в виде таблицы'\n )\n", (10901, 10993), True, 'import streamlit as st\n'), ((11024, 11046), 'numpy.array', 'np.array', (['[2019, 2020]'], {}), '([2019, 2020])\n', (11032, 11046), True, 'import numpy as np\n'), ((11051, 11170), 'streamlit.write', 'st.write', (['"""(Придётся немного подождать, программа обрабатывает примерно 95тыс. результатов для каждого года)"""'], {}), "(\n '(Придётся немного подождать, программа обрабатывает примерно 95тыс. 
результатов для каждого года)'\n )\n", (11059, 11170), True, 'import streamlit as st\n'), ((2054, 2074), 'streamlit.dataframe', 'st.dataframe', (['group1'], {}), '(group1)\n', (2066, 2074), True, 'import streamlit as st\n'), ((3066, 3087), 'streamlit.dataframe', 'st.dataframe', (['group12'], {}), '(group12)\n', (3078, 3087), True, 'import streamlit as st\n'), ((4085, 4107), 'streamlit.dataframe', 'st.dataframe', (['group123'], {}), '(group123)\n', (4097, 4107), True, 'import streamlit as st\n'), ((5103, 5131), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 'group1234'}), '(data=group1234)\n', (5115, 5131), True, 'import streamlit as st\n'), ((6036, 6065), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 'group12345'}), '(data=group12345)\n', (6048, 6065), True, 'import streamlit as st\n'), ((7087, 7114), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_1'], {}), '(group_18_19_1)\n', (7099, 7114), True, 'import streamlit as st\n'), ((8149, 8176), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_2'], {}), '(group_18_19_2)\n', (8161, 8176), True, 'import streamlit as st\n'), ((9214, 9241), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_4'], {}), '(group_18_19_4)\n', (9226, 9241), True, 'import streamlit as st\n'), ((10278, 10305), 'streamlit.dataframe', 'st.dataframe', (['group_18_19_3'], {}), '(group_18_19_3)\n', (10290, 10305), True, 'import streamlit as st\n'), ((10993, 11010), 'streamlit.dataframe', 'st.dataframe', (['din'], {}), '(din)\n', (11005, 11010), True, 'import streamlit as st\n'), ((11524, 11564), 'requests.get', 'requests.get', (['entrypoint1'], {'params': 'query1'}), '(entrypoint1, params=query1)\n', (11536, 11564), False, 'import requests\n'), ((11580, 11609), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r1.text', '"""xml"""'], {}), "(r1.text, 'xml')\n", (11593, 11609), False, 'from bs4 import BeautifulSoup\n'), ((14581, 14622), 'folium.Map', 'folium.Map', ([], {'location': '[a, b]', 'zoom_start': '(8)'}), '(location=[a, b], zoom_start=8)\n', (14591, 14622), False, 'import folium\n'), ((15682, 15700), 'streamlit_folium.folium_static', 'folium_static', (['map'], {}), '(map)\n', (15695, 15700), False, 'from streamlit_folium import folium_static\n'), ((11994, 12007), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (12004, 12007), False, 'import csv\n'), ((13446, 13486), 'requests.get', 'requests.get', (['entrypoint2'], {'params': 'query2'}), '(entrypoint2, params=query2)\n', (13458, 13486), False, 'import requests\n'), ((13507, 13536), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r2.text', '"""xml"""'], {}), "(r2.text, 'xml')\n", (13520, 13536), False, 'from bs4 import BeautifulSoup\n'), ((11806, 11869), 'os.path.join', 'osp.join', (["os.environ['HOME']", '"""Downloads/first_project 2"""', 'file'], {}), "(os.environ['HOME'], 'Downloads/first_project 2', file)\n", (11814, 11869), True, 'import os.path as osp\n'), ((12290, 12305), 'tqdm.tqdm', 'tqdm', (['locations'], {}), '(locations)\n', (12294, 12305), False, 'from tqdm import tqdm\n'), ((14686, 14716), 'folium.plugins.MarkerCluster', 'folium.plugins.MarkerCluster', ([], {}), '()\n', (14714, 14716), False, 'import folium\n')]
|
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Created by <NAME> (<EMAIL>)
Anisotropy data analysis
The equation for the curve as published by Marchand et al. in Nature Cell Biology in 2001 is as follows:
y = a + (b - a) / (c*(x + K)/(K*d) + 1), where
a is the anisotropy without protein,
b is anisotropy with protein,
c is the Kd for ligand,
d is the total concentration of protein.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from pathlib import Path
import os
import shutil
from timeit import default_timer as timer
from scipy.stats import norm
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from inspect import currentframe, getframeinfo
fname = getframeinfo(currentframe()).filename # current file name
current_dir = Path(fname).resolve().parent
# User input ----------------------------------------------------------------
red_x = np.array([100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0])
red_y = np.array([0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248])
red_p = np.array([0.191, 0.248, 0.05, 1])
black_x = np.array([100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0])
black_y = np.array([0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278])
black_p = np.array([0.183, 0.278, 1.5, 16])
# ---------------------------------------------------------------------------
def red_anisotropy(x, K):
a = red_p[0]
b = red_p[1]
c = red_p[2]
d = red_p[3]
return a+(b-a)/((c*(x+K)/(K*d))+1)
def black_anisotropy(x, K):
a = black_p[0]
b = black_p[1]
c = black_p[2]
d = black_p[3]
return a+(b-a)/((c*(x+K)/(K*d))+1)
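# Fit the dissociation constant K for each dataset, then plot the data with the fitted curves.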
def main():
red_p, _ = curve_fit(red_anisotropy, red_x, red_y, p0=[0.078])
black_p, _ = curve_fit(black_anisotropy, black_x, black_y, p0=[0.1])
# Plot the result
fit_x = np.linspace(0, 100, 1000)
fig, (ax1, ax2) = plt.subplots(figsize=(20, 10), ncols=2, nrows=1, dpi=300)
ax1.plot(red_x, red_y, 'ro', ms=10)
ax1.plot(fit_x, red_anisotropy(fit_x, red_p), 'r', lw=2)
ax1.set_xlabel('[dark D] um')
ax1.set_ylabel('Anisotropy')
ax1.set_title('Red K = %f' %(red_p))
ax1.set_ylim([0.15, 0.3])
ax2.plot(black_x, black_y, 'ko', ms=10)
ax2.plot(fit_x, black_anisotropy(fit_x, black_p), 'k', lw=2)
ax2.set_xlabel('[dark D] um')
ax2.set_ylabel('Anisotropy')
ax2.set_title('Black K = %f' %(black_p))
ax2.set_ylim([0.15, 0.3])
fig.savefig('plot_anisotropy.png')
plt.close(fig)
if __name__ == "__main__":
main()
|
[
"scipy.optimize.curve_fit",
"pathlib.Path",
"inspect.currentframe",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((1092, 1143), 'numpy.array', 'np.array', (['[100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0]'], {}), '([100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0])\n', (1100, 1143), True, 'import numpy as np\n'), ((1153, 1216), 'numpy.array', 'np.array', (['[0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248]'], {}), '([0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248])\n', (1161, 1216), True, 'import numpy as np\n'), ((1226, 1259), 'numpy.array', 'np.array', (['[0.191, 0.248, 0.05, 1]'], {}), '([0.191, 0.248, 0.05, 1])\n', (1234, 1259), True, 'import numpy as np\n'), ((1273, 1328), 'numpy.array', 'np.array', (['[100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0]'], {}), '([100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0])\n', (1281, 1328), True, 'import numpy as np\n'), ((1340, 1412), 'numpy.array', 'np.array', (['[0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278]'], {}), '([0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278])\n', (1348, 1412), True, 'import numpy as np\n'), ((1424, 1457), 'numpy.array', 'np.array', (['[0.183, 0.278, 1.5, 16]'], {}), '([0.183, 0.278, 1.5, 16])\n', (1432, 1457), True, 'import numpy as np\n'), ((1866, 1917), 'scipy.optimize.curve_fit', 'curve_fit', (['red_anisotropy', 'red_x', 'red_y'], {'p0': '[0.078]'}), '(red_anisotropy, red_x, red_y, p0=[0.078])\n', (1875, 1917), False, 'from scipy.optimize import curve_fit\n'), ((1936, 1991), 'scipy.optimize.curve_fit', 'curve_fit', (['black_anisotropy', 'black_x', 'black_y'], {'p0': '[0.1]'}), '(black_anisotropy, black_x, black_y, p0=[0.1])\n', (1945, 1991), False, 'from scipy.optimize import curve_fit\n'), ((2033, 2058), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (2044, 2058), True, 'import numpy as np\n'), ((2084, 2141), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)', 'ncols': '(2)', 'nrows': '(1)', 'dpi': '(300)'}), '(figsize=(20, 10), ncols=2, nrows=1, dpi=300)\n', (2096, 2141), True, 'import matplotlib.pyplot as plt\n'), ((2738, 2752), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2747, 2752), True, 'import matplotlib.pyplot as plt\n'), ((913, 927), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (925, 927), False, 'from inspect import currentframe, getframeinfo\n'), ((973, 984), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (977, 984), False, 'from pathlib import Path\n')]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a DDPG agent.
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
"""
import agent
from common import replay_buffer
from common.actor_critic import ActorNetwork
from common.actor_critic import CriticNetwork
import numpy as np
class DDPG(agent.Agent):
"""DDPG agent."""
def __init__(self, env, sess, config):
"""Initialize members."""
state_dim = env.observation_space.shape[0]
self.env = env
self.action_dim = env.action_space.shape[0]
self.action_high = env.action_space.high
self.action_low = env.action_space.low
self.batch_size = config.batch_size
self.warmup_size = config.warmup_size
self.gamma = config.gamma
self.sigma = config.sigma
self.noise_cap = config.c
self.actor = ActorNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
action_high=self.action_high,
action_low=self.action_low,
learning_rate=config.actor_lr,
grad_norm_clip=config.grad_norm_clip,
tau=config.tau,
batch_size=config.batch_size)
self.critic = CriticNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
learning_rate=config.critic_lr,
tau=config.tau,
gamma=config.gamma)
self.replay_buffer = replay_buffer.ReplayBuffer(
buffer_size=config.buffer_size)
def random_action(self, observation):
"""Return a random action."""
return self.env.action_space.sample()
def action(self, observation):
"""Return an action according to the agent's policy."""
return self.actor.get_action(observation)
def action_with_noise(self, observation):
"""Return a noisy action."""
if self.replay_buffer.size > self.warmup_size:
action = self.action(observation)
else:
action = self.random_action(observation)
noise = np.clip(np.random.randn(self.action_dim) * self.sigma,
-self.noise_cap, self.noise_cap)
action_with_noise = action + noise
return (np.clip(action_with_noise, self.action_low, self.action_high),
action, noise)
def store_experience(self, s, a, r, t, s2):
"""Save experience to replay buffer."""
self.replay_buffer.add(s, a, r, t, s2)
def train(self, global_step):
"""Train the agent's policy for 1 iteration."""
if self.replay_buffer.size > self.warmup_size:
s0, a, r, t, s1 = self.replay_buffer.sample_batch(self.batch_size)
target_actions = self.actor.get_target_action(s1)
target_qval = self.get_target_qval(s1, target_actions)
t = t.astype(dtype=int)
y = r + self.gamma * target_qval * (1 - t)
self.critic.train(s0, a, y)
actions = self.actor.get_action(s0)
grads = self.critic.get_action_gradients(s0, actions)
self.actor.train(s0, grads[0])
self.update_targets()
def update_targets(self):
"""Update all target networks."""
self.actor.update_target_network()
self.critic.update_target_network()
def get_target_qval(self, observation, action):
"""Get target Q-val."""
return self.critic.get_target_qval(observation, action)
def get_qval(self, observation, action):
"""Get Q-val."""
return self.critic.get_qval(observation, action)
|
[
"numpy.clip",
"common.actor_critic.ActorNetwork",
"common.replay_buffer.ReplayBuffer",
"common.actor_critic.CriticNetwork",
"numpy.random.randn"
] |
[((1489, 1747), 'common.actor_critic.ActorNetwork', 'ActorNetwork', ([], {'sess': 'sess', 'state_dim': 'state_dim', 'action_dim': 'self.action_dim', 'action_high': 'self.action_high', 'action_low': 'self.action_low', 'learning_rate': 'config.actor_lr', 'grad_norm_clip': 'config.grad_norm_clip', 'tau': 'config.tau', 'batch_size': 'config.batch_size'}), '(sess=sess, state_dim=state_dim, action_dim=self.action_dim,\n action_high=self.action_high, action_low=self.action_low, learning_rate\n =config.actor_lr, grad_norm_clip=config.grad_norm_clip, tau=config.tau,\n batch_size=config.batch_size)\n', (1501, 1747), False, 'from common.actor_critic import ActorNetwork\n'), ((2029, 2174), 'common.actor_critic.CriticNetwork', 'CriticNetwork', ([], {'sess': 'sess', 'state_dim': 'state_dim', 'action_dim': 'self.action_dim', 'learning_rate': 'config.critic_lr', 'tau': 'config.tau', 'gamma': 'config.gamma'}), '(sess=sess, state_dim=state_dim, action_dim=self.action_dim,\n learning_rate=config.critic_lr, tau=config.tau, gamma=config.gamma)\n', (2042, 2174), False, 'from common.actor_critic import CriticNetwork\n'), ((2380, 2438), 'common.replay_buffer.ReplayBuffer', 'replay_buffer.ReplayBuffer', ([], {'buffer_size': 'config.buffer_size'}), '(buffer_size=config.buffer_size)\n', (2406, 2438), False, 'from common import replay_buffer\n'), ((3168, 3229), 'numpy.clip', 'np.clip', (['action_with_noise', 'self.action_low', 'self.action_high'], {}), '(action_with_noise, self.action_low, self.action_high)\n', (3175, 3229), True, 'import numpy as np\n'), ((3005, 3037), 'numpy.random.randn', 'np.random.randn', (['self.action_dim'], {}), '(self.action_dim)\n', (3020, 3037), True, 'import numpy as np\n')]
|
import numpy as np
import GPy
from .GP_interface import GPInterface, convert_lengthscale, convert_2D_format
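# Wrappers around GPy models that implement the common GPInterface API.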
class GPyWrapper(GPInterface):
def __init__(self):
# GPy settings
        GPy.plotting.change_plotting_library("matplotlib") # use matplotlib for drawing
super().__init__()
self.center = 0.0
def create_kernel(self, ndim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
if kernel_name == 'Matern52':
l = convert_lengthscale(ndim, lengthscale)
kernel = GPy.kern.Matern52(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
elif kernel_name == 'RBF':
l = convert_lengthscale(ndim, lengthscale)
kernel = GPy.kern.RBF(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
else:
raise ValueError('Unsupported kernel: '+ kernel_name)
self.ndim = ndim
self.kernel = kernel
if const_kernel:
self.kernel += GPy.kern.Bias(1.0)
self.stat_kernel = self.kernel.basic
else:
self.stat_kernel = self.kernel
def set_kernel_length_prior(self, prior_mean, prior_var):
if self.ndim != len(prior_mean) or self.ndim != len(prior_var):
raise ValueError('Incorrect kernel prior parameters.')
if self.kernel is None:
raise ValueError('Kernel should be defined first.')
for i in range(self.ndim):
self.stat_kernel.lengthscale[[i]].set_prior(GPy.priors.Gamma.from_EV(prior_mean[i],prior_var[i])) # don't know why, but [i] does not work
def set_kernel_var_prior(self, prior_mean, prior_var):
self.stat_kernel.variance.set_prior(GPy.priors.Gamma.from_EV(prior_mean,prior_var))
def fix_kernel_lengthscale(self):
self.stat_kernel.lengthscale.fix()
def fix_kernel_var(self):
self.stat_kernel.variance.fix()
def create_model(self, x, y, noise_var, noise_prior='fixed'):
x = convert_2D_format(x)
y = convert_2D_format(y) - self.center
self.outdim = y.shape[1]
noise_var = np.array(noise_var)
if noise_var.ndim == 0:
self.model = GPy.models.GPRegression(x, y, self.kernel, noise_var=noise_var)
noise = self.model.Gaussian_noise
else:
assert noise_var.shape == y.shape
self.model = GPy.models.GPHeteroscedasticRegression(x, y, self.kernel)
self.model['.*het_Gauss.variance'] = noise_var
noise = self.model.het_Gauss.variance
if noise_prior == 'fixed':
noise.fix()
else:
raise ValueError('Not Implemented yet.')
def predict_f(self, x, full_cov=False):
'''
Returns:
posterior mean, posterior variance
'''
x = convert_2D_format(x)
post_mean, post_var = self.model.predict_noiseless(x, full_cov=full_cov)
if self.outdim > 1:
post_var = np.concatenate([post_var]*self.outdim, axis=-1)
return post_mean + self.center, post_var
def predict_withGradients(self, x):
'''
Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
'''
x = convert_2D_format(x)
m, v = self.model.predict(x)
v = np.clip(v, 1e-10, np.inf)
dmdx, dvdx = self.model.predictive_gradients(x)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return m + self.center, np.sqrt(v), dmdx, dsdx
def posterior_sample_f(self, x, size = 10):
'''
Parameters
x: (Nnew x input_dim)
Returns
(Nnew x output_dim x samples)
'''
return self.model.posterior_samples_f(x, size) + self.center
def optimize(self, num_restarts=30, opt_messages=False, print_result=True, parallel=False):
self.model.optimize_restarts(num_restarts=num_restarts, robust=True, parallel=False, messages=opt_messages)
if print_result:
print(self.kernel)
print(self.stat_kernel.lengthscale)
print(self.stat_kernel.variance)
class GPyWrapper_Classifier(GPyWrapper):
def create_model(self, x, y):
assert self.center == 0.0
x = convert_2D_format(x)
y = convert_2D_format(y)
self.outdim = y.shape[1]
self.model = GPy.models.GPClassification(x, y, self.kernel)
def predict_prob(self, x):
x = convert_2D_format(x)
prob = self.model.predict(x, full_cov=False)[0]
return prob
def optimize(self, maxiter=1000, opt_messages=False, print_result=True):
for i in range(5):
self.model.optimize(max_iters=int(maxiter/5), messages=opt_messages)
if print_result:
print(self.kernel)
print(self.stat_kernel.lengthscale)
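# Maintains one independent single-output GP per output dimension; calls fan out over self.gp_list.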
class GPyWrapper_MultiSeparate(object):
def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
if isinstance(kernel_name, str):
kernel_name = [kernel_name]*outdim
if np.isscalar(var_f):
var_f = np.ones(outdim) * var_f
if np.isscalar(lengthscale):
            lengthscale = np.ones(outdim) * lengthscale
if isinstance(const_kernel, bool):
const_kernel = [const_kernel]*outdim
self.gp_list = list()
for i in range(outdim):
gp = GPyWrapper()
gp.create_kernel(ndim, kernel_name[i], var_f[i], lengthscale[i], const_kernel[i])
self.gp_list.append(gp)
self.outdim = outdim
def set_kernel_length_prior(self, prior_mean, prior_var):
# Apply same prior for all outputs
for i in range(self.outdim):
self.gp_list[i].set_kernel_length_prior(prior_mean, prior_var)
def set_kernel_var_prior(self, prior_mean, prior_var):
# Apply same prior for all outputs
for i in range(self.outdim):
self.gp_list[i].set_kernel_var_prior(prior_mean, prior_var)
def fix_kernel_lengthscale(self):
for i in range(self.outdim):
self.gp_list[i].fix_kernel_lengthscale()
def fix_kernel_var(self):
for i in range(self.outdim):
self.gp_list[i].fix_kernel_var()
def create_model(self, x, y, noise_var, noise_prior='fixed'):
if not (y.ndim == 2 and y.shape[1] == self.outdim):
raise ValueError('Incorrect data shape.')
noise_var = np.array(noise_var)
for i in range(self.outdim):
if noise_var.ndim == 2 and noise_var.shape[1] == self.outdim:
noise_var_i = noise_var[:, i:i+1]
else:
noise_var_i = noise_var
gp = self.gp_list[i]
gp.create_model(x, y[:,i:i+1], noise_var_i, noise_prior)
def predict_f(self, x, full_cov=False):
post_mean_all = list()
post_var_all = list()
for i in range(self.outdim):
post_mean, post_var = self.gp_list[i].predict_f(x, full_cov)
post_mean_all.append(post_mean)
post_var_all.append(post_var)
return np.concatenate(post_mean_all,axis=-1), np.concatenate(post_var_all,axis=-1)
def posterior_sample_f(self, x, size = 10):
post_samples_all = list()
for i in range(self.outdim):
            post_samples = self.gp_list[i].posterior_sample_f(x, size)
post_samples_all.append(post_samples)
return np.concatenate(post_samples_all,axis=1)
def optimize(self, num_restarts=30, opt_messages=False, print_result=False):
for i in range(self.outdim):
self.gp_list[i].optimize(num_restarts, opt_messages, print_result)
def predict_withGradients(self, x):
'''
Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
m_all: (num_x, outdim)
std_all: (num_x, outdim)
dmdx_all: (num_x, outdim, n_dim)
dsdx_all: (num_x, outdim, n_dim)
'''
m_all, std_all, dmdx_all, dsdx_all = [], [], [], []
for i in range(self.outdim):
m, std, dmdx, dsdx = self.gp_list[i].predict_withGradients(x)
m_all.append(m)
std_all.append(std)
dmdx_all.append(dmdx)
dsdx_all.append(dsdx)
return np.concatenate(m_all,axis=-1), np.concatenate(std_all,axis=-1), np.stack(dmdx_all,axis=1), np.stack(dsdx_all,axis=1)
class GPyWrapper_MultiIndep(GPyWrapper):
def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
super().create_kernel(ndim, kernel_name, var_f, lengthscale, const_kernel)
k_multi = GPy.kern.IndependentOutputs([self.kernel, self.kernel.copy()])
#icm = GPy.util.multioutput.ICM(input_dim=ndim, num_outputs=outdim, kernel=self.kernel)
#icm.B.W.constrain_fixed(0) # fix W matrix to 0
if const_kernel:
self.stat_kernel = k_multi.sum.basic
else:
self.stat_kernel = k_multi.basic
self.kernel = k_multi
print(self.kernel)
def create_model(self, x, y, noise_var, noise_prior='fixed'):
x = convert_2D_format(x)
y = convert_2D_format(y) - self.center
numdata = x.shape[0]
outdim = y.shape[1]
indim = x.shape[1]
yy = y.transpose().ravel()
ind = np.concatenate([ o*np.ones(numdata) for o in range(outdim)])
xx = np.concatenate([x]*outdim)
xx = np.concatenate((xx,ind[:,np.newaxis]), axis=1)
print(xx.shape, yy.shape)
self.model = GPy.models.GPRegression(x, y, self.kernel, noise_var=noise_var)
if noise_prior == 'fixed':
self.model.Gaussian_noise.fix()
else:
raise ValueError('Not Implemented yet.')
def create_GP(num_active_gates, outdim, k_name='Matern52', var_f=1.0, lengthscale=1.0, center=0.0):
if np.isscalar(lengthscale):
        lengthscale = np.ones(num_active_gates) * lengthscale
gp = GPyWrapper() # initialize GP environment
#gp = GPyWrapper_MultiIndep() # initialize GP environment
gp.center = center
# GP kernels
gp.create_kernel(num_active_gates, k_name, var_f, lengthscale)
#gp.create_kernel(num_active_gates, outdim, k_name, var_f, lengthscale)
return gp
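# Small self-test: fit a 1-D GP to a toy quadratic with input-dependent (heteroscedastic) noise.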
def main():
X = np.arange(1,6).reshape((5,1))
f = lambda x : np.square(x-4.0)
#Y = np.concatenate([f(X), -f(X)], axis=1)
Y = np.concatenate([f(X)], axis=1)
#noise_var = 0.01**2
#noise_var = np.concatenate([np.square(X / 10.)]*2, axis=1)
noise_var = np.square(X / 10.)
print(X.shape, Y.shape)
gp = create_GP(1, 2, 'Matern52', 2.0, 1.0, 0.0)
gp.create_model(X, Y, noise_var, noise_prior='fixed')
gp.optimize()
X_pred = np.linspace(1.,5.,10).reshape((-1,1))
mean, cov = gp.predict_f(X_pred)
print(mean)
#print(cov)
'''
###
# GP Classification test
###
X = np.arange(1,6).reshape((5,1))
Y = np.array([1.0, 1.0, 1.0, 0.0, 0.0]).reshape((5,1))
gpc = GPyWrapper_Classifier()
gpc.create_kernel(1, 'RBF', 1.0, 1.0)
gpc.create_model(X, Y)
X_pred = np.linspace(1.,5.,10).reshape((-1,1))
print(gpc.predict_prob(X_pred))
print(gpc.model)
gpc.optimize()
print(gpc.predict_prob(X_pred))
print(gpc.model)
'''
if __name__ == '__main__':
main()
|
[
"numpy.clip",
"numpy.sqrt",
"numpy.isscalar",
"numpy.ones",
"GPy.kern.RBF",
"GPy.plotting.change_plotting_library",
"numpy.square",
"numpy.array",
"GPy.models.GPClassification",
"GPy.kern.Matern52",
"GPy.kern.Bias",
"numpy.stack",
"numpy.concatenate",
"GPy.priors.Gamma.from_EV",
"numpy.linspace",
"GPy.models.GPHeteroscedasticRegression",
"GPy.models.GPRegression",
"numpy.arange"
] |
[((10112, 10136), 'numpy.isscalar', 'np.isscalar', (['lengthscale'], {}), '(lengthscale)\n', (10123, 10136), True, 'import numpy as np\n'), ((10773, 10792), 'numpy.square', 'np.square', (['(X / 10.0)'], {}), '(X / 10.0)\n', (10782, 10792), True, 'import numpy as np\n'), ((197, 247), 'GPy.plotting.change_plotting_library', 'GPy.plotting.change_plotting_library', (['"""matplotlib"""'], {}), "('matplotlib')\n", (233, 247), False, 'import GPy\n'), ((2126, 2145), 'numpy.array', 'np.array', (['noise_var'], {}), '(noise_var)\n', (2134, 2145), True, 'import numpy as np\n'), ((3429, 3454), 'numpy.clip', 'np.clip', (['v', '(1e-10)', 'np.inf'], {}), '(v, 1e-10, np.inf)\n', (3436, 3454), True, 'import numpy as np\n'), ((4468, 4514), 'GPy.models.GPClassification', 'GPy.models.GPClassification', (['x', 'y', 'self.kernel'], {}), '(x, y, self.kernel)\n', (4495, 4514), False, 'import GPy\n'), ((5190, 5208), 'numpy.isscalar', 'np.isscalar', (['var_f'], {}), '(var_f)\n', (5201, 5208), True, 'import numpy as np\n'), ((5265, 5289), 'numpy.isscalar', 'np.isscalar', (['lengthscale'], {}), '(lengthscale)\n', (5276, 5289), True, 'import numpy as np\n'), ((6559, 6578), 'numpy.array', 'np.array', (['noise_var'], {}), '(noise_var)\n', (6567, 6578), True, 'import numpy as np\n'), ((7545, 7585), 'numpy.concatenate', 'np.concatenate', (['post_samples_all'], {'axis': '(1)'}), '(post_samples_all, axis=1)\n', (7559, 7585), True, 'import numpy as np\n'), ((9650, 9678), 'numpy.concatenate', 'np.concatenate', (['([x] * outdim)'], {}), '([x] * outdim)\n', (9664, 9678), True, 'import numpy as np\n'), ((9690, 9738), 'numpy.concatenate', 'np.concatenate', (['(xx, ind[:, np.newaxis])'], {'axis': '(1)'}), '((xx, ind[:, np.newaxis]), axis=1)\n', (9704, 9738), True, 'import numpy as np\n'), ((9794, 9857), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['x', 'y', 'self.kernel'], {'noise_var': 'noise_var'}), '(x, y, self.kernel, noise_var=noise_var)\n', (9817, 9857), False, 'import GPy\n'), ((10160, 10185), 'numpy.ones', 'np.ones', (['num_active_gates'], {}), '(num_active_gates)\n', (10167, 10185), True, 'import numpy as np\n'), ((10565, 10583), 'numpy.square', 'np.square', (['(x - 4.0)'], {}), '(x - 4.0)\n', (10574, 10583), True, 'import numpy as np\n'), ((541, 633), 'GPy.kern.Matern52', 'GPy.kern.Matern52', ([], {'input_dim': 'ndim', 'ARD': '(True)', 'variance': 'var_f', 'lengthscale': 'l', 'name': '"""basic"""'}), "(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l,\n name='basic')\n", (558, 633), False, 'import GPy\n'), ((1013, 1031), 'GPy.kern.Bias', 'GPy.kern.Bias', (['(1.0)'], {}), '(1.0)\n', (1026, 1031), False, 'import GPy\n'), ((1726, 1773), 'GPy.priors.Gamma.from_EV', 'GPy.priors.Gamma.from_EV', (['prior_mean', 'prior_var'], {}), '(prior_mean, prior_var)\n', (1750, 1773), False, 'import GPy\n'), ((2203, 2266), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['x', 'y', 'self.kernel'], {'noise_var': 'noise_var'}), '(x, y, self.kernel, noise_var=noise_var)\n', (2226, 2266), False, 'import GPy\n'), ((2398, 2455), 'GPy.models.GPHeteroscedasticRegression', 'GPy.models.GPHeteroscedasticRegression', (['x', 'y', 'self.kernel'], {}), '(x, y, self.kernel)\n', (2436, 2455), False, 'import GPy\n'), ((2990, 3039), 'numpy.concatenate', 'np.concatenate', (['([post_var] * self.outdim)'], {'axis': '(-1)'}), '([post_var] * self.outdim, axis=-1)\n', (3004, 3039), True, 'import numpy as np\n'), ((3607, 3617), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (3614, 3617), True, 'import numpy as np\n'), ((7218, 7256), 
'numpy.concatenate', 'np.concatenate', (['post_mean_all'], {'axis': '(-1)'}), '(post_mean_all, axis=-1)\n', (7232, 7256), True, 'import numpy as np\n'), ((7257, 7294), 'numpy.concatenate', 'np.concatenate', (['post_var_all'], {'axis': '(-1)'}), '(post_var_all, axis=-1)\n', (7271, 7294), True, 'import numpy as np\n'), ((8523, 8553), 'numpy.concatenate', 'np.concatenate', (['m_all'], {'axis': '(-1)'}), '(m_all, axis=-1)\n', (8537, 8553), True, 'import numpy as np\n'), ((8554, 8586), 'numpy.concatenate', 'np.concatenate', (['std_all'], {'axis': '(-1)'}), '(std_all, axis=-1)\n', (8568, 8586), True, 'import numpy as np\n'), ((8587, 8613), 'numpy.stack', 'np.stack', (['dmdx_all'], {'axis': '(1)'}), '(dmdx_all, axis=1)\n', (8595, 8613), True, 'import numpy as np\n'), ((8614, 8640), 'numpy.stack', 'np.stack', (['dsdx_all'], {'axis': '(1)'}), '(dsdx_all, axis=1)\n', (8622, 8640), True, 'import numpy as np\n'), ((10516, 10531), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (10525, 10531), True, 'import numpy as np\n'), ((10963, 10988), 'numpy.linspace', 'np.linspace', (['(1.0)', '(5.0)', '(10)'], {}), '(1.0, 5.0, 10)\n', (10974, 10988), True, 'import numpy as np\n'), ((741, 829), 'GPy.kern.RBF', 'GPy.kern.RBF', ([], {'input_dim': 'ndim', 'ARD': '(True)', 'variance': 'var_f', 'lengthscale': 'l', 'name': '"""basic"""'}), "(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name=\n 'basic')\n", (753, 829), False, 'import GPy\n'), ((1528, 1581), 'GPy.priors.Gamma.from_EV', 'GPy.priors.Gamma.from_EV', (['prior_mean[i]', 'prior_var[i]'], {}), '(prior_mean[i], prior_var[i])\n', (1552, 1581), False, 'import GPy\n'), ((3563, 3573), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (3570, 3573), True, 'import numpy as np\n'), ((5230, 5245), 'numpy.ones', 'np.ones', (['outdim'], {}), '(outdim)\n', (5237, 5245), True, 'import numpy as np\n'), ((5311, 5326), 'numpy.ones', 'np.ones', (['outdim'], {}), '(outdim)\n', (5318, 5326), True, 'import numpy as np\n'), ((9595, 9611), 'numpy.ones', 'np.ones', (['numdata'], {}), '(numdata)\n', (9602, 9611), True, 'import numpy as np\n')]
|
from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, \
random, prod, asarray, set_printoptions, unravel_index
# Small helper constructors: zeros, random normal, random uniform in [0,1), and bounded random integers.
def zero(*shape): return zeros(shape)
def randnorm(*shape): return random.normal(size=shape)
def randuni(*shape): return random.random(size=shape)
def randint(*shape, min=-3, max=9):
data = asarray(random.randint(min+1,max+1,size=shape), dtype=float)
data[data <= 0] -= 1
return data
# Build a model given four integers:
# di - dimension of input
# ds - dimension for each internal state
# ns - number of internal states
# do - dimension of output
def build_model(di, ds, ns, do):
# Use random normal vectors.
input_params = randnorm(di+1, ds)
internal_params = randnorm(ds+1, ds, ns-1)
output_params = randnorm(ds+1, do)
# Normalize the length of all random normal vectors for input.
input_params[:,:] /= ((input_params[:,:]**2).sum(axis=0))**(1/2)
internal_params[:,:,:] /= ((internal_params[:,:,:]**2).sum(axis=0))**(1/2)
# Set the bias values.
input_params[-1,:] = 1
internal_params[-1,:,:] = 0
# Set the scratch space for storing internal values to zero.
internal_values = zero(ds, ns)
return input_params, internal_params, output_params, internal_values
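# Illustrative shape sketch (added note; the helper name below is hypothetical and it
# is never called): for build_model(di=3, ds=4, ns=2, do=1) the arrays follow the
# layout documented above, each weight block carrying a trailing bias row.
def _example_build_model_shapes():
    input_params, internal_params, output_params, internal_values = build_model(3, 4, 2, 1)
    # Expected shapes: (4, 4), (5, 4, 1), (5, 1), (4, 2).
    return (input_params.shape, internal_params.shape,
            output_params.shape, internal_values.shape)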
# Get the shape of a model (when provided the arrays).
def get_shape(*model):
di, ds = model[0].shape
di -= 1
ns = model[1].shape[-1] + 1
do = model[2].shape[-1]
return di, ds, ns, do
# Function for pushing values forward through a dense MLP.
def forward(inputs, input_params, internal_params, output_params, internal_values, display=False):
di, ds, ns, do = get_shape(input_params, internal_params, output_params)
# Compute the input layer.
internal_values[:,0] = clip(dot(inputs, input_params[:di,:]) +
input_params[di,:], 0.0, float('inf'))
if display:
print("^"*70)
print("input: ",inputs)
print()
for n in range(ds):
print(f"0.{n} ", input_params[:di,n], '+', input_params[di,n], '=', internal_values[n,0])
print(" 0 out ", internal_values[:,0])
# Compute the next set of internal values with a rectified activation.
for i in range(ns-1):
internal_values[:,i+1] = internal_params[ds,:,i] + \
dot(internal_values[:,i],
internal_params[:ds,:,i])
if display:
print()
for n in range(ds):
print(f"{i+1}.{n} ", internal_params[:ds,n,i], '+', internal_params[ds:ds+1,n,i], '=', internal_values[n,i+1])
internal_values[:,i+1] = clip(internal_values[:,i+1], 0.0, float('inf'))
if display: print(f" {i+1} out ", internal_values[:,i+1])
    # Compute the output.
output = dot(internal_values[:,ns-1], output_params[:ds]) + output_params[ds]
if display:
print()
for n in range(do):
print(f"{ns}.{n} ", output_params[:ds,n],'+', output_params[ds,n], '=', output[n])
print(f" {ns} out ", output[:])
print()
print("output:", output)
print("_"*70)
return output
# Compute the gradient with respect to all parameters by backpropagating the output gradient through the stored internal values.
def gradient(grad, inputs, *model, display=False):
# Get the model shape.
di, ds, ns, do = get_shape(*model)
# Initialize storage for the gradients.
input_grad = zeros(model[0].shape)
internal_grad = zeros(model[1].shape)
output_grad = ones(model[2].shape)
# Retrieve the model parameters.
internal_params = model[1]
output_params = model[2]
    # Retrieve the internal values of the model (populated by a prior forward pass).
internal_values = model[-1]
# Compute the gradient of the last parameters.
nonzero = internal_values[:,-1].nonzero()
output_grad[ds,:] = grad[:]
for i in range(do):
output_grad[:ds,i] = internal_values[:,-1] * grad[i]
internal_values[nonzero,-1] = dot(output_params[:ds,:][nonzero], grad)
if display:
print("^"*70)
print("Output grad:")
print("",output_grad.T)
print("",nonzero, internal_values[:,-1])
# Compute the gradient of all internal parameters.
for i in range(ns-2,-1,-1):
# Compute the gradient for all weights.
# set the bias gradient.
internal_grad[ds,:,i] = internal_values[:,i+1]
# set the gradient for each column of connections
# (to a single output in next layer).
nonzero = internal_values[:,i].nonzero()
for j in range(ds):
if (internal_values[j,i+1] == 0): continue
internal_grad[:ds,j,i][nonzero] = internal_values[nonzero,i] * internal_values[j,i+1]
if display:
print(f"layer {i} -> {i+1}, output node {j}")
print(" ",internal_grad[:,j,i])
        # Back-propagate the gradient into the preceding layer of internal values.
internal_values[nonzero,i] = dot(internal_params[:ds,:,i][nonzero], internal_values[:,i+1])
if display:
print("Grads for next layer:")
print("",nonzero, internal_values[:,i])
# Compute the gradient for the input parameters.
input_grad[di,:] = internal_values[:,0]
for i in range(ds):
input_grad[:di,i] = inputs[:] * internal_values[i,0]
if display:
print("Input grad:")
print(input_grad.T)
print("_"*70)
# Return the gradients.
return input_grad, internal_grad, output_grad
# Compute the gradient with respect to all parameters using finite differences.
def finite_difference(inputs, *model, diff=0.0001, display=False):
# Shift matrices (used for computing finite differences).
input_shift = zeros(model[0].shape)
internal_shift = zeros(model[1].shape)
output_shift = zeros(model[2].shape)
    # Function for producing the shifted model.
shifted_model = lambda: (model[0]+input_shift, model[1]+internal_shift, model[2]+output_shift, model[3])
# Gradient matrices.
input_grad = zeros(model[0].shape)
internal_grad = zeros(model[1].shape)
output_grad = zeros(model[2].shape)
# Total number of outputs.
output_shape = forward(inputs, *model).shape
num_outputs = prod(output_shape)
# Compute the expected set of nonzero internal activations.
forward(inputs, *model)
expected_nonzero = tuple(model[-1].nonzero()[0])
    # Function for measuring the effect that a shift in one layer's parameters has on the selected output.
def measure_layer(layer, grad, shift, name):
for j in range(layer.size):
curr_idx = unravel_index(j, layer.shape)
shift[curr_idx] = diff/2
out_high = forward(inputs, *shifted_model())[out_index]
nonzero_high = tuple(model[3].nonzero()[0])
shift[curr_idx] = -diff/2
out_low = forward(inputs, *shifted_model())[out_index]
nonzero_low = tuple(model[3].nonzero()[0])
shift[curr_idx] = 0
# If a zero became nonzero (or vice versa), then the
            # finite difference approximation is unstable.
if ((len(nonzero_high) <= len(expected_nonzero)) and
(len(nonzero_low) <= len(expected_nonzero))):
# Compute the gradient
grad[curr_idx] += sum(out_high - out_low) / diff
if display:
print(f"{name:14s}{str(curr_idx):10s} {grad[curr_idx]: .3f}")
print(f" {float(out_high)}")
print(f" {float(out_low)}")
print(f" {float(diff)}")
# Display information.
if display:
print("^"*70)
print("shifted_model: ",[v.shape for v in shifted_model()])
print("output shape, size: ", output_shape, num_outputs)
# Cycle over each output.
for i in range(num_outputs):
out_index = unravel_index(i, output_shape)
if display: print("out_index: ",out_index)
# Cycle over all model parameters, testing effect on output.
# input layer
measure_layer(model[0], input_grad, input_shift, "input idx:")
# internal layers
measure_layer(model[1], internal_grad, internal_shift, "internal idx:")
# output layer
measure_layer(model[2], output_grad, output_shift, "output idx:")
if display: print("_"*70)
# Done computing finite difference gradient!
return input_grad, internal_grad, output_grad
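# Added note: the routine above applies the standard central-difference rule to each
# parameter, i.e. f'(x) ~= (f(x + h/2) - f(x - h/2)) / h with h = diff. The helper
# below (hypothetical, for illustration only) states that rule for a scalar function.
def _central_difference(f, x, h=0.0001):
    # Second-order accurate approximation of df/dx at x.
    return (f(x + h / 2) - f(x - h / 2)) / h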
def test():
print("Testing..")
di_vals = (1,2,3)
ds_vals = (1,2,3)
ns_vals = (1,2,3)
do_vals = (1,2,3)
seeds = list(range(5))
    # Cycle through all combinations of test parameters.
from itertools import product
for (di, ds, ns, do, seed) in product(di_vals, ds_vals, ns_vals, do_vals, seeds):
# --------------------------------------------------------------------
# di - dimension of input
# ds - dimension for each internal state
# ns - number of internal states
# do - dimension of output
# --------------------------------------------------------------------
random.seed(seed)
# Create the model.
#
model = build_model(di, ds, ns, do)
# Call the "forward" function.
# inputs = randuni(di)
inputs = randuni(di)
# inputs = randint(di)
# Run the model forward to compute internal values.
output = forward(inputs, *model, display=False)
# Compute the gradients with a finite difference.
approx_model_grad = finite_difference(inputs, *model, display=False)
# Run the model again (fresh) to get the "internal values".
output = forward(inputs, *model, display=False)
# Print the model gradients that were directly computed.
model_grad = gradient(ones(do), inputs, *model)
# Check the correctness of the gradient function.
for i,(app, true) in enumerate(zip(approx_model_grad, model_grad)):
diff = (abs(app - true) / (abs(true) + 1)).T
# Skip "internal params" if that is empty.
if (len(diff) == 0): continue
# Check for the difference.
if (max(diff) > .01):
set_printoptions(precision=3, sign=" ")
print()
print("ERROR ON TEST")
print(" seed =",seed)
print()
print("di, ds, ns, do: ",di, ds, ns, do)
print("input_params: ",model[0].shape)
print("internal_params: ",model[1].shape)
print("output_params: ",model[2].shape)
print("internal_values: ",model[3].shape)
print()
# forward(inputs, *model, display=True)
finite_difference(inputs, *model, display=True)
print()
print("model[0]:")
print(model[0].T)
print()
print("model[1]:")
print(model[1].T)
print()
print("model[2]:")
print(model[2].T)
print()
print("internals:")
print(model[-1].T)
print()
print()
print("approx_model_grad[0]:")
print(approx_model_grad[0].T)
print()
print("approx_model_grad[1]:")
print(approx_model_grad[1].T)
print()
print("approx_model_grad[2]:")
print(approx_model_grad[2].T)
print()
print()
print("model_grad[0]:")
print(model_grad[0].T)
print()
print("model_grad[1]:")
print(model_grad[1].T)
print()
print("model_grad[2]:")
print(model_grad[2].T)
print()
print()
print("Phase",i,"(0 = input, 1 = internal, 2 = output)")
print("",max(diff))
print("",unravel_index(argmax(diff), diff.shape))
print()
print("Finite differene gradient:")
print(app.T)
print()
print("Directly computed gradient:")
print(true.T)
print()
print("Difference")
print(diff)
print()
print("ERROR ON TEST")
exit()
print(" all passed!")
if __name__ == "__main__":
test()
class NN:
def __init__(self, di, do, ds=16, ns=4):
self.di = di
self.ds = ds
self.ns = ns
self.do = do
self.model = list(build_model(di, ds, ns, do))
def fit(self, x, y, steps=1000, step_factor=0.01, display=False,
show=False, **kwargs):
# Make sure that the given data is the right shape.
assert (self.di == x.shape[-1])
assert (self.do == y.shape[-1])
if (show and (self.do == 1) and (self.di == 1)):
show_interval = max([1, steps // 100])
from util.plot import Plot
p = Plot()
p.add("Data", *(x.T), y.flatten(), group='d', frame=-1)
p.add_func("Model", self, [x.min(), x.max()], group='m', frame=-1)
loss_values = []
# For the number of training steps..
for s in range(steps):
if (not s%10): print(s, end="\r")
if (show): loss_values.append( ((y - self(x))**2).sum()**(1/2) )
grads = [zeros(l.shape) for l in self.model]
# Average gradient from all data points.
for i, (d_in, d_out) in enumerate(zip(x,y)):
m_out = forward(d_in, *self.model, display=False)
loss_grad = m_out - d_out
grad_step = gradient(loss_grad, d_in, *self.model, display=False)
# Dynamically update the average (of the gradients).
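                # (Added clarification) this is the incremental running-mean update
                # mean_k = mean_{k-1} + (x_k - mean_{k-1}) / k, evaluated with k = i + 1.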
for j in range(len(grad_step)):
grads[j] += (grad_step[j] - grads[j]) / (i+1)
if display:
yhat = self(x).reshape(y.shape)
loss = ((y - yhat)**2).sum(axis=-1).mean()
            # Take a step in the negative gradient direction (gradient descent).
for j in range(len(grads)):
self.model[j] -= grads[j] * step_factor
# Display progress.
if display:
print()
print("Step:", s)
print("loss:", loss)
print("model:")
for l in self.model[:-1]:
print("",l.T)
print("grads: ")
for l in grads[:-1]:
print("",-l.T)
print()
# Update the model plot, if appropriate.
if (show and (s%show_interval == 0)):
p.add("Data", *(x.T), y.flatten(), group='d', frame=s)
p.add_func("Model", self, [x.min(), x.max()], group='m', frame=s)
# Add the last frame, if it wasn't already added.
if (show):
print(" showing plot..")
# Show the plot of the model.
p.show(show=False)
p = Plot("","Step","Loss value")
p.add("Loss", list(range(len(loss_values))), loss_values,
mode="markers+lines", color=1)
p.show(append=True, show_legend=False)
# Return predictions for new data.
def predict(self, x):
if (len(x.shape) == 2):
outputs = []
for x_in in x:
outputs.append( forward(x_in, *self.model)[0] )
return asarray(outputs)
else: return forward(x, *self.model)
    # Make instances callable by delegating to predict().
def __call__(self, *args):
return self.predict(*args)
|
[
"numpy.random.normal",
"numpy.prod",
"numpy.abs",
"numpy.ones",
"numpy.random.random",
"itertools.product",
"numpy.asarray",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.random.randint",
"numpy.unravel_index",
"numpy.random.seed",
"util.plot.Plot",
"numpy.set_printoptions"
] |
[((214, 226), 'numpy.zeros', 'zeros', (['shape'], {}), '(shape)\n', (219, 226), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((256, 281), 'numpy.random.normal', 'random.normal', ([], {'size': 'shape'}), '(size=shape)\n', (269, 281), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((310, 335), 'numpy.random.random', 'random.random', ([], {'size': 'shape'}), '(size=shape)\n', (323, 335), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((3507, 3528), 'numpy.zeros', 'zeros', (['model[0].shape'], {}), '(model[0].shape)\n', (3512, 3528), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((3549, 3570), 'numpy.zeros', 'zeros', (['model[1].shape'], {}), '(model[1].shape)\n', (3554, 3570), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((3589, 3609), 'numpy.ones', 'ones', (['model[2].shape'], {}), '(model[2].shape)\n', (3593, 3609), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((4063, 4104), 'numpy.dot', 'dot', (['output_params[:ds, :][nonzero]', 'grad'], {}), '(output_params[:ds, :][nonzero], grad)\n', (4066, 4104), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5815, 5836), 'numpy.zeros', 'zeros', (['model[0].shape'], {}), '(model[0].shape)\n', (5820, 5836), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5858, 5879), 'numpy.zeros', 'zeros', (['model[1].shape'], {}), '(model[1].shape)\n', (5863, 5879), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5901, 5922), 'numpy.zeros', 'zeros', (['model[2].shape'], {}), '(model[2].shape)\n', (5906, 5922), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6126, 6147), 'numpy.zeros', 'zeros', (['model[0].shape'], {}), '(model[0].shape)\n', (6131, 6147), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6168, 6189), 'numpy.zeros', 'zeros', (['model[1].shape'], {}), '(model[1].shape)\n', (6173, 6189), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6210, 6231), 'numpy.zeros', 'zeros', (['model[2].shape'], {}), '(model[2].shape)\n', (6215, 6231), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6331, 6349), 'numpy.prod', 'prod', (['output_shape'], {}), '(output_shape)\n', (6335, 6349), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((8782, 8832), 'itertools.product', 'product', (['di_vals', 'ds_vals', 'ns_vals', 'do_vals', 'seeds'], {}), '(di_vals, ds_vals, ns_vals, do_vals, seeds)\n', (8789, 8832), False, 'from itertools import product\n'), ((391, 435), 'numpy.random.randint', 
'random.randint', (['(min + 1)', '(max + 1)'], {'size': 'shape'}), '(min + 1, max + 1, size=shape)\n', (405, 435), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((2892, 2943), 'numpy.dot', 'dot', (['internal_values[:, ns - 1]', 'output_params[:ds]'], {}), '(internal_values[:, ns - 1], output_params[:ds])\n', (2895, 2943), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((5050, 5117), 'numpy.dot', 'dot', (['internal_params[:ds, :, i][nonzero]', 'internal_values[:, i + 1]'], {}), '(internal_params[:ds, :, i][nonzero], internal_values[:, i + 1])\n', (5053, 5117), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((7945, 7975), 'numpy.unravel_index', 'unravel_index', (['i', 'output_shape'], {}), '(i, output_shape)\n', (7958, 7975), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((9200, 9217), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (9211, 9217), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((1847, 1880), 'numpy.dot', 'dot', (['inputs', 'input_params[:di, :]'], {}), '(inputs, input_params[:di, :])\n', (1850, 1880), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((2414, 2468), 'numpy.dot', 'dot', (['internal_values[:, i]', 'internal_params[:ds, :, i]'], {}), '(internal_values[:, i], internal_params[:ds, :, i])\n', (2417, 2468), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((6655, 6684), 'numpy.unravel_index', 'unravel_index', (['j', 'layer.shape'], {}), '(j, layer.shape)\n', (6668, 6684), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((9902, 9910), 'numpy.ones', 'ones', (['do'], {}), '(do)\n', (9906, 9910), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((13193, 13215), 'numpy.max', 'max', (['[1, steps // 100]'], {}), '([1, steps // 100])\n', (13196, 13215), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((13271, 13277), 'util.plot.Plot', 'Plot', ([], {}), '()\n', (13275, 13277), False, 'from util.plot import Plot\n'), ((15320, 15350), 'util.plot.Plot', 'Plot', (['""""""', '"""Step"""', '"""Loss value"""'], {}), "('', 'Step', 'Loss value')\n", (15324, 15350), False, 'from util.plot import Plot\n'), ((15753, 15769), 'numpy.asarray', 'asarray', (['outputs'], {}), '(outputs)\n', (15760, 15769), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10273, 10282), 'numpy.max', 'max', (['diff'], {}), '(diff)\n', (10276, 10282), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10307, 10346), 'numpy.set_printoptions', 'set_printoptions', ([], {'precision': '(3)', 'sign': '""" """'}), "(precision=3, sign=' ')\n", (10323, 10346), False, 'from numpy import zeros, ones, dot, 
sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((13674, 13688), 'numpy.zeros', 'zeros', (['l.shape'], {}), '(l.shape)\n', (13679, 13688), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((7362, 7385), 'numpy.sum', 'sum', (['(out_high - out_low)'], {}), '(out_high - out_low)\n', (7365, 7385), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10083, 10098), 'numpy.abs', 'abs', (['(app - true)'], {}), '(app - true)\n', (10086, 10098), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((12133, 12142), 'numpy.max', 'max', (['diff'], {}), '(diff)\n', (12136, 12142), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((10102, 10111), 'numpy.abs', 'abs', (['true'], {}), '(true)\n', (10105, 10111), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n'), ((12183, 12195), 'numpy.argmax', 'argmax', (['diff'], {}), '(diff)\n', (12189, 12195), False, 'from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, random, prod, asarray, set_printoptions, unravel_index\n')]
|
'''
Algorithm for matching the model to image points.
Based on (Cootes et al. 2000, p.9) and (Blanz et al., p.4).
'''
import numpy as np
from utils.structure import Shape
from utils.align import Aligner
class Fitter(object):
def __init__(self, pdmodel):
self.pdmodel = pdmodel
self.aligner = Aligner()
self.start_pose = ()
def fit(self, prev_shape, new_shape, pyramid_level=0, n=None):
'''
Algorithm that finds the best shape parameters that match identified
image points.
In: PointDistributionModel instance pdm,
array of new image points (x1, x2, ..., xN, y1, y2,..., yN)
Out: the pose params (Tx, Ty, s, theta) and shape parameter (c) to
fit the model to the image
'''
if not isinstance(new_shape, Shape):
new_shape = Shape(new_shape)
if not isinstance(prev_shape, Shape):
prev_shape = Shape(prev_shape)
if not self.start_pose:
            raise ValueError('No initial pose parameters found.')
# find pose parameters to align with new image points
Tx, Ty, s, theta = self.start_pose
dx, dy, ds, dTheta = self.aligner.get_pose_parameters(prev_shape, new_shape)
changed_pose = (Tx + dx, Ty + dy, s*(1+ds), theta+dTheta)
# align image with model
y = self.aligner.invert_transform(new_shape, changed_pose)
# SVD on scaled eigenvectors of the model
u, w, v = np.linalg.svd(self.pdmodel.scaled_eigenvectors, full_matrices=False)
W = np.zeros_like(w)
# define weight vector n
if n is None:
last_eigenvalue = self.pdmodel.eigenvalues[-1]
n = last_eigenvalue**2 if last_eigenvalue**2 >= 0 else 0
# calculate the shape vector
W = np.diag(w/((w**2) + n))
c = (v.T).dot(W).dot(u.T).dot(y.vector)
return changed_pose, c
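# Illustrative usage sketch (added; `pdmodel` is assumed to be a PointDistributionModel
# exposing `scaled_eigenvectors` and `eigenvalues`, as fit() above requires, and the
# helper name is hypothetical).
def _example_fit(pdmodel, prev_points, new_points, start_pose):
    fitter = Fitter(pdmodel)
    fitter.start_pose = start_pose  # (Tx, Ty, s, theta) must be set before calling fit()
    return fitter.fit(prev_points, new_points)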
|
[
"utils.align.Aligner",
"numpy.diag",
"utils.structure.Shape",
"numpy.linalg.svd",
"numpy.zeros_like"
] |
[((315, 324), 'utils.align.Aligner', 'Aligner', ([], {}), '()\n', (322, 324), False, 'from utils.align import Aligner\n'), ((1480, 1548), 'numpy.linalg.svd', 'np.linalg.svd', (['self.pdmodel.scaled_eigenvectors'], {'full_matrices': '(False)'}), '(self.pdmodel.scaled_eigenvectors, full_matrices=False)\n', (1493, 1548), True, 'import numpy as np\n'), ((1561, 1577), 'numpy.zeros_like', 'np.zeros_like', (['w'], {}), '(w)\n', (1574, 1577), True, 'import numpy as np\n'), ((1812, 1837), 'numpy.diag', 'np.diag', (['(w / (w ** 2 + n))'], {}), '(w / (w ** 2 + n))\n', (1819, 1837), True, 'import numpy as np\n'), ((850, 866), 'utils.structure.Shape', 'Shape', (['new_shape'], {}), '(new_shape)\n', (855, 866), False, 'from utils.structure import Shape\n'), ((938, 955), 'utils.structure.Shape', 'Shape', (['prev_shape'], {}), '(prev_shape)\n', (943, 955), False, 'from utils.structure import Shape\n')]
|
import matplotlib
matplotlib.use('Agg')  # select the non-interactive Agg backend so figures can be rendered without a display
import matplotlib.pylab as plt
import numpy as np
x = np.asarray([0,5,2])
y = np.asarray([0,1,3])
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(x,y)
#plt.show() # we have a headless display, can't do this!
f.savefig('basicplot.eps',format='eps',orientation='portrait',transparent=True,dpi=5e4)
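# Added note (illustrative): with the Agg backend any file-based output works the same
# way; e.g. a PNG at a more typical resolution could be written with
# f.savefig('basicplot.png', format='png', dpi=150).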
|
[
"matplotlib.use",
"numpy.asarray",
"matplotlib.pylab.figure"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((133, 154), 'numpy.asarray', 'np.asarray', (['[0, 5, 2]'], {}), '([0, 5, 2])\n', (143, 154), True, 'import numpy as np\n'), ((157, 178), 'numpy.asarray', 'np.asarray', (['[0, 1, 3]'], {}), '([0, 1, 3])\n', (167, 178), True, 'import numpy as np\n'), ((181, 193), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (191, 193), True, 'import matplotlib.pylab as plt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from itertools import product
from pathlib import Path
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.direction import (Direction, get_diagonal_directions,
get_cross_directions)
from annotation.piece import Piece
from ..ry import BlackRyMoveLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/14'
class TestBlackRyMove(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_ry_move(self):
"""
        Test of the promotion / non-promotion decision for RY (the promoted rook).
        Whether the piece's effect actually reaches a square is checked elsewhere, so it is not considered here.
:return:
"""
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
        # Prepare one effect array for each move distance
effect = {
direction: [np.empty(shape, dtype=np.bool) for _ in range(8)]
for direction in get_cross_directions()
}
effect.update({
direction: np.empty(shape, dtype=np.bool)
for direction in get_diagonal_directions()
})
board = np.empty(shape, dtype=np.int32)
ph_board = tf.placeholder(tf.int32, shape=shape)
ry_effect = {
direction: [
tf.placeholder(tf.bool, shape=shape) for _ in range(8)
] for direction in get_cross_directions()
}
ry_effect.update({
direction: tf.placeholder(tf.bool, shape=shape)
for direction in get_diagonal_directions()
})
non_promoting = BlackRyMoveLayer()(ph_board, ry_effect)
        # Squeeze out extra dimensions so the results are easier to access
non_promoting = {key: tf.squeeze(value)
for key, value in non_promoting.items()}
feed_dict = {}
for direction, ph_list in ry_effect.items():
if direction in get_cross_directions():
for ph, e in zip(ph_list, effect[direction]):
feed_dict[ph] = e
else:
feed_dict[ph_list] = effect[direction]
feed_dict[ph_board] = board
with self.test_session() as sess:
for i, j, piece in product(range(9), range(9), range(Piece.SIZE)):
for direction, effect_list in effect.items():
if direction in get_cross_directions():
for e in effect_list:
e[:] = False
if self.data_format == 'NCHW':
e[0, 0, i, j] = True
else:
e[0, i, j, 0] = True
else:
effect_list[:] = False
if self.data_format == 'NCHW':
effect_list[0, 0, i, j] = True
else:
effect_list[0, i, j, 0] = True
piece = Piece(piece)
board[:] = piece
n = sess.run(non_promoting, feed_dict=feed_dict)
b = np.squeeze(board)
for direction, distance in product(effect.keys(), range(8)):
if direction in get_diagonal_directions():
if distance > 0:
continue
if (direction == Direction.RIGHT_UP and
(i == 8 or j == 8)):
continue
elif (direction == Direction.RIGHT_DOWN and
(i == 8 or j == 0)):
continue
elif (direction == Direction.LEFT_UP and
(i == 8 or j == 8)):
continue
elif (direction == Direction.LEFT_DOWN and
(i == 0 or j == 0)):
continue
if direction == Direction.RIGHT:
if i + distance >= 8:
continue
elif direction == Direction.UP:
if j + distance >= 8:
continue
elif direction == Direction.DOWN:
if j - distance <= 0:
continue
elif direction == Direction.LEFT:
if i - distance <= 0:
continue
n_move = n[direction]
if direction in get_cross_directions():
with self.subTest(i=i, j=j, piece=piece,
direction=direction,
distance=distance):
self.assertTupleEqual((8, 9, 9), n_move.shape)
if b[i, j] < Piece.WHITE_FU:
                            # One of our own pieces occupies the square, so the piece cannot move there
self.assertFalse(np.all(n_move[distance]))
else:
self.assertTrue(n_move[distance, i, j])
n_move[distance, i, j] = False
self.assertFalse(np.all(n_move[distance]))
else:
with self.subTest(i=i, j=j, piece=piece,
direction=direction):
self.assertTupleEqual((9, 9), n_move.shape)
if b[i, j] < Piece.WHITE_FU:
                                # One of our own pieces occupies the square, so the piece cannot move there
self.assertFalse(np.all(n_move))
else:
self.assertTrue(n_move[i, j])
n_move[i, j] = False
self.assertFalse(np.all(n_move))
|
[
"annotation.direction.get_cross_directions",
"pathlib.Path",
"tensorflow.placeholder",
"os.environ.get",
"numpy.squeeze",
"annotation.piece.Piece",
"numpy.empty",
"annotation.direction.get_diagonal_directions",
"numpy.all",
"tensorflow.squeeze"
] |
[((639, 668), 'os.environ.get', 'os.environ.get', (['"""DATA_FORMAT"""'], {}), "('DATA_FORMAT')\n", (653, 668), False, 'import os\n'), ((1280, 1311), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.int32'}), '(shape, dtype=np.int32)\n', (1288, 1311), True, 'import numpy as np\n'), ((1332, 1369), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'shape'}), '(tf.int32, shape=shape)\n', (1346, 1369), True, 'import tensorflow as tf\n'), ((698, 725), 'os.environ.get', 'os.environ.get', (['"""USE_CUDNN"""'], {}), "('USE_CUDNN')\n", (712, 725), False, 'import os\n'), ((1827, 1844), 'tensorflow.squeeze', 'tf.squeeze', (['value'], {}), '(value)\n', (1837, 1844), True, 'import tensorflow as tf\n'), ((1007, 1037), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.bool'}), '(shape, dtype=np.bool)\n', (1015, 1037), True, 'import numpy as np\n'), ((1086, 1108), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (1106, 1108), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((1166, 1196), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.bool'}), '(shape, dtype=np.bool)\n', (1174, 1196), True, 'import numpy as np\n'), ((1433, 1469), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': 'shape'}), '(tf.bool, shape=shape)\n', (1447, 1469), True, 'import tensorflow as tf\n'), ((1519, 1541), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (1539, 1541), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((1602, 1638), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': 'shape'}), '(tf.bool, shape=shape)\n', (1616, 1638), True, 'import tensorflow as tf\n'), ((2016, 2038), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (2036, 2038), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((3080, 3092), 'annotation.piece.Piece', 'Piece', (['piece'], {}), '(piece)\n', (3085, 3092), False, 'from annotation.piece import Piece\n'), ((3213, 3230), 'numpy.squeeze', 'np.squeeze', (['board'], {}), '(board)\n', (3223, 3230), True, 'import numpy as np\n'), ((539, 553), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (543, 553), False, 'from pathlib import Path\n'), ((1226, 1251), 'annotation.direction.get_diagonal_directions', 'get_diagonal_directions', ([], {}), '()\n', (1249, 1251), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((1668, 1693), 'annotation.direction.get_diagonal_directions', 'get_diagonal_directions', ([], {}), '()\n', (1691, 1693), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((2469, 2491), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (2489, 2491), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((3345, 3370), 'annotation.direction.get_diagonal_directions', 'get_diagonal_directions', ([], {}), '()\n', (3368, 3370), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((4701, 4723), 'annotation.direction.get_cross_directions', 'get_cross_directions', ([], {}), '()\n', (4721, 4723), False, 'from annotation.direction import Direction, get_diagonal_directions, get_cross_directions\n'), ((5144, 5168), 'numpy.all', 
'np.all', (['n_move[distance]'], {}), '(n_move[distance])\n', (5150, 5168), True, 'import numpy as np\n'), ((5388, 5412), 'numpy.all', 'np.all', (['n_move[distance]'], {}), '(n_move[distance])\n', (5394, 5412), True, 'import numpy as np\n'), ((5795, 5809), 'numpy.all', 'np.all', (['n_move'], {}), '(n_move)\n', (5801, 5809), True, 'import numpy as np\n'), ((6009, 6023), 'numpy.all', 'np.all', (['n_move'], {}), '(n_move)\n', (6015, 6023), True, 'import numpy as np\n')]
|
import numpy as np
import random as random
def move_to_sample(Rover):
delX = 0; delY = 0;
if len(Rover.rock_angles) > 0:
dist_to_rock = np.mean(np.abs(Rover.rock_dist))
angle_to_rock = np.mean(Rover.rock_angles);
Rover.steer = np.clip(angle_to_rock* 180/np.pi, -15, 15)
if Rover.vel>0.5:
Rover.brake = 0.1;
else:
Rover.brake = 0;
Rover.throttle = 0;
if Rover.vel <0.2 and Rover.near_sample == 0:
Rover.throttle = 0.1;
Rover.brake = 0 ;
if Rover.Is_Stuck:
Rover.brake = 0;
Rover.throttle = Rover.StuckThrottle;
Rover.steer = Rover.StuckSteering;
if Rover.near_sample:
Rover.brake = Rover.brake_set;
return Rover
def is_terrain_navigable(Rover):
if len(Rover.nav_dists)>0:
if (len(Rover.nav_angles) < Rover.stop_forward):
terrain_navigable = 0;
else:
terrain_navigable = 1;
else:
terrain_navigable = 0;
return terrain_navigable;
def is_rover_stuck(Rover):
SteerVel = np.mean(np.diff(Rover.SteerVel[0:6]));
no_new_area_mapped = (np.abs(Rover.new_perc_mapped - Rover.old_perc_mapped) <= 0.25);
rover_unstucking = (np.abs(Rover.total_time - Rover.map_time) <= 2);
rover_steer_not_changing = (np.abs(SteerVel) <= 2);
is_rover_stuck = no_new_area_mapped and rover_steer_not_changing and rover_unstucking;
if no_new_area_mapped and rover_steer_not_changing and Rover.Is_Stuck == 0:
# Rover was not stuck before, but is stuck now
Rover.Is_Stuck = 1;
Rover.StuckSteering = np.random.randint(-15, 16);
Rover.StuckThrottle = np.random.randint(-1, 2);
if Rover.Is_Stuck and ~rover_unstucking:
# Rover unstucking is done
Rover.Is_Stuck = 0;
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover):
# Implement conditionals to decide what to do given perception data
# Here you're all set up with some basic functionality but you'll need to
# improve on this decision tree to do a good job of navigating autonomously!
# Example:
# Check if we have vision data to make decisions with
    # If there is, we'll step through the known sample positions
# to confirm whether detections are real
is_rover_stuck(Rover);
if Rover.nav_angles is not None:
# Check for Rover.mode status
        # If near a sample, navigate towards the sample and stop
if (Rover.samples_located > Rover.samples_collected):
Rover = move_to_sample(Rover);
elif Rover.mode == 'forward':
# Check the extent of navigable terrain
if Rover.Is_Stuck:
# Rover is stuck, unstuck it
Rover.brake = 0;
Rover.throttle = Rover.StuckThrottle;
Rover.steer = Rover.StuckSteering;
elif is_terrain_navigable(Rover):
# If mode is forward, navigable terrain looks good
# and velocity is below max, then throttle
if Rover.vel < Rover.max_vel:
# Set throttle value to throttle setting
Rover.throttle = Rover.throttle_set
else: # Else coast
Rover.throttle = 0
Rover.brake = 0
# Set steering to average angle clipped to the range +/- 15
Weighted_Angles = np.mean(Rover.nav_angles);
Rover.steer = np.clip(np.mean(Weighted_Angles * 180/np.pi), -15, 15)
else:
# Set mode to "stop" and hit the brakes!
Rover.throttle = 0
# Set brake to stored brake value
Rover.brake = Rover.brake_set
Rover.steer = -15;
Rover.mode = 'stop';
# If we're already in "stop" mode then make different decisions
elif Rover.mode == 'stop':
# If we're in stop mode but still moving keep braking
if Rover.vel > 0.2:
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = 0
# If we're not moving (vel < 0.2) then do something else
elif Rover.vel <= 0.2:
# Now we're stopped and we have vision data to see if there's a path forward
if len(Rover.nav_angles) < Rover.go_forward:
# Release the brake to allow turning
Rover.brake = 0;
# Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning
Rover.steer = -15 # Could be more clever here about which way to turn
# If we're stopped but see sufficient navigable terrain in front then go!
if is_terrain_navigable(Rover):
# Set throttle back to stored value
Rover.throttle = Rover.throttle_set
# Release the brake
Rover.brake = 0
# Set steer to mean angle
Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)
Rover.mode = 'forward'
# Just to make the rover do something
# even if no modifications have been made to the code
else:
Rover.throttle = Rover.throttle_set
Rover.steer = 0
Rover.brake = 0
# If in a state where want to pickup a rock send pickup command
if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
Rover.send_pickup = True
Rover.mode = 'stop';
Rover.SteerVel[0:9] = Rover.SteerVel[1:10];
Rover.SteerVel[9] = Rover.steer;
if Rover.vel == 0:
Rover.Ok_To_Map = 0;
else:
Rover.Ok_To_Map = 1;
return Rover
|
[
"numpy.clip",
"numpy.mean",
"numpy.abs",
"numpy.diff",
"numpy.random.randint"
] |
[((211, 237), 'numpy.mean', 'np.mean', (['Rover.rock_angles'], {}), '(Rover.rock_angles)\n', (218, 237), True, 'import numpy as np\n'), ((261, 306), 'numpy.clip', 'np.clip', (['(angle_to_rock * 180 / np.pi)', '(-15)', '(15)'], {}), '(angle_to_rock * 180 / np.pi, -15, 15)\n', (268, 306), True, 'import numpy as np\n'), ((1123, 1151), 'numpy.diff', 'np.diff', (['Rover.SteerVel[0:6]'], {}), '(Rover.SteerVel[0:6])\n', (1130, 1151), True, 'import numpy as np\n'), ((1180, 1233), 'numpy.abs', 'np.abs', (['(Rover.new_perc_mapped - Rover.old_perc_mapped)'], {}), '(Rover.new_perc_mapped - Rover.old_perc_mapped)\n', (1186, 1233), True, 'import numpy as np\n'), ((1269, 1310), 'numpy.abs', 'np.abs', (['(Rover.total_time - Rover.map_time)'], {}), '(Rover.total_time - Rover.map_time)\n', (1275, 1310), True, 'import numpy as np\n'), ((1350, 1366), 'numpy.abs', 'np.abs', (['SteerVel'], {}), '(SteerVel)\n', (1356, 1366), True, 'import numpy as np\n'), ((1663, 1689), 'numpy.random.randint', 'np.random.randint', (['(-15)', '(16)'], {}), '(-15, 16)\n', (1680, 1689), True, 'import numpy as np\n'), ((1721, 1745), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {}), '(-1, 2)\n', (1738, 1745), True, 'import numpy as np\n'), ((162, 185), 'numpy.abs', 'np.abs', (['Rover.rock_dist'], {}), '(Rover.rock_dist)\n', (168, 185), True, 'import numpy as np\n'), ((3616, 3641), 'numpy.mean', 'np.mean', (['Rover.nav_angles'], {}), '(Rover.nav_angles)\n', (3623, 3641), True, 'import numpy as np\n'), ((3681, 3719), 'numpy.mean', 'np.mean', (['(Weighted_Angles * 180 / np.pi)'], {}), '(Weighted_Angles * 180 / np.pi)\n', (3688, 3719), True, 'import numpy as np\n'), ((5289, 5328), 'numpy.mean', 'np.mean', (['(Rover.nav_angles * 180 / np.pi)'], {}), '(Rover.nav_angles * 180 / np.pi)\n', (5296, 5328), True, 'import numpy as np\n')]
|
########################################
# CS/CNS/EE 155 2018
# Problem Set 1
#
# Author: <NAME>
# Description: Set 1 Perceptron helper
########################################
import numpy as np
import matplotlib.pyplot as plt
def predict(x, w, b):
'''
The method takes the weight vector and bias of a perceptron model, and
predicts the label for a single point x.
Inputs:
x: A (D, ) shaped numpy array containing a single point.
w: A (D, ) shaped numpy array containing the weight vector.
b: A float containing the bias term.
Output:
The label (1 or -1) for the point x.
'''
prod = np.dot(w, x) + b
return 1 if prod >= 0 else -1
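# Worked example (added for illustration; the helper name is hypothetical): with
# w = [1, -1] and b = 0.5, the point x = [2, 1] gives np.dot(w, x) + b = 1.5 >= 0,
# so predict() returns the label 1.
def _example_predict():
    return predict(np.asarray([2.0, 1.0]), np.asarray([1.0, -1.0]), 0.5)  # -> 1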
def plot_data(X, Y, ax):
# This method plots a labeled (with -1 or 1) 2D dataset.
ax.scatter(X[Y == 1, 0], X[Y == 1, 1], c = 'green', marker='+')
ax.scatter(X[Y == -1, 0], X[Y == -1, 1], c = 'red')
def boundary(x_1, w, b):
# Gets the corresponding x_2 value given x_1 and the decision boundary of a
# perceptron model. Note this only works for a 2D perceptron.
if w[1] == 0.0:
denom = 1e-6
else:
denom = w[1]
return (-w[0] * x_1 - b) / denom
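# Added derivation note: points on the decision boundary satisfy
# w[0]*x_1 + w[1]*x_2 + b = 0, hence x_2 = (-w[0]*x_1 - b) / w[1]; the `denom`
# guard above only avoids a division by zero when w[1] == 0.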
def plot_perceptron(w, b, ax):
# This method plots a perceptron decision boundary line. Note this only works for
# 2D perceptron.
xlim = ax.get_xlim(); ylim = ax.get_ylim()
x_2s = [boundary(x_1, w, b) for x_1 in xlim]
ax.plot(xlim, x_2s)
if predict([xlim[0], ylim[0]], w, b) == -1:
ax.fill_between(xlim, ylim[0], x_2s, facecolor='red', alpha=0.5)
else:
ax.fill_between(xlim, x_2s, ylim[-1], facecolor='red', alpha=0.5)
|
[
"numpy.dot"
] |
[((663, 675), 'numpy.dot', 'np.dot', (['w', 'x'], {}), '(w, x)\n', (669, 675), True, 'import numpy as np\n')]
|
from models import StandardHMM, DenseHMM, HMMLoggingMonitor
from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences
from data import penntreebank_tag_sequences, protein_sequences, train_test_split
from datetime import datetime
import os
import copy
import numpy as np
""" Initializes a StandardHMM and a DenseHMM and fits given data to it
"""
def _standard_vs_dense(train_X, test_X, standard_params=None, dense_params=None, gt_AB=None):
t = Timer()
train_X, train_lengths, train_unique = prepare_data(train_X)
test_X, test_lengths, test_unique = prepare_data(test_X)
standard_hmms = []
if standard_params is None:
standard_hmms.append(StandardHMM())
elif type(standard_params) is list or type(standard_params) is tuple:
for params in standard_params:
standard_hmms.append(StandardHMM(**params))
else:
standard_params = dict(standard_params)
standard_hmms.append(StandardHMM(**standard_params))
dense_params = {} if dense_params is None else dict(dense_params)
dense_hmm = DenseHMM(**dense_params)
opt_schemes = dict_get(dense_params, 'opt_schemes', default=('em', 'cooc'))
if 'em' in opt_schemes:
t.tic("Fitting dense HMM in mode 'em' ...")
dense_hmm.fit(train_X, train_lengths, test_X, test_lengths)
t.toc("Fitting finished.")
if 'cooc' in opt_schemes:
t.tic("Fitting dense HMM in mode 'cooc' ...")
dense_hmm.fit_coocs(train_X, train_lengths, test_X, test_lengths, gt_AB)
t.toc("Fitting finished.")
for i, standard_hmm in enumerate(standard_hmms):
t.tic("Fitting standard hmm %d/%d" % (i+1, len(standard_hmms)))
standard_hmm.fit(train_X, train_lengths, test_X, test_lengths)
t.toc("Fitting finished.")
def _dirichlet_random_numbers(alpha_size, sample_size, dirichlet_param, random_state):
return random_state.dirichlet(np.ones(alpha_size) * dirichlet_param,
size=(sample_size,))
""" Initializes the transition matrices of given hmm to dirichlet distributions.
Assumes that random_state is an instance of np.RandomState """
def _dirichlet_matrix_initializer(dirichlet_param, n_hidden_states, n_observables, random_state):
pi = 1. / n_hidden_states * np.ones(n_hidden_states)
A = _dirichlet_random_numbers(n_hidden_states, n_hidden_states, dirichlet_param, random_state)
B = _dirichlet_random_numbers(n_observables, n_hidden_states, dirichlet_param, random_state) # Note: This results in an n x m matrix
return pi, A, B
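# Added shape note (illustrative): for n hidden states and m observables the
# initializer above returns pi of shape (n,), A of shape (n, n) and B of shape
# (n, m), where every row of A and B is drawn from a symmetric Dirichlet with
# concentration `dirichlet_param`.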
def _stationary_matrix_init(n, m, rng, matrix_init_func):
pi, A, B = matrix_init_func(n, m, rng)
pi = compute_stationary(A)
return pi, A, B
def _default_standard_hmm_init():
return dict(n_hidden_states=1, startprob_prior=1.0, transmat_prior=1.0,
random_state=None, em_iter=10, convergence_tol=1e-2, verbose=False)
def _default_dense_hmm_init():
return dict(n_hidden_states=1, n_observables=None, startprob_prior=1.0, transmat_prior=1.0,
random_state=None, em_iter=10, convergence_tol=1e-10, verbose=False,
params="ste", init_params="ste", logging_monitor=None, mstep_config=None)
def _compute_fair_standard_n(m, n_dense, l_dense):
pre = - (m - 1)/2
discriminant = pre**2 + l_dense*(3*n_dense + m + 1)
if discriminant < 0:
raise Exception("Complex solution")
n_plus = pre + np.sqrt(discriminant)
n_minus = pre - np.sqrt(discriminant)
n = np.max((n_plus, n_minus))
if n <= 0:
raise Exception("Only negative solutions")
return int(np.around(n))
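# Added reading of the algebra above (stated tentatively): n is chosen as the positive
# root of n**2 + (m - 1)*n = l_dense*(3*n_dense + m + 1), i.e. the (approximate) free
# parameter count of a standard HMM with n states is matched against
# l_dense*(3*n_dense + m + 1) parameters attributed to the dense model.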
def _parse_base_parameters(exp_params, path_dict):
path_dict = dict(path_dict)
exp_params = dict(exp_params)
exp_params['standard_params'] = dict_get(exp_params, 'standard_params', default=_default_standard_hmm_init(), cast=dict)
exp_params['dense_params'] = dict_get(exp_params, 'dense_params', default=_default_dense_hmm_init(), cast=dict)
exp_params['dense_opt_schemes'] = dict_get(exp_params, 'dense_opt_schemes', default=('em',))
exp_params['compare_to_fair_standard'] = dict_get(exp_params, 'compare_to_fair_standard', default=False)
return exp_params
def _parse_syntheticgt_parameters(exp_params, path_dict):
exp_params = _parse_base_parameters(exp_params, path_dict)
exp_params['gt_params'] = dict_get(exp_params, 'gt_params', default=_default_standard_hmm_init(), cast=dict)
exp_params['n_seqs_train'] = dict_get(exp_params, 'n_seqs_train', default=10, cast=int)
exp_params['seqlen_train'] = dict_get(exp_params, 'seqlen_train', default=10, cast=int)
exp_params['n_seqs_test'] = dict_get(exp_params, 'n_seqs_test', default=10, cast=int)
exp_params['seqlen_test'] = dict_get(exp_params, 'seqlen_test', default=10, cast=int)
exp_params['gt_stationary'] = dict_get(exp_params, 'gt_stationary', default=False)
exp_params['gt_params']['n_observables'] = exp_params['n_emissions']
# Making sure the initializer returns stationary pi (if gt_stationary = true)...
init_params = dict_get(exp_params['gt_params'], 'init_params', default=None)
if init_params is not None and callable(init_params) and exp_params['gt_stationary']:
init_params_ = lambda n, m, rng: _stationary_matrix_init(n, m, rng, init_params)
init_params = init_params_
exp_params['gt_params']['init_params'] = init_params
if 'experiment_directory' in path_dict:
exp_dir = str(path_dict['experiment_directory'])
check_dir(exp_dir)
# Log GT EM optimization by default
gt_log_config = dict_get(exp_params, 'gt_log_config', default=dict(), cast=dict)
gt_log_config['exp_folder'] = dict_get(gt_log_config, 'exp_folder', default=exp_dir)
gt_log_config['log_folder'] = dict_get(gt_log_config, 'log_folder', default='/gt_logs/em_opt')
gt_logmon = HMMLoggingMonitor(gt_log_config)
exp_params['gt_params']['logging_monitor'] = gt_logmon
return exp_params
def _parse_syntheticgt_dirichlet_parameters(exp_params, path_dict):
exp_params = _parse_syntheticgt_parameters(exp_params, path_dict)
exp_params['dirichlet_param'] = dict_get(exp_params, 'dirichlet_param', default=0.1, cast=float)
exp_params['n_emissions'] = dict_get(exp_params, 'n_emissions', default=None)
# Initialize ground truth hmm
def _dirichlet_matrix_init(n, m, rng):
return _dirichlet_matrix_initializer(exp_params['dirichlet_param'], n, m, rng)
init_params = dict_get(exp_params['gt_params'], 'init_params', default=_dirichlet_matrix_init)
if init_params is not None and callable(init_params) and exp_params['gt_stationary']:
init_params_ = copy.deepcopy(init_params)
init_params = lambda n, m, rng: _stationary_matrix_init(n, m, rng, init_params_)
exp_params['gt_params']['init_params'] = init_params
return exp_params
def _parse_standard_and_dense(exp_params, path_dict, n_emissions):
exp_params['n_emissions'] = n_emissions
# Number of emissions must be the same for all models
exp_params['standard_params']['n_observables'] = n_emissions
exp_params['dense_params']['n_observables'] = n_emissions
# Set opt_schemes that are needed
exp_params['dense_params']['opt_schemes'] = exp_params['dense_opt_schemes']
# Setup fair standard hmm
if exp_params['compare_to_fair_standard']:
# TODO check l_uz = l_vw
n_dense, l_dense = exp_params['dense_params']['n_hidden_states'], exp_params['dense_params']['mstep_config']['l_uz']
n_fair = _compute_fair_standard_n(exp_params['n_emissions'], n_dense, l_dense)
exp_params['fair_standard_params'] = copy.deepcopy(exp_params['standard_params'])
exp_params['fair_standard_params']['n_hidden_states'] = n_fair
if 'experiment_directory' in path_dict:
exp_dir = str(path_dict['experiment_directory'])
check_dir(exp_dir)
standard_log_config = dict_get(exp_params, 'standard_log_config', default=dict(), cast=dict)
dense_log_config = dict_get(exp_params, 'dense_log_config', default=dict(), cast=dict)
standard_log_config['exp_folder'] = dict_get(standard_log_config, 'exp_folder', default=exp_dir)
standard_log_config['log_folder'] = dict_get(standard_log_config, 'log_folder', default='/standard_logs')
dense_log_config['exp_folder'] = dict_get(dense_log_config, 'exp_folder', default=exp_dir)
dense_log_config['log_folder'] = dict_get(dense_log_config, 'log_folder', default='/dense_logs')
standard_logmon, dense_logmon = HMMLoggingMonitor(standard_log_config), HMMLoggingMonitor(dense_log_config)
exp_params['standard_params']['logging_monitor'] = standard_logmon
exp_params['dense_params']['logging_monitor'] = dense_logmon
fair_standard_logmon = None
if 'fair_standard_params' in exp_params:
fair_standard_log_config = dict_get(exp_params, 'fair_standard_log_config', default=dict(), cast=dict)
fair_standard_log_config['exp_folder'] = dict_get(fair_standard_log_config, 'exp_folder', default=exp_dir)
fair_standard_log_config['log_folder'] = dict_get(fair_standard_log_config, 'log_folder', default='/fair_standard_logs')
fair_standard_logmon = HMMLoggingMonitor(fair_standard_log_config)
exp_params['fair_standard_params']['logging_monitor'] = fair_standard_logmon
return exp_params
def _sample_sequences_from_gt_hmm(exp_params, path_dict, gt_hmm=None, sample_retries=100):
t = Timer()
n_emissions = exp_params['gt_params']['n_observables']
if gt_hmm is None:
gt_hmm = StandardHMM(**exp_params['gt_params'])
# Sample train and test sequences, save them
t.tic()
cur_sample_try = 0
train_X = None
while cur_sample_try < sample_retries and not is_multinomial(train_X, min_symbols=n_emissions):
train_X = gt_hmm.sample_sequences(exp_params['n_seqs_train'], exp_params['seqlen_train'])
cur_sample_try += 1
if not is_multinomial(train_X, min_symbols=n_emissions):
raise Exception("Could not sample a multinomial distribution. Try to increase sequence length and number of sequences. Or change the dirichlet parameter")
cur_sample_try = 0
test_X = None
while cur_sample_try < sample_retries and not is_multinomial(test_X, min_symbols=n_emissions):
test_X = gt_hmm.sample_sequences(exp_params['n_seqs_test'], exp_params['seqlen_test'])
cur_sample_try += 1
t.toc("Generated train and test sequences")
if not is_multinomial(train_X, min_symbols=n_emissions):
raise Exception("Could not sample a multinomial distribution. Try to increase sequence length and number of sequences. Or change the dirichlet parameter.")
t.tic()
if 'gt_dir' in path_dict:
gt_dir = str(path_dict['gt_dir'])
check_dir(gt_dir)
np.save(gt_dir + '/transmat', gt_hmm.transmat_)
np.save(gt_dir + '/emissionprob', gt_hmm.emissionprob_)
np.save(gt_dir + '/startprob', gt_hmm.startprob_)
gt_samples = dict_get(exp_params, 'gt_samples', default=None, cast=tuple)
t.toc("Ground truth parameters logged")
gt_AB = None
if exp_params['gt_stationary']:
gt_AB = (gt_hmm.transmat_, gt_hmm.emissionprob_)
_save_data(path_dict, train_X, test_X, gt_AB)
return train_X, test_X, gt_AB
def _save_data(path_dict, train_X, test_X=None, gt_AB=None):
if 'data_dir' in path_dict:
data_dir = str(path_dict['data_dir'])
check_dir(data_dir)
np.save(data_dir + '/train_X', train_X)
if test_X is not None:
np.save(data_dir + '/test_X', test_X)
if gt_AB is not None:
np.save(data_dir + '/gt_A', gt_AB[0])
np.save(data_dir + '/gt_B', gt_AB[1])
timestamp_msg("Saved data in %s" % data_dir)
def _save_experiment_parameters(exp_params, path_dict):
if 'experiment_directory' in path_dict:
exp_dir = str(path_dict['experiment_directory'])
check_dir(exp_dir)
_exp_params = copy.deepcopy(exp_params)
gt_params = dict_get(_exp_params, 'gt_params', default=None, cast=dict)
if gt_params is not None:
_exp_params['gt_params'] = gt_params
init_params = dict_get(gt_params, 'init_params', default=None)
if callable(init_params):
_exp_params['gt_params']['init_params'] = str(init_params.__name__)
gt_logmon = dict_get(gt_params, 'logging_monitor', default=None)
if gt_logmon is not None and isinstance(gt_logmon, HMMLoggingMonitor):
_exp_params['gt_params']['logging_monitor'] = dict(gt_logmon.log_config)
standard_params = dict_get(_exp_params, 'standard_params', default=None, cast=dict)
standard_logmon = dict_get(standard_params, 'logging_monitor', default=None)
if standard_logmon is not None and isinstance(standard_logmon, HMMLoggingMonitor):
_exp_params['standard_params']['logging_monitor'] = dict(standard_logmon.log_config)
dense_params = dict_get(_exp_params, 'dense_params', default=None, cast=dict)
dense_logmon = dict_get(standard_params, 'logging_monitor', default=None)
if dense_logmon is not None and isinstance(dense_logmon, HMMLoggingMonitor):
_exp_params['dense_params']['logging_monitor'] = dict(dense_logmon.log_config)
fair_standard_params = dict_get(_exp_params, 'fair_standard_params', default=None, cast=dict)
fair_standard_logmon = dict_get(fair_standard_params, 'logging_monitor', default=None)
if fair_standard_logmon is not None and isinstance(fair_standard_logmon, HMMLoggingMonitor):
_exp_params['fair_standard_params']['logging_monitor'] = dict(fair_standard_logmon.log_config)
np.save(exp_dir + '/exp_params', _exp_params)
timestamp_msg("Saved experiment parameters in %s" % exp_dir)
return _exp_params
def synthetic_sequences_experiment(exp_params, path_dict, sample_retries=100, reuse_sequences=None):
t_exp = Timer()
start_time = t_exp.tic("Starting a 'synthetic sequences' experiment.")
# Get parameters
t = Timer()
t.tic("Parsing parameters ...")
exp_params = _parse_syntheticgt_dirichlet_parameters(exp_params, path_dict)
exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
_exp_params = _save_experiment_parameters(exp_params, path_dict)
t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
train_X, test_X, gt_AB = None, None, None
if reuse_sequences is None or type(reuse_sequences) != tuple or len(reuse_sequences) != 3:
train_X, test_X, gt_AB = _sample_sequences_from_gt_hmm(exp_params, path_dict, sample_retries=sample_retries)
else:
train_X, test_X, gt_AB = reuse_sequences
timestamp_msg("Reusing sequences")
if 'fair_standard_params' in exp_params:
_standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
exp_params['dense_params'], gt_AB)
else:
_standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'], gt_AB)
fin_time, diff = t_exp.toc("Finished a 'synthetic sequences' experiment.")
SUPPORTED_DATASETS = frozenset(('penntree_tag','protein'))
def get_dataset_sequences(ident, ds_params={}, log_dir=None):
if ident not in SUPPORTED_DATASETS:
raise Exception("Given Dataset %s is not supported." % str(ident))
sequences, tag_to_symb, symb_to_tag = None, None, None
if ident == 'penntree_tag':
sequences, tag_to_symb, symb_to_tag = penntreebank_tag_sequences(**ds_params)
elif ident == 'protein':
sequences, tag_to_symb, symb_to_tag = protein_sequences(**ds_params)
if log_dir is not None:
np.save(log_dir + '/symb_to_tag.npy', symb_to_tag)
np.save(log_dir + '/tag_to_symb.npy', tag_to_symb)
return sequences, tag_to_symb, symb_to_tag
def dataset_synthetic_sequences_experiment(exp_params, path_dict, sample_retries=100):
t_exp = Timer()
exp_params = dict(exp_params)
ident = dict_get(exp_params, 'dataset_ident', default='', cast=str)
start_time = t_exp.tic("Starting a 'dataset synthetic sequences' experiment. (%s)" % str(ident))
gt_dir = dict_get(path_dict, 'gt_dir', default=None)
check_dir(gt_dir)
ds_params = dict_get(exp_params, 'dataset_params', default=dict(), cast=dict)
gt_sequences, _, _ = get_dataset_sequences(ident, ds_params, gt_dir)
# Get parameters
t = Timer()
t.tic("Parsing parameters ...")
# Check gt_sequences
sequences, lengths, n_emissions = check_sequences(gt_sequences)
exp_params['n_emissions'] = n_emissions
exp_params = _parse_syntheticgt_parameters(exp_params, path_dict)
exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
_exp_params = _save_experiment_parameters(exp_params, path_dict)
t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
t.tic("Fitting GT HMM...")
gt_hmm = StandardHMM(**exp_params['gt_params'])
gt_hmm.fit(sequences, lengths)
t.toc("Fitting finished")
train_X, test_X, gt_AB = _sample_sequences_from_gt_hmm(exp_params, path_dict, gt_hmm=gt_hmm, sample_retries=sample_retries)
if 'fair_standard_params' in exp_params:
_standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
exp_params['dense_params'], gt_AB)
else:
_standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'], gt_AB)
fin_time, diff = t_exp.toc("Finished a 'dataset synthetic sequences' experiment.")
def dataset_sequences_experiment(exp_params, path_dict, reuse_sequences=None):
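    # Trains and compares standard vs. dense HMMs directly on a train/test split of a
    # real dataset (no synthetic ground-truth sampling).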
t_exp = Timer()
exp_params = dict(exp_params)
ident = dict_get(exp_params, 'dataset_ident', default='', cast=str)
start_time = t_exp.tic("Starting a 'dataset sequences' experiment. (%s)" % str(ident))
# Get parameters
t = Timer()
t.tic("Parsing parameters ...")
train_perc = dict_get(exp_params, 'train_perc', default=1., cast=float)
gt_dir = dict_get(path_dict, 'gt_dir', default=None)
check_dir(gt_dir)
ds_params = dict_get(exp_params, 'dataset_params', default=dict(), cast=dict)
if reuse_sequences is None or type(reuse_sequences) != tuple or len(reuse_sequences) != 2:
gt_sequences, _, _ = get_dataset_sequences(ident, ds_params, gt_dir)
train_X, test_X = train_test_split(gt_sequences, train_perc)
else:
train_X, test_X = reuse_sequences
timestamp_msg("Reusing sequences ...")
# Check gt_sequences
_, _, n_train_emissions = check_sequences(train_X)
n_test_emissions = None
if test_X is not None and len(test_X) > 0:
_, _, n_test_emissions = check_sequences(test_X)
_save_data(path_dict, train_X, test_X)
if n_test_emissions is not None and n_train_emissions != n_test_emissions:
raise Exception("Number of emissions in train and test sequence differs")
exp_params['n_emissions'] = n_train_emissions
exp_params = _parse_base_parameters(exp_params, path_dict)
exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
_exp_params = _save_experiment_parameters(exp_params, path_dict)
t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
if 'fair_standard_params' in exp_params:
_standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
exp_params['dense_params'])
else:
_standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'])
fin_time, diff = t_exp.toc("Finished a 'dataset sequences' experiment.")
def run_experiment(exp_type, exp_name, exp_params, reuse_setup=None):
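    # Entry point: sets up (or reuses) the experiment directory layout and dispatches
    # to the requested experiment type.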
experiment_directory, path_dict = None, None
if reuse_setup is None or type(reuse_setup) != tuple or len(reuse_setup) != 2:
experiment_directory, path_dict = setup_experiment(exp_name, exp_params)
else:
experiment_directory, path_dict = reuse_setup
supported_exp_types = ('synthetic_sequences', 'dataset_synthetic_sequences', 'dataset_sequences')
if exp_type == 'synthetic_sequences':
synthetic_sequences_experiment(exp_params, path_dict)
elif exp_type == 'dataset_synthetic_sequences':
dataset_synthetic_sequences_experiment(exp_params, path_dict)
elif exp_type == 'dataset_sequences':
dataset_sequences_experiment(exp_params, path_dict)
else:
raise Exception('Given experiment type "%s" is not supported. \n'
'It has to be one of the following: %s' % (str(exp_type), str(supported_exp_types)))
print(experiment_directory)
return experiment_directory
def setup_experiment(exp_name, exp_params):
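    # Builds the per-experiment path dictionary (experiment, data and ground-truth log
    # directories) under a timestamped directory name.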
path_dict = {}
experiment_directory = os.getcwd() + '/' + exp_name + datetime.now().strftime('%Y%m%d_%H-%M-%S')
path_dict['experiment_directory'] = experiment_directory
path_dict['data_dir'] = experiment_directory + '/data'
path_dict['gt_dir'] = experiment_directory + '/gt_logs'
return experiment_directory, path_dict
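# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The parameter keys below are assumptions inferred from the dict_get() calls
# in the parsing helpers above; they are not a verified working configuration.
if __name__ == '__main__':
    example_params = {
        'n_emissions': 8,               # assumed: size of the emission alphabet
        'dirichlet_param': 0.1,         # assumed: concentration of the ground-truth Dirichlet init
        'n_seqs_train': 10, 'seqlen_train': 50,
        'n_seqs_test': 5, 'seqlen_test': 50,
        'standard_params': {},          # assumed: keyword arguments forwarded to StandardHMM
        'dense_params': {},             # assumed: keyword arguments forwarded to DenseHMM
    }
    # run_experiment() creates the directory layout via setup_experiment() and then
    # dispatches to synthetic_sequences_experiment().
    run_experiment('synthetic_sequences', 'demo_run', example_params)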
|
[
"data.penntreebank_tag_sequences",
"numpy.sqrt",
"utils.is_multinomial",
"copy.deepcopy",
"models.HMMLoggingMonitor",
"numpy.save",
"utils.Timer",
"numpy.max",
"utils.compute_stationary",
"utils.check_sequences",
"data.protein_sequences",
"numpy.ones",
"data.train_test_split",
"numpy.around",
"utils.dict_get",
"utils.prepare_data",
"utils.timestamp_msg",
"os.getcwd",
"datetime.datetime.now",
"models.StandardHMM",
"utils.check_dir",
"models.DenseHMM"
] |
[((571, 578), 'utils.Timer', 'Timer', ([], {}), '()\n', (576, 578), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((627, 648), 'utils.prepare_data', 'prepare_data', (['train_X'], {}), '(train_X)\n', (639, 648), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((689, 709), 'utils.prepare_data', 'prepare_data', (['test_X'], {}), '(test_X)\n', (701, 709), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((1193, 1217), 'models.DenseHMM', 'DenseHMM', ([], {}), '(**dense_params)\n', (1201, 1217), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((1241, 1302), 'utils.dict_get', 'dict_get', (['dense_params', '"""opt_schemes"""'], {'default': "('em', 'cooc')"}), "(dense_params, 'opt_schemes', default=('em', 'cooc'))\n", (1249, 1302), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((2863, 2884), 'utils.compute_stationary', 'compute_stationary', (['A'], {}), '(A)\n', (2881, 2884), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((3704, 3729), 'numpy.max', 'np.max', (['(n_plus, n_minus)'], {}), '((n_plus, n_minus))\n', (3710, 3729), True, 'import numpy as np\n'), ((4237, 4295), 'utils.dict_get', 'dict_get', (['exp_params', '"""dense_opt_schemes"""'], {'default': "('em',)"}), "(exp_params, 'dense_opt_schemes', default=('em',))\n", (4245, 4295), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4341, 4404), 'utils.dict_get', 'dict_get', (['exp_params', '"""compare_to_fair_standard"""'], {'default': '(False)'}), "(exp_params, 'compare_to_fair_standard', default=False)\n", (4349, 4404), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4705, 4763), 'utils.dict_get', 'dict_get', (['exp_params', '"""n_seqs_train"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'n_seqs_train', default=10, cast=int)\n", (4713, 4763), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4797, 4855), 'utils.dict_get', 'dict_get', (['exp_params', '"""seqlen_train"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'seqlen_train', default=10, cast=int)\n", (4805, 4855), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((4888, 4945), 'utils.dict_get', 'dict_get', (['exp_params', '"""n_seqs_test"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'n_seqs_test', default=10, cast=int)\n", (4896, 4945), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, 
is_multinomial, compute_stationary, check_sequences\n'), ((4978, 5035), 'utils.dict_get', 'dict_get', (['exp_params', '"""seqlen_test"""'], {'default': '(10)', 'cast': 'int'}), "(exp_params, 'seqlen_test', default=10, cast=int)\n", (4986, 5035), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((5070, 5122), 'utils.dict_get', 'dict_get', (['exp_params', '"""gt_stationary"""'], {'default': '(False)'}), "(exp_params, 'gt_stationary', default=False)\n", (5078, 5122), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((5304, 5366), 'utils.dict_get', 'dict_get', (["exp_params['gt_params']", '"""init_params"""'], {'default': 'None'}), "(exp_params['gt_params'], 'init_params', default=None)\n", (5312, 5366), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6436, 6500), 'utils.dict_get', 'dict_get', (['exp_params', '"""dirichlet_param"""'], {'default': '(0.1)', 'cast': 'float'}), "(exp_params, 'dirichlet_param', default=0.1, cast=float)\n", (6444, 6500), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6533, 6582), 'utils.dict_get', 'dict_get', (['exp_params', '"""n_emissions"""'], {'default': 'None'}), "(exp_params, 'n_emissions', default=None)\n", (6541, 6582), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6775, 6860), 'utils.dict_get', 'dict_get', (["exp_params['gt_params']", '"""init_params"""'], {'default': '_dirichlet_matrix_init'}), "(exp_params['gt_params'], 'init_params', default=_dirichlet_matrix_init\n )\n", (6783, 6860), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9884, 9891), 'utils.Timer', 'Timer', ([], {}), '()\n', (9889, 9891), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((14526, 14533), 'utils.Timer', 'Timer', ([], {}), '()\n', (14531, 14533), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((14643, 14650), 'utils.Timer', 'Timer', ([], {}), '()\n', (14648, 14650), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16634, 16641), 'utils.Timer', 'Timer', ([], {}), '()\n', (16639, 16641), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16688, 16747), 'utils.dict_get', 'dict_get', (['exp_params', '"""dataset_ident"""'], {'default': '""""""', 'cast': 'str'}), "(exp_params, 'dataset_ident', default='', cast=str)\n", (16696, 16747), False, 'from utils import prepare_data, 
check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16867, 16910), 'utils.dict_get', 'dict_get', (['path_dict', '"""gt_dir"""'], {'default': 'None'}), "(path_dict, 'gt_dir', default=None)\n", (16875, 16910), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16915, 16932), 'utils.check_dir', 'check_dir', (['gt_dir'], {}), '(gt_dir)\n', (16924, 16932), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((17122, 17129), 'utils.Timer', 'Timer', ([], {}), '()\n', (17127, 17129), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((17234, 17263), 'utils.check_sequences', 'check_sequences', (['gt_sequences'], {}), '(gt_sequences)\n', (17249, 17263), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((17667, 17705), 'models.StandardHMM', 'StandardHMM', ([], {}), "(**exp_params['gt_params'])\n", (17678, 17705), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((18443, 18450), 'utils.Timer', 'Timer', ([], {}), '()\n', (18448, 18450), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18497, 18556), 'utils.dict_get', 'dict_get', (['exp_params', '"""dataset_ident"""'], {'default': '""""""', 'cast': 'str'}), "(exp_params, 'dataset_ident', default='', cast=str)\n", (18505, 18556), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18682, 18689), 'utils.Timer', 'Timer', ([], {}), '()\n', (18687, 18689), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18748, 18807), 'utils.dict_get', 'dict_get', (['exp_params', '"""train_perc"""'], {'default': '(1.0)', 'cast': 'float'}), "(exp_params, 'train_perc', default=1.0, cast=float)\n", (18756, 18807), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18820, 18863), 'utils.dict_get', 'dict_get', (['path_dict', '"""gt_dir"""'], {'default': 'None'}), "(path_dict, 'gt_dir', default=None)\n", (18828, 18863), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((18868, 18885), 'utils.check_dir', 'check_dir', (['gt_dir'], {}), '(gt_dir)\n', (18877, 18885), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((19368, 19392), 'utils.check_sequences', 'check_sequences', (['train_X'], {}), '(train_X)\n', (19383, 19392), False, 'from utils import prepare_data, check_random_state, 
create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((2460, 2484), 'numpy.ones', 'np.ones', (['n_hidden_states'], {}), '(n_hidden_states)\n', (2467, 2484), True, 'import numpy as np\n'), ((3632, 3653), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (3639, 3653), True, 'import numpy as np\n'), ((3674, 3695), 'numpy.sqrt', 'np.sqrt', (['discriminant'], {}), '(discriminant)\n', (3681, 3695), True, 'import numpy as np\n'), ((3816, 3828), 'numpy.around', 'np.around', (['n'], {}), '(n)\n', (3825, 3828), True, 'import numpy as np\n'), ((5752, 5770), 'utils.check_dir', 'check_dir', (['exp_dir'], {}), '(exp_dir)\n', (5761, 5770), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((5951, 6005), 'utils.dict_get', 'dict_get', (['gt_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(gt_log_config, 'exp_folder', default=exp_dir)\n", (5959, 6005), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6044, 6108), 'utils.dict_get', 'dict_get', (['gt_log_config', '"""log_folder"""'], {'default': '"""/gt_logs/em_opt"""'}), "(gt_log_config, 'log_folder', default='/gt_logs/em_opt')\n", (6052, 6108), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((6129, 6161), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['gt_log_config'], {}), '(gt_log_config)\n', (6146, 6161), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((6969, 6995), 'copy.deepcopy', 'copy.deepcopy', (['init_params'], {}), '(init_params)\n', (6982, 6995), False, 'import copy\n'), ((7972, 8016), 'copy.deepcopy', 'copy.deepcopy', (["exp_params['standard_params']"], {}), "(exp_params['standard_params'])\n", (7985, 8016), False, 'import copy\n'), ((8202, 8220), 'utils.check_dir', 'check_dir', (['exp_dir'], {}), '(exp_dir)\n', (8211, 8220), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8470, 8530), 'utils.dict_get', 'dict_get', (['standard_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(standard_log_config, 'exp_folder', default=exp_dir)\n", (8478, 8530), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8575, 8644), 'utils.dict_get', 'dict_get', (['standard_log_config', '"""log_folder"""'], {'default': '"""/standard_logs"""'}), "(standard_log_config, 'log_folder', default='/standard_logs')\n", (8583, 8644), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8686, 8743), 'utils.dict_get', 'dict_get', (['dense_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(dense_log_config, 'exp_folder', default=exp_dir)\n", (8694, 8743), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((8785, 8848), 
'utils.dict_get', 'dict_get', (['dense_log_config', '"""log_folder"""'], {'default': '"""/dense_logs"""'}), "(dense_log_config, 'log_folder', default='/dense_logs')\n", (8793, 8848), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9996, 10034), 'models.StandardHMM', 'StandardHMM', ([], {}), "(**exp_params['gt_params'])\n", (10007, 10034), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((10385, 10433), 'utils.is_multinomial', 'is_multinomial', (['train_X'], {'min_symbols': 'n_emissions'}), '(train_X, min_symbols=n_emissions)\n', (10399, 10433), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((10930, 10978), 'utils.is_multinomial', 'is_multinomial', (['train_X'], {'min_symbols': 'n_emissions'}), '(train_X, min_symbols=n_emissions)\n', (10944, 10978), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11241, 11258), 'utils.check_dir', 'check_dir', (['gt_dir'], {}), '(gt_dir)\n', (11250, 11258), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11267, 11314), 'numpy.save', 'np.save', (["(gt_dir + '/transmat')", 'gt_hmm.transmat_'], {}), "(gt_dir + '/transmat', gt_hmm.transmat_)\n", (11274, 11314), True, 'import numpy as np\n'), ((11323, 11378), 'numpy.save', 'np.save', (["(gt_dir + '/emissionprob')", 'gt_hmm.emissionprob_'], {}), "(gt_dir + '/emissionprob', gt_hmm.emissionprob_)\n", (11330, 11378), True, 'import numpy as np\n'), ((11387, 11436), 'numpy.save', 'np.save', (["(gt_dir + '/startprob')", 'gt_hmm.startprob_'], {}), "(gt_dir + '/startprob', gt_hmm.startprob_)\n", (11394, 11436), True, 'import numpy as np\n'), ((11467, 11527), 'utils.dict_get', 'dict_get', (['exp_params', '"""gt_samples"""'], {'default': 'None', 'cast': 'tuple'}), "(exp_params, 'gt_samples', default=None, cast=tuple)\n", (11475, 11527), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11937, 11956), 'utils.check_dir', 'check_dir', (['data_dir'], {}), '(data_dir)\n', (11946, 11956), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((11965, 12004), 'numpy.save', 'np.save', (["(data_dir + '/train_X')", 'train_X'], {}), "(data_dir + '/train_X', train_X)\n", (11972, 12004), True, 'import numpy as np\n'), ((12224, 12268), 'utils.timestamp_msg', 'timestamp_msg', (["('Saved data in %s' % data_dir)"], {}), "('Saved data in %s' % data_dir)\n", (12237, 12268), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12445, 12463), 'utils.check_dir', 'check_dir', (['exp_dir'], {}), '(exp_dir)\n', (12454, 12463), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12491, 
12516), 'copy.deepcopy', 'copy.deepcopy', (['exp_params'], {}), '(exp_params)\n', (12504, 12516), False, 'import copy\n'), ((12538, 12597), 'utils.dict_get', 'dict_get', (['_exp_params', '"""gt_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'gt_params', default=None, cast=dict)\n", (12546, 12597), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13154, 13219), 'utils.dict_get', 'dict_get', (['_exp_params', '"""standard_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'standard_params', default=None, cast=dict)\n", (13162, 13219), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13246, 13304), 'utils.dict_get', 'dict_get', (['standard_params', '"""logging_monitor"""'], {'default': 'None'}), "(standard_params, 'logging_monitor', default=None)\n", (13254, 13304), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13517, 13579), 'utils.dict_get', 'dict_get', (['_exp_params', '"""dense_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'dense_params', default=None, cast=dict)\n", (13525, 13579), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13603, 13661), 'utils.dict_get', 'dict_get', (['standard_params', '"""logging_monitor"""'], {'default': 'None'}), "(standard_params, 'logging_monitor', default=None)\n", (13611, 13661), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13870, 13940), 'utils.dict_get', 'dict_get', (['_exp_params', '"""fair_standard_params"""'], {'default': 'None', 'cast': 'dict'}), "(_exp_params, 'fair_standard_params', default=None, cast=dict)\n", (13878, 13940), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((13972, 14035), 'utils.dict_get', 'dict_get', (['fair_standard_params', '"""logging_monitor"""'], {'default': 'None'}), "(fair_standard_params, 'logging_monitor', default=None)\n", (13980, 14035), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((14265, 14310), 'numpy.save', 'np.save', (["(exp_dir + '/exp_params')", '_exp_params'], {}), "(exp_dir + '/exp_params', _exp_params)\n", (14272, 14310), True, 'import numpy as np\n'), ((14319, 14379), 'utils.timestamp_msg', 'timestamp_msg', (["('Saved experiment parameters in %s' % exp_dir)"], {}), "('Saved experiment parameters in %s' % exp_dir)\n", (14332, 14379), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((15331, 15365), 'utils.timestamp_msg', 'timestamp_msg', (['"""Reusing sequences"""'], {}), "('Reusing sequences')\n", (15344, 15365), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, 
Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16175, 16214), 'data.penntreebank_tag_sequences', 'penntreebank_tag_sequences', ([], {}), '(**ds_params)\n', (16201, 16214), False, 'from data import penntreebank_tag_sequences, protein_sequences, train_test_split\n'), ((16362, 16412), 'numpy.save', 'np.save', (["(log_dir + '/symb_to_tag.npy')", 'symb_to_tag'], {}), "(log_dir + '/symb_to_tag.npy', symb_to_tag)\n", (16369, 16412), True, 'import numpy as np\n'), ((16421, 16471), 'numpy.save', 'np.save', (["(log_dir + '/tag_to_symb.npy')", 'tag_to_symb'], {}), "(log_dir + '/tag_to_symb.npy', tag_to_symb)\n", (16428, 16471), True, 'import numpy as np\n'), ((19166, 19208), 'data.train_test_split', 'train_test_split', (['gt_sequences', 'train_perc'], {}), '(gt_sequences, train_perc)\n', (19182, 19208), False, 'from data import penntreebank_tag_sequences, protein_sequences, train_test_split\n'), ((19269, 19307), 'utils.timestamp_msg', 'timestamp_msg', (['"""Reusing sequences ..."""'], {}), "('Reusing sequences ...')\n", (19282, 19307), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((19501, 19524), 'utils.check_sequences', 'check_sequences', (['test_X'], {}), '(test_X)\n', (19516, 19524), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((799, 812), 'models.StandardHMM', 'StandardHMM', ([], {}), '()\n', (810, 812), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((2056, 2075), 'numpy.ones', 'np.ones', (['alpha_size'], {}), '(alpha_size)\n', (2063, 2075), True, 'import numpy as np\n'), ((8898, 8936), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['standard_log_config'], {}), '(standard_log_config)\n', (8915, 8936), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((8938, 8973), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['dense_log_config'], {}), '(dense_log_config)\n', (8955, 8973), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((9380, 9445), 'utils.dict_get', 'dict_get', (['fair_standard_log_config', '"""exp_folder"""'], {'default': 'exp_dir'}), "(fair_standard_log_config, 'exp_folder', default=exp_dir)\n", (9388, 9445), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9499, 9578), 'utils.dict_get', 'dict_get', (['fair_standard_log_config', '"""log_folder"""'], {'default': '"""/fair_standard_logs"""'}), "(fair_standard_log_config, 'log_folder', default='/fair_standard_logs')\n", (9507, 9578), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((9614, 9657), 'models.HMMLoggingMonitor', 'HMMLoggingMonitor', (['fair_standard_log_config'], {}), '(fair_standard_log_config)\n', (9631, 9657), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((10193, 10241), 'utils.is_multinomial', 'is_multinomial', (['train_X'], {'min_symbols': 'n_emissions'}), '(train_X, min_symbols=n_emissions)\n', (10207, 10241), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, 
compute_stationary, check_sequences\n'), ((10694, 10741), 'utils.is_multinomial', 'is_multinomial', (['test_X'], {'min_symbols': 'n_emissions'}), '(test_X, min_symbols=n_emissions)\n', (10708, 10741), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12048, 12085), 'numpy.save', 'np.save', (["(data_dir + '/test_X')", 'test_X'], {}), "(data_dir + '/test_X', test_X)\n", (12055, 12085), True, 'import numpy as np\n'), ((12128, 12165), 'numpy.save', 'np.save', (["(data_dir + '/gt_A')", 'gt_AB[0]'], {}), "(data_dir + '/gt_A', gt_AB[0])\n", (12135, 12165), True, 'import numpy as np\n'), ((12178, 12215), 'numpy.save', 'np.save', (["(data_dir + '/gt_B')", 'gt_AB[1]'], {}), "(data_dir + '/gt_B', gt_AB[1])\n", (12185, 12215), True, 'import numpy as np\n'), ((12707, 12755), 'utils.dict_get', 'dict_get', (['gt_params', '"""init_params"""'], {'default': 'None'}), "(gt_params, 'init_params', default=None)\n", (12715, 12755), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((12902, 12954), 'utils.dict_get', 'dict_get', (['gt_params', '"""logging_monitor"""'], {'default': 'None'}), "(gt_params, 'logging_monitor', default=None)\n", (12910, 12954), False, 'from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences\n'), ((16290, 16320), 'data.protein_sequences', 'protein_sequences', ([], {}), '(**ds_params)\n', (16307, 16320), False, 'from data import penntreebank_tag_sequences, protein_sequences, train_test_split\n'), ((1070, 1100), 'models.StandardHMM', 'StandardHMM', ([], {}), '(**standard_params)\n', (1081, 1100), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n'), ((21668, 21679), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21677, 21679), False, 'import os\n'), ((21699, 21713), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21711, 21713), False, 'from datetime import datetime\n'), ((960, 981), 'models.StandardHMM', 'StandardHMM', ([], {}), '(**params)\n', (971, 981), False, 'from models import StandardHMM, DenseHMM, HMMLoggingMonitor\n')]
|
import sys
sys.path.append('./train_model')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import numpy as np
import os
import argparse
parser = argparse.ArgumentParser(description='Adaptive Network Slimming')
parser.add_argument('-net', type=str, help='network name (e.g. resnet18); the pretrained pkl is loaded from ./model_pkl/<name>.pkl')
parser.add_argument('--nonuniform', action='store_true', help='set non-uniform pruning rate')
args = parser.parse_args()
# from models import *
transform_test = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
testset = torchvision.datasets.CIFAR10(root='./cifar10',train=False,download=True,transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,batch_size=128,shuffle=False)
classes = ('plane','car','bird','cat','deer','dog','frog','horse','ship','truck')
input_shape = (3,32,32)
if args.net == "resnet18":
START = 0.2
END = 0.81
netnum = 18
elif args.net == "resnet34":
START = 0.2
END = 0.81
netnum = 34
elif args.net == "resnet50":
START = 0.2
END = 0.8
netnum = 50
elif args.net == "resnet101":
START = 0.2
END = 0.8
netnum = 101
elif args.net == "resnet152":
START = 0.21
END = 0.79
netnum = 152
if args.nonuniform:
PRUNE_RATE = np.arange(START,END,(END-START)/(netnum-1))
FC_PRUNE_RATE = END
Model_Name = "ResNet" + str(netnum) + " (Non-uniform Pruning Rate)"
else:
PRUNE_RATE = np.zeros([netnum-1,1]) + 0.5
FC_PRUNE_RATE = 0.5
Model_Name = "ResNet" + str(netnum) + " (Uniform Pruning Rate)"
# -------------- Load Pretrained Model---------------
File_Name = "./model_pkl/" + args.net + ".pkl"
net = torch.load(File_Name, map_location= "cpu")
def RunData():
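    # Evaluates top-1 accuracy of the network on the CIFAR-10 test set (GPU inference, no gradients).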
correct = 0
total = 0
with torch.no_grad():
net.eval()
net.cuda()
for (x,y) in testloader:
xa = x.cuda()
ya = y.cuda()
out = net(xa)
_,predicted = torch.max(out.data,1)
total += y.size(0)
correct += (predicted.cpu() == y).sum()
net.cpu()
Accuracy = 100*correct.cpu().numpy()/total
return Accuracy
def RunData2():
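    # Same accuracy evaluation as RunData(), but with BatchNorm running-statistics tracking
    # disabled first; used for evaluating the pruned network.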
correct = 0
total = 0
for _,layer in net.named_modules():
if isinstance(layer,nn.BatchNorm2d):
layer.track_running_stats=False
with torch.no_grad():
net.eval()
net.cuda()
for (x,y) in testloader:
xa = x.cuda()
ya = y.cuda()
out = net(xa)
_,predicted = torch.max(out.data,1)
total += y.size(0)
correct += (predicted.cpu() == y).sum()
net.cpu()
Accuracy = 100*correct.cpu().numpy()/total
return Accuracy
def prune_filter(layer,PRUNE_RATE):
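    # Builds a binary mask that zeroes whole filters whose L1 norm (sum of absolute weights
    # over input channels and kernel positions) falls below the PRUNE_RATE percentile.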
prune = np.sum(abs(layer),axis = (1,2,3))
sort_prune = np.sort(prune)
mask = np.ones(layer.shape)
for i in range(len(prune)):
if prune[i] < sort_prune[int(np.floor(PRUNE_RATE*len(prune)))]:
mask[i,:] = 0
return mask
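# Worked example (illustrative): for four filters with L1 norms [2.0, 0.5, 1.2, 3.1] and
# PRUNE_RATE=0.5, the threshold is the sorted norm at index floor(0.5*4)=2, i.e. 2.0, so
# the filters with norms 0.5 and 1.2 are zeroed out.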
def prune_weight(layer,PRUNE_RATE):
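    # Builds a binary mask that zeroes individual weights whose magnitude falls below the
    # PRUNE_RATE percentile; used for the fully-connected layer.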
layer_flatten_sort = np.sort(abs(layer.flatten()))
mask = np.ones(layer.shape)
for i in range(layer.shape[0]):
for j in range(layer.shape[1]):
if abs(layer[i][j]) < layer_flatten_sort[int(np.floor(PRUNE_RATE*len(layer_flatten_sort)))]:
mask[i][j] = 0
return mask
def Calculate_flop():
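    # Estimates the remaining FLOPs of the (pruned) network: multiply-add counts of the
    # surviving conv filters plus the non-zero weights of linear layers.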
FLOP = 0
shape = input_shape[1]
for name,layer in net.named_modules():
if isinstance(layer,nn.Conv2d) and 'shortcut' not in name:
filter_data = layer.weight.data.numpy()
skip = sum(np.sum(abs(filter_data),axis = (1,2,3)) == 0)
filter_shape = layer.weight.data.numpy().shape
padding = layer.padding
stride = layer.stride
n = filter_shape[1] * filter_shape[2] * filter_shape[3] # vector length
fpn = n + (n - 1) # n multiplication, n-1 addition
step_x = np.floor(((shape - filter_shape[2] + 2 * padding[0]) / stride[0]) + 1)
shape = step_x
step = step_x**2
fpf = fpn*step
FLOP += fpf*(filter_shape[0] - skip)
elif isinstance(layer,nn.Linear):
filter_data = layer.weight.data.numpy()
skip = sum(sum(filter_data == 0))
filter_shape = layer.weight.data.numpy().shape
FLOP += 2 * (filter_shape[0] * filter_shape[1] - skip)
return FLOP
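# Worked example (illustrative): a 3x3 conv over 3 input channels gives n = 3*3*3 = 27 and
# fpn = 27 + 26 = 53 operations per output element; on a 32x32 output map that is
# 53 * 1024 = 54272 FLOPs for each surviving filter of that layer.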
ACC_before = RunData()
print("Model Name: " + Model_Name)
print("Accuracy : " + str(ACC_before) + "%")
FLOP_before = Calculate_flop()
if FLOP_before / 1e9 > 1: # for Giga Flops
print("FLOP : %4.2f GFLOP" % (FLOP_before / 1e9))
else:
print("FLOP : %4.2f MFLOP" % (FLOP_before / 1e6))
print(" ")
print(" Start Pruning ")
print("---------------------------------------------------")
print("|Layer| FLOP |#Filter or #Weight|Pruning |Filter|")
print("| No. | Save | before/after | Type | Size |")
print("|-----|--------|------------------|--------|------|")
# pruning
TOTAL_WEIGHT = 0
PRUNE_WEIGHT = 0
i = 0
for parname,layer in net.named_modules():
if isinstance(layer,nn.Conv2d) and 'shortcut' not in parname:
par = layer.weight.data.numpy()
par_size = par.shape
mask = prune_filter(par,PRUNE_RATE[i])
par = (par * mask)
print("| %3i" % (i+1), "|"+
" %5.2f" % float((1-(np.count_nonzero(mask)/mask.size)) * 100) + "% |"+
" %4i" % int((mask.size-np.count_nonzero(mask))/(par_size[1]*par_size[2]*par_size[2])),"/",
"%4i" % int(mask.size/(par_size[1]*par_size[2]*par_size[2])) + " | Filter |"+
" %1ix%1i |" % (par_size[2], par_size[3]))
TOTAL_WEIGHT = TOTAL_WEIGHT + (mask.size/(par_size[1]))
PRUNE_WEIGHT = PRUNE_WEIGHT + ((mask.size-np.count_nonzero(mask))/(par_size[1]))
i = i + 1
layer.weight.data = torch.from_numpy(par).type(torch.FloatTensor)
elif isinstance(layer,nn.Linear):
par = layer.weight.data.numpy()
par_size = par.shape
mask = prune_weight(par,FC_PRUNE_RATE)
par = (par * mask)
print("| %3i" % (i+1), "|"+
" %5.2f" % float((1-(np.count_nonzero(mask)/mask.size)) * 100) + "% |"+
" %5i" % int(mask.size-np.count_nonzero(mask)),"/",
"%5i" % int(mask.size) + " | Weight |" + " none |")
TOTAL_WEIGHT = TOTAL_WEIGHT + (mask.size)
PRUNE_WEIGHT = PRUNE_WEIGHT + (mask.size-np.count_nonzero(mask))
i = i + 1
layer.weight.data = torch.from_numpy(par).type(torch.FloatTensor)
print("---------------------------------------------------")
ACC_after = RunData2()
FLOP_after = Calculate_flop()
print(" ")
print(" After Pruning ")
print("Accuracy : " + str(ACC_before) + "% -> " + str(ACC_after) + "%")
if FLOP_after / 1e9 > 1: # for Giga Flops
if FLOP_before / 1e9 > 1: # for Giga Flops
print("FLOP : %4.2f GFLOP" % (FLOP_before / 1e9) + " -> %4.2f GFLOP" % (FLOP_after / 1e9))
else:
print("FLOP : %4.2f MFLOP" % (FLOP_before / 1e6) + " -> %4.2f GFLOP" % (FLOP_after / 1e9))
else:
if FLOP_before / 1e9 > 1: # for Giga Flops
print("FLOP : %4.2f GFLOP" % (FLOP_before / 1e9) + " -> %4.2f MFLOP" % (FLOP_after / 1e6))
else:
print("FLOP : %4.2f MFLOP" % (FLOP_before / 1e6) + " -> %4.2f MFLOP" % (FLOP_after / 1e6))
print("FLOP save: %5.2f" % (100*(FLOP_before - FLOP_after)/FLOP_before),"%")
|
[
"numpy.ones",
"argparse.ArgumentParser",
"torch.load",
"numpy.sort",
"torch.max",
"numpy.floor",
"torch.from_numpy",
"numpy.count_nonzero",
"torchvision.datasets.CIFAR10",
"numpy.zeros",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"sys.path.append",
"numpy.arange"
] |
[((12, 44), 'sys.path.append', 'sys.path.append', (['"""./train_model"""'], {}), "('./train_model')\n", (27, 44), False, 'import sys\n'), ((232, 296), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Adaptive Network Slimming"""'}), "(description='Adaptive Network Slimming')\n", (255, 296), False, 'import argparse\n'), ((664, 768), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./cifar10"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./cifar10', train=False, download=True,\n transform=transform_test)\n", (692, 768), False, 'import torchvision\n'), ((775, 842), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(testset, batch_size=128, shuffle=False)\n', (802, 842), False, 'import torch\n'), ((1761, 1802), 'torch.load', 'torch.load', (['File_Name'], {'map_location': '"""cpu"""'}), "(File_Name, map_location='cpu')\n", (1771, 1802), False, 'import torch\n'), ((1369, 1420), 'numpy.arange', 'np.arange', (['START', 'END', '((END - START) / (netnum - 1))'], {}), '(START, END, (END - START) / (netnum - 1))\n', (1378, 1420), True, 'import numpy as np\n'), ((2909, 2923), 'numpy.sort', 'np.sort', (['prune'], {}), '(prune)\n', (2916, 2923), True, 'import numpy as np\n'), ((2935, 2955), 'numpy.ones', 'np.ones', (['layer.shape'], {}), '(layer.shape)\n', (2942, 2955), True, 'import numpy as np\n'), ((3205, 3225), 'numpy.ones', 'np.ones', (['layer.shape'], {}), '(layer.shape)\n', (3212, 3225), True, 'import numpy as np\n'), ((551, 572), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (570, 572), True, 'import torchvision.transforms as transforms\n'), ((578, 649), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (598, 649), True, 'import torchvision.transforms as transforms\n'), ((1532, 1557), 'numpy.zeros', 'np.zeros', (['[netnum - 1, 1]'], {}), '([netnum - 1, 1])\n', (1540, 1557), True, 'import numpy as np\n'), ((1859, 1874), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1872, 1874), False, 'import torch\n'), ((2423, 2438), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2436, 2438), False, 'import torch\n'), ((2051, 2073), 'torch.max', 'torch.max', (['out.data', '(1)'], {}), '(out.data, 1)\n', (2060, 2073), False, 'import torch\n'), ((2615, 2637), 'torch.max', 'torch.max', (['out.data', '(1)'], {}), '(out.data, 1)\n', (2624, 2637), False, 'import torch\n'), ((4049, 4117), 'numpy.floor', 'np.floor', (['((shape - filter_shape[2] + 2 * padding[0]) / stride[0] + 1)'], {}), '((shape - filter_shape[2] + 2 * padding[0]) / stride[0] + 1)\n', (4057, 4117), True, 'import numpy as np\n'), ((6100, 6121), 'torch.from_numpy', 'torch.from_numpy', (['par'], {}), '(par)\n', (6116, 6121), False, 'import torch\n'), ((6015, 6037), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (6031, 6037), True, 'import numpy as np\n'), ((6685, 6707), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (6701, 6707), True, 'import numpy as np\n'), ((6755, 6776), 'torch.from_numpy', 'torch.from_numpy', (['par'], {}), '(par)\n', (6771, 6776), False, 'import torch\n'), ((5681, 5703), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (5697, 5703), True, 'import numpy as np\n'), ((6490, 6512), 'numpy.count_nonzero', 'np.count_nonzero', 
(['mask'], {}), '(mask)\n', (6506, 6512), True, 'import numpy as np\n'), ((5589, 5611), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (5605, 5611), True, 'import numpy as np\n'), ((6400, 6422), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (6416, 6422), True, 'import numpy as np\n')]
|
## @class IntraCodec
# Module designed for encoding and decoding YUV videos using the intra-frame method
# That is considering adjacent pixels in the same frame and encoding their errors
# @author <NAME> 89005
# @author <NAME> 89262
import numpy as np
import math
from Golomb import *
from Bitstream import *
class IntraCodec:
## Initialization function
# @param[in] filename Path of the file to read
# @param[in] encoded A flag used to indicate if the video in the given path was encoded by this same class
# @param[in] limitFrames Optional parameter to limit the number of frames to considered
# Initializing and setting up some useful parameters and flags
def __init__(self, filename, encoded=False, limitFrames=None):
self.vid = filename
self.encoding='utf-8'
# Array of arrays containing each frame's components
self.frameY=[]
self.frameV=[]
self.frameU=[]
self.encoded=False
self.quantizationStep=None
self.colorSpace=None
np.seterr(over='ignore')
#calls read video on initialization
if not encoded:
self.read_video()
else:
self.encoded=True
self.read_encoded_video(limitFrames=limitFrames)
## read_video function
# Reads YUV video information from file, storing all its data in our structures, calculating different components lengths and shapes
def read_video(self):
f=open(self.vid,"rb")
c=1
for line in f:
# Processing header
if c==1:
line=line.decode(self.encoding)
self.header=line.strip()
self.handleHeader()
# Rest of the video
if c>=2:
frameY=f.read(self.yLength)
frameU=f.read(self.uLength)
frameV=f.read(self.vLength)
y=np.frombuffer(frameY, dtype=np.uint8)
u=np.frombuffer(frameU, dtype=np.uint8)
v=np.frombuffer(frameV, dtype=np.uint8)
y=y.reshape(self.shape)
u=u.reshape(self.other_shape)
v=v.reshape(self.other_shape)
self.frameY+=[y]
self.frameU+=[u]
self.frameV+=[v]
c+=1
self.TotalFrames=len(self.frameY)
f.close()
## read_encoded_video function
# @param[in] limitFrames Optional parameter to limit the number of frames to be decoded
# Reads video information (encoded by this class) from file
# Starts by decoding and interpreting the header, followed by decoding of all the pixel errors and recreating the original pixel based on the predictor that was used
def read_encoded_video(self,limitFrames=None):
bs=BitStream(self.vid,'READ')
headerlen=bs.read_n_bits(8)
chars=[]
for i in range(0,headerlen*8):
chars.append(str(bs._readbit()))
res=''.join(chars)
self.header=self.decode_binary_string(res)
#handle header
self.handleHeader()
g=Golomb(self.golombParam)
bitsResto=int(math.log(self.golombParam,2))
if limitFrames==None:
l=self.TotalFrames
else:
l=limitFrames
#
self.frameY=[None]*l
self.frameU=[None]*l
self.frameV=[None]*l
#
for frame in range(0,l):
print('decoding frame',frame)
y=np.zeros(shape=self.shape,dtype=np.uint8)
u=np.zeros(shape=self.other_shape,dtype=np.uint8)
v=np.zeros(shape=self.other_shape,dtype=np.uint8)
for line in range(0, self.height):
for column in range(0,self.width):
pixel=self.decodeWithBitstream(3,bs,g,bitsResto)
a=self.getYUVPixel(frame,line,column-1, resized=False)
c=self.getYUVPixel(frame,line-1,column-1, resized=False)
b=self.getYUVPixel(frame,line-1,column, resized=False)
x=self.predict(a,c,b)
pixel=self.sum(x,pixel)
pixel=tuple(pixel)
l,c=self.adjustCoord(line,column)
y[line,column]=pixel[0]
u[l,c]=pixel[1]
v[l,c]=pixel[2]
#
self.frameY[frame]=y
self.frameU[frame]=u
self.frameV[frame]=v
#
bs.close()
## handleHeader function
# Interpreting the header of the file, containing width, height, frames per second and color space, assigning them to class variables
# This header can also contain other parameters added while encoding, such as the parameter for Golomb and the quantization steps used for lossy coding
def handleHeader(self):
print(self.header)
fields=self.header.split(" ")
for field in fields:
c=field[0]
if c=='W':
self.width=int(field[1:])
elif c=='H':
self.height=int(field[1:])
elif c=='F':
self.fps=int(field[1:3])
elif c=='C':
self.colorSpace=int(field[1:])
elif c=='G':
                self.golombParam=int(field[6:])  # value after the 'Golomb' prefix
self.encoded=True
elif c=='z':
self.TotalFrames=int(field[1:])
elif c=='q':
qlist=field[1:]
qsteps=qlist.split(':')
self.quantizationStep=[int(qsteps[0]),int(qsteps[1]),int(qsteps[2])]
self.computeShape()
print('width=',self.width, 'height=',self.height, self.fps, self.colorSpace, self.frameLength)
if self.encoded:
print('g=',self.golombParam, 'totalframes=',self.TotalFrames)
if self.quantizationStep!=None:
print('q=',self.quantizationStep)
## adjustCoord function
# @param[in] line Line where the pixel is located
# @param[in] column Column where the pixel is located
# @param[out] line Adjusted line number
# @param[out] column Adjusted column number
# Adjusts given line and column considering the different array shapes in different color spaces
# Useful when assigning new values to a certain pixel position
def adjustCoord(self,line,column):
if self.colorSpace=='4:2:2':
c=math.floor((column/2))
return line,c
elif self.colorSpace=='4:2:0':
c=math.floor((column/2))
l=math.floor((line/2))
return l,c
else:
return line,column
## computeShape function
# Calculating array shapes for YUV components based on the color space
def computeShape(self):
if self.colorSpace==444:
self.colorSpace='4:4:4'
self.frameLength=int(self.width*self.height*3)
self.yLength=self.uLength=self.vLength=int(self.frameLength/3)
self.shape = (int(self.height), self.width)
self.other_shape = (int(self.height), self.width)
elif self.colorSpace==422:
self.colorSpace='4:2:2'
self.frameLength=int(self.width*self.height*2)
self.yLength=int(self.frameLength/2)
self.vLength=self.uLength=int(self.frameLength/4)
self.shape = (int(self.height), self.width)
self.other_shape = (int(self.height), int(self.width/2))
else:
self.colorSpace='4:2:0'
self.frameLength=int(self.width*self.height*3/2)
self.yLength=int(self.frameLength*(2/3))
self.uLength=self.vLength=int(self.frameLength*(1/6))
self.shape = (int(self.height), self.width)
self.other_shape = (int(self.height/2), int(self.width/2))
## getYUVPixel function
# @param[in] frame Number of the frame from which to read the pixel from
# @param[in] line Line in which the pixel is located
# @param[in] column Column in which the pixel is located
# @param[in] resized A flag used to indicate if the arrays have been resized or not
# @param[out] p The pixel tuple in YUV format
# Returns 0,0,0 for non existent pixels, useful for the Codecs
# Adjust line and column numbers based on the color space (and array shapes)
def getYUVPixel(self, frame, line, column, resized):
yf=self.frameY[frame]
uf=self.frameU[frame]
vf=self.frameV[frame]
if resized==False:
if self.colorSpace=='4:2:2':
c=math.floor((column/2))
if line<0 or column<0 or c<0:
return 0,0,0
p=yf[line,column], uf[line,c], vf[line,c]
elif self.colorSpace=='4:2:0':
c=math.floor((column/2))
l=math.floor((line/2))
if line<0 or column<0 or c<0 or l<0:
return 0,0,0
p=yf[line,column], uf[l,c], vf[l,c]
else:
if line<0 or column<0:
return 0,0,0
p=yf[line,column], uf[line,column], vf[line,column]
else:
if line<0 or column<0:
return 0,0,0
p=yf[line,column], uf[line,column], vf[line,column]
return p
## updateYUVPixel function
# @param[in] compNumb Number of the pixel component to be changed (0=Y,1=U,2=V)
# @param[in] frame Number of the frame where the pixel is located
# @param[in] line Line in which the pixel is located
# @param[in] column Column in which the pixel is located
# @param[in] value New value of the pixel's component
# Used for avoiding error propagation in lossy coding
def updateYUVPixel(self,compNumb,frame,line,column,value):
l,c=self.adjustCoord(line,column)
if compNumb==0:
rf=self.frameY[frame]
rf.setflags(write=1)
rf[line,column]=value
elif compNumb==1:
rf=self.frameU[frame]
rf.setflags(write=1)
rf[l,c]=value
else:
rf=self.frameV[frame]
rf.setflags(write=1)
rf[l,c]=value
## encode_video function
# @param[in] filename Path of file to write with the encoded video information
# @param[in] golombparam Golomb's parameter M (factor)
# @param[in] q Optional parameter for specifying each components quantization steps for lossy coding
# @param[in] limitFrames Optional parameter for limiting number of frames to encode
# Starts by encoding the header, passing additional parameters such as the Golomb factor
# Proceeds to encode each pixel, by calculating each component's error according to the predictor function
def encode_video(self, filename, golombparam, q=None, limitFrames=None):
if limitFrames==None:
l=self.TotalFrames
else:
l=limitFrames
g=Golomb(golombparam)
bs=BitStream(filename,'WRITE')
header='ENCODED '+self.header+' Golomb'+str(golombparam)+' z'+str(self.TotalFrames)
if q!=None:
header+=' q'+str(q[0])+':'+str(q[1])+':'+str(q[2])
self.quantizationStep=q
headerlen=len(header)
bs.write_n_bits(headerlen,8)
bs.writeTxt(header)
for frame in range(0,l):
print('encoding frame',frame)
for line in range(0,self.height):
for column in range(0,self.width):
p=self.getYUVPixel(frame,line,column, resized=False)
a=self.getYUVPixel(frame,line,column-1, resized=False)
c=self.getYUVPixel(frame,line-1,column-1, resized=False)
b=self.getYUVPixel(frame,line-1,column, resized=False)
x=self.predict(a,c,b)
erro=self.diff(p,x)
self.encodeWithBitstream(erro,bs,g,pixel=p,frame=frame,line=line,column=column)
bs.close()
## predict function
# @param[in] a Adjacent pixel in position (line,col-1)
# @param[in] c Adjacent pixel in position (line-1,col-1)
# @param[in] b Adjacent pixel in position (line-1,col)
# @param[out] ret Most similar pixel
# The returned pixel is calculated using the JPEG-LS non-linear predictor formula
def predict(self,a,c,b):
y=[int(a[0]),int(c[0]),int(b[0])]
u=[int(a[1]),int(c[1]),int(b[1])]
v=[int(a[2]),int(c[2]),int(b[2])]
l=[y]+[u]+[v]
ret=[]
for component in l:
if component[1]>=max(component[0],component[2]):
x=min(component[0],component[2])
elif component[1]<=min(component[0],component[2]):
x=max(component[0],component[2])
else:
x=component[0]+component[2]-component[1]
ret.append(x)
return ret
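    # Worked example for a single component (illustrative): with a=100, c=90, b=80,
    # c is neither >= max(a,b)=100 nor <= min(a,b)=80, so the JPEG-LS "plane" case applies
    # and the prediction is a + b - c = 100 + 80 - 90 = 90.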
## diff function
# @param[in] p First pixel
# @param[in] x Second pixel
# @param[out] r Pixel result of the difference between the two pixels
# Calculates the result pixel by calculating the difference between each yuv component
def diff(self,p,x):
ey=int(p[0])-int(x[0])
eu=int(p[1])-int(x[1])
ev=int(p[2])-int(x[2])
return(ey,eu,ev)
## sum function
# @param[in] p First pixel
# @param[in] x Second pixel
# @param[out] r Pixel result of the sum between the two pixels
# Calculates the result pixel by calculating the sum between each yuv component
def sum(self,p,x):
ey=p[0]+x[0]
eu=p[1]+x[1]
ev=p[2]+x[2]
return(ey,eu,ev)
## printPixels function
# Function for printing pixels, useful during development
def printPixels(self):
l=self.TotalFrames
l=1
h=self.height
#h=20
w=self.width
#w=20
for frame in range(0,l):
#print('processing frame',frame)
for line in range(0,h):
for column in range(0,w):
if line==0 and w-10<=column<w:
p=self.getYUVPixel(frame,line,column, resized=False)
print(p, end=';')
#print('')
## decode_binary_string function
# @param[in] s String
# @param[out] r Decoded binary string
# Additional function to decode binary strings
def decode_binary_string(self,s):
return ''.join(chr(int(s[i*8:i*8+8],2)) for i in range(len(s)//8))
## getFrames function
# @param[out] frames The data structures with all the frames of each component
# Useful to check data integrity
def getFrames(self):
return self.frameY, self.frameU,self.frameV
## encodeWithBitStream function
# @param[in] value Value to be encoded
# @param[in] bs Bitstream class object
# @param[in] g Golomb class object
# @param[in] pixel Current pixel values being encoded, used for lossy coding
# @param[in] frame Frame where the pixel being encoded is located
# @param[in] line Line where the pixel being encoded is located
# @param[in] column Column where the pixel being encoded is located
# Switches the value to be encoded to positive, writing a 1 or 0 according to the original value
# If using lossy coding functionality, divides color component by quantization step and updates pixel value
# Proceeds to write the encoded value by Golomb with the Bitstream
def encodeWithBitstream(self, value,bs,g, pixel=None, frame=None, line=None, column=None):
for i in range(0,len(value)):
if value[i]<0:
n=value[i]*-1
bs.writebits(1,1)
else:
bs.writebits(0,1)
n=value[i]
if self.quantizationStep!=None and self.quantizationStep[i]!=0:
#newValue=pixel[i]+(n)
n=math.floor(n/self.quantizationStep[i])
#if line!=0 and column!=0:
#self.updateYUVPixel(i,frame,line,column,newValue)
n=g.encode(n)
bs.writebits(int(n,2),len(n))
## decodeWithBitStream function
    # @param[in] count Number of values to read
# @param[in] bs Bitstream class object
# @param[in] g Golomb class object
# @param[in] bitsResto Number of bits of the remainder = log(factor,2)
# @param[out] pixel Decoded value
# Starts by reading one bit 0 or 1, determing if number was negative
# Reads the bits from the Bitstream and decodes them with Golomb
# Multiplies by quantization step if using lossy coding
    def decodeWithBitstream(self, count, bs, g, bitsResto):
        pixel=[]
        for i in range(0, count):
ay=bs.read_n_bits(1)
seq=''
while True:
r=str(bs.read_n_bits(1))
seq+=r
if r=='0':
break
seq+=str(bs.readbits(bitsResto))
comp=g.decode(seq)
if ay==1:
comp=comp*-1
if self.quantizationStep!=None and self.quantizationStep[i]!=0:
comp=comp*self.quantizationStep[i]
pixel.append(comp)
return pixel
## verifyData function
# @param[in] video Class containing video for comparison
# @param[in] numberoframes Limits number of frames to check
# Compares data between two videos
def verifyData(self,video,numberoframes):
m1,m2,m3=self.getFrames()
m4,m5,m6=video.getFrames()
for i in range(0,numberoframes):
if (np.array_equal(m1[i],m4[i])):
print('Y-',i,'correct')
for i in range(0,numberoframes):
if (np.array_equal(m2[i],m5[i])):
print('U-',i,'correct')
for i in range(0,numberoframes):
if (np.array_equal(m3[i],m6[i])):
print('V-',i,'correct')
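# ---------------------------------------------------------------------------
# Minimal round-trip sketch (illustrative only; the file names are placeholders,
# not files shipped with this module).
if __name__ == '__main__':
    original = IntraCodec('input.yuv')                     # hypothetical raw YUV file with header
    original.encode_video('encoded.bin', golombparam=4)     # lossless intra coding (no quantization steps)
    decoded = IntraCodec('encoded.bin', encoded=True)       # decode the bitstream written above
    original.verifyData(decoded, 1)                         # the first frame of both videos should match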
|
[
"math.floor",
"math.log",
"numpy.zeros",
"numpy.array_equal",
"numpy.frombuffer",
"numpy.seterr"
] |
[((1042, 1066), 'numpy.seterr', 'np.seterr', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (1051, 1066), True, 'import numpy as np\n'), ((3174, 3203), 'math.log', 'math.log', (['self.golombParam', '(2)'], {}), '(self.golombParam, 2)\n', (3182, 3203), False, 'import math\n'), ((3503, 3545), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.shape', 'dtype': 'np.uint8'}), '(shape=self.shape, dtype=np.uint8)\n', (3511, 3545), True, 'import numpy as np\n'), ((3559, 3607), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.other_shape', 'dtype': 'np.uint8'}), '(shape=self.other_shape, dtype=np.uint8)\n', (3567, 3607), True, 'import numpy as np\n'), ((3621, 3669), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.other_shape', 'dtype': 'np.uint8'}), '(shape=self.other_shape, dtype=np.uint8)\n', (3629, 3669), True, 'import numpy as np\n'), ((6625, 6647), 'math.floor', 'math.floor', (['(column / 2)'], {}), '(column / 2)\n', (6635, 6647), False, 'import math\n'), ((17791, 17819), 'numpy.array_equal', 'np.array_equal', (['m1[i]', 'm4[i]'], {}), '(m1[i], m4[i])\n', (17805, 17819), True, 'import numpy as np\n'), ((17918, 17946), 'numpy.array_equal', 'np.array_equal', (['m2[i]', 'm5[i]'], {}), '(m2[i], m5[i])\n', (17932, 17946), True, 'import numpy as np\n'), ((18045, 18073), 'numpy.array_equal', 'np.array_equal', (['m3[i]', 'm6[i]'], {}), '(m3[i], m6[i])\n', (18059, 18073), True, 'import numpy as np\n'), ((1925, 1962), 'numpy.frombuffer', 'np.frombuffer', (['frameY'], {'dtype': 'np.uint8'}), '(frameY, dtype=np.uint8)\n', (1938, 1962), True, 'import numpy as np\n'), ((1981, 2018), 'numpy.frombuffer', 'np.frombuffer', (['frameU'], {'dtype': 'np.uint8'}), '(frameU, dtype=np.uint8)\n', (1994, 2018), True, 'import numpy as np\n'), ((2037, 2074), 'numpy.frombuffer', 'np.frombuffer', (['frameV'], {'dtype': 'np.uint8'}), '(frameV, dtype=np.uint8)\n', (2050, 2074), True, 'import numpy as np\n'), ((6727, 6749), 'math.floor', 'math.floor', (['(column / 2)'], {}), '(column / 2)\n', (6737, 6749), False, 'import math\n'), ((6764, 6784), 'math.floor', 'math.floor', (['(line / 2)'], {}), '(line / 2)\n', (6774, 6784), False, 'import math\n'), ((8782, 8804), 'math.floor', 'math.floor', (['(column / 2)'], {}), '(column / 2)\n', (8792, 8804), False, 'import math\n'), ((16107, 16147), 'math.floor', 'math.floor', (['(n / self.quantizationStep[i])'], {}), '(n / self.quantizationStep[i])\n', (16117, 16147), False, 'import math\n'), ((9003, 9025), 'math.floor', 'math.floor', (['(column / 2)'], {}), '(column / 2)\n', (9013, 9025), False, 'import math\n'), ((9044, 9064), 'math.floor', 'math.floor', (['(line / 2)'], {}), '(line / 2)\n', (9054, 9064), False, 'import math\n')]
|
import os
import numpy as np
import pytest
from spectrum_overload import Spectrum
from mingle.utilities.spectrum_utils import load_spectrum, select_observation
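# Each filename below runs as a separate test case via pytest.mark.parametrize.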
@pytest.mark.parametrize("fname", ["HD30501-1-mixavg-tellcorr_1.fits", "HD30501-1-mixavg-h2otellcorr_1.fits"])
def test_load_spectrum(fname):
fname = os.path.join("tests", "testdata", "handy_spectra", fname)
results = load_spectrum(fname)
assert isinstance(results, Spectrum)
assert results.header["OBJECT"].upper() == "HD30501"
assert np.all(results.xaxis > 2110) # nm
assert np.all(results.xaxis < 2130) # nm
assert np.all(results.flux < 2)
assert np.all(results.flux >= 0)
def test_load_no_filename_fits():
"""Not a valid file."""
with pytest.raises(ValueError):
load_spectrum("")
@pytest.mark.parametrize("chip", [0, None, 5, 42])
def test_select_observation_with_bad_chip(chip):
with pytest.raises(ValueError):
select_observation("HD30501", "1", chip)
@pytest.mark.xfail()
def test_spectrum_plotter(spectra, label=None, show=False):
"""Plot a Spectrum object."""
assert False
@pytest.mark.xfail()
def test_plot_spectra(obs, model):
"""Plot two spectra."""
assert False
|
[
"pytest.mark.xfail",
"mingle.utilities.spectrum_utils.select_observation",
"os.path.join",
"mingle.utilities.spectrum_utils.load_spectrum",
"pytest.mark.parametrize",
"pytest.raises",
"numpy.all"
] |
[((165, 278), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname"""', "['HD30501-1-mixavg-tellcorr_1.fits', 'HD30501-1-mixavg-h2otellcorr_1.fits']"], {}), "('fname', ['HD30501-1-mixavg-tellcorr_1.fits',\n 'HD30501-1-mixavg-h2otellcorr_1.fits'])\n", (188, 278), False, 'import pytest\n'), ((803, 852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chip"""', '[0, None, 5, 42]'], {}), "('chip', [0, None, 5, 42])\n", (826, 852), False, 'import pytest\n'), ((990, 1009), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {}), '()\n', (1007, 1009), False, 'import pytest\n'), ((1124, 1143), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {}), '()\n', (1141, 1143), False, 'import pytest\n'), ((318, 375), 'os.path.join', 'os.path.join', (['"""tests"""', '"""testdata"""', '"""handy_spectra"""', 'fname'], {}), "('tests', 'testdata', 'handy_spectra', fname)\n", (330, 375), False, 'import os\n'), ((390, 410), 'mingle.utilities.spectrum_utils.load_spectrum', 'load_spectrum', (['fname'], {}), '(fname)\n', (403, 410), False, 'from mingle.utilities.spectrum_utils import load_spectrum, select_observation\n'), ((520, 548), 'numpy.all', 'np.all', (['(results.xaxis > 2110)'], {}), '(results.xaxis > 2110)\n', (526, 548), True, 'import numpy as np\n'), ((566, 594), 'numpy.all', 'np.all', (['(results.xaxis < 2130)'], {}), '(results.xaxis < 2130)\n', (572, 594), True, 'import numpy as np\n'), ((612, 636), 'numpy.all', 'np.all', (['(results.flux < 2)'], {}), '(results.flux < 2)\n', (618, 636), True, 'import numpy as np\n'), ((648, 673), 'numpy.all', 'np.all', (['(results.flux >= 0)'], {}), '(results.flux >= 0)\n', (654, 673), True, 'import numpy as np\n'), ((747, 772), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (760, 772), False, 'import pytest\n'), ((782, 799), 'mingle.utilities.spectrum_utils.load_spectrum', 'load_spectrum', (['""""""'], {}), "('')\n", (795, 799), False, 'from mingle.utilities.spectrum_utils import load_spectrum, select_observation\n'), ((911, 936), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (924, 936), False, 'import pytest\n'), ((946, 986), 'mingle.utilities.spectrum_utils.select_observation', 'select_observation', (['"""HD30501"""', '"""1"""', 'chip'], {}), "('HD30501', '1', chip)\n", (964, 986), False, 'from mingle.utilities.spectrum_utils import load_spectrum, select_observation\n')]
|
import numpy as np
from mldftdat.pyscf_utils import *
from mldftdat.workflow_utils import safe_mem_cap_mb
from pyscf.dft.numint import eval_ao, make_mask
from mldftdat.density import LDA_FACTOR,\
contract21_deriv, contract21, GG_AMIN
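# Helper derivatives of the semilocal features, used for the chain rule in
# v_basis_transform below. Convention: sigma = |nabla n|^2.
#   dtauw(rho_data)  -> (d tau_w/d n, d tau_w/d sigma), with tau_w = sigma / (8 n)
#   ds2(rho_data)    -> (d s^2/d n,  d s^2/d sigma)
#   dalpha(rho_data) -> (d alpha/d n, d alpha/d sigma, d alpha/d tau)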
def dtauw(rho_data):
return - get_gradient_magnitude(rho_data)**2 / (8 * rho_data[0,:]**2 + 1e-16),\
1 / (8 * rho_data[0,:] + 1e-16)
def dsdp(s):
return 1 / (2 * s)
def dasinhsdp(s):
return arcsinh_deriv(s) / (2 * s + 1e-10)
def ds2(rho_data):
    # s = |nabla n| / (b * n**(4.0/3)), with b = 2 * (3 * pi^2)**(1/3)
rho = rho_data[0,:]
b = 2 * (3 * np.pi * np.pi)**(1.0/3)
s = get_gradient_magnitude(rho_data) / (b * rho**(4.0/3) + 1e-16)
s2 = s**2
return -8.0 * s2 / (3 * rho + 1e-16),\
1 / (b * rho**(4.0/3) + 1e-16)**2
def dalpha(rho_data):
rho = rho_data[0,:]
tau = rho_data[5,:]
tau0 = get_uniform_tau(rho) + 1e-16
mag_grad = get_gradient_magnitude(rho_data)
tauw = get_single_orbital_tau(rho, mag_grad)
dwdn, dwds = dtauw(rho_data)
return 5.0 * (tauw - tau) / (3 * tau0 * rho + 1e-16) - dwdn / tau0,\
- dwds / tau0,\
1 / tau0
LDA_FACTOR = - 3.0 / 4.0 * (3.0 / np.pi)**(1.0/3)
def v_semilocal(rho_data, F, dfdp, dfdalpha):
# 0 - n, 1 - p, 2 - nabla^2, 3 - alpha
v = np.zeros((4, rho_data.shape[1]))
rho = rho_data[0,:]
elda = LDA_FACTOR * rho**(4.0/3)
# dE/dn line 1
v[0] = 4.0 / 3 * LDA_FACTOR * rho**(1.0/3) * F
# dE/dp line 1
v[1] = elda * dfdp
# dE/dalpha line 1
v[3] = elda * dfdalpha
return v
def v_basis_transform(rho_data, v_npalpha):
"""
Transforms the basis of the exchange potential from
density, reduced gradient, and alpha to
density, contracted gradient, and kinetic energy.
v_npalpha is a 3xN array:
0 - Functional derivative of the exchange energy
explicitly with respect to the density, i.e.
not accounting for derivatives of the XEF features
wrt density
1 - Functional derivative wrt the square of the reduced
gradient p
2 - ZERO (Functional derivative wrt normalized laplacian)
3 - Functional derivative wrt the isoorbital indicator
alpha
Returns a 3xN array:
0 - Full functional derivative of the exchange energy
wrt the density, accounting for dp/dn and dalpha/dn
1 - Derivative wrt sigma, the contracted gradient |nabla n|^2
        2 - ZERO (Derivative wrt the laplacian of the density)
3 - Derivative wrt tau, the kinetic energy density
"""
v_nst = np.zeros(v_npalpha.shape)
# dE/dn lines 1-3
v_nst[0] = v_npalpha[0]
dpdn, dpdsigma = ds2(rho_data)
# dE/dn line 4 term 1
v_nst[0] += v_npalpha[1] * dpdn
# dE/dsigma term 1
v_nst[1] += v_npalpha[1] * dpdsigma
dadn, dadsigma, dadtau = dalpha(rho_data)
# dE/dn line 4 term 2
v_nst[0] += v_npalpha[3] * dadn
# dE/dsigma term 2
v_nst[1] += v_npalpha[3] * dadsigma
# dE/dtau
v_nst[3] = v_npalpha[3] * dadtau
return v_nst
def v_nonlocal_general(rho_data, grid, dedg, density, auxmol,
g, gr2, ovlp, l = 0, mul = 1.0):
# g should have shape (2l+1, N)
N = grid.weights.shape[0]
lc = get_dft_input2(rho_data)[:3]
if l == 0:
dedb = dedg.reshape(1, -1)
elif l == 1:
#dedb = 2 * elda * g * dfdg
dedb = 2 * dedg * g #/ (np.linalg.norm(g, axis=0) + 1e-10)
elif l == 2:
dedb = 2 * dedg * g / np.sqrt(5)
elif l == -2:
dedb = dedg
l = 2
elif l == -1:
dedb = dedg
l = 1
else:
raise ValueError('angular momentum code l=%d unknown' % l)
rho, s, alpha = lc
a = np.pi * (mul * rho / 2 + 1e-16)**(2.0 / 3)
scale = 1
fac = (6 * np.pi**2)**(2.0/3) / (16 * np.pi)
scale += GG_SMUL * fac * s**2
scale += GG_AMUL * 0.6 * fac * (alpha - 1)
a = a * scale
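    # Smoothly floor the Gaussian exponent: below GG_AMIN, a is mapped to
    # GG_AMIN * exp(a/GG_AMIN - 1), which matches value and slope at a = GG_AMIN.
    # `da` is the corresponding rescaling applied to dadn/dadp/dadalpha below.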
cond = a < GG_AMIN
da = np.exp(a[cond] / GG_AMIN - 1)
a[cond] = GG_AMIN * np.exp(a[cond] / GG_AMIN - 1)
# (ngrid * (2l+1), naux)
dedb[:,rho<1e-8] = 0
dedaux = np.dot((dedb * grid.weights).T.flatten(), ovlp)
dgda = l / (2 * a) * g - gr2
#print(dgda.shape, gr2.shape)
dgda[:,rho<1e-8] = 0
dadn = mul * a / (3 * (mul * rho / 2 + 1e-16))
dadp = GG_SMUL * np.pi * fac * (mul * rho / 2 + 1e-16)**(2.0/3)
dadalpha = GG_AMUL * 0.6 * np.pi * fac * (mul * rho / 2 + 1e-16)**(2.0/3)
dadn[cond] *= da
dadp[cond] *= da
dadalpha[cond] *= da
# add in line 3 of dE/dn, line 2 of dE/dp and dE/dalpha
v_npa = np.zeros((4, N))
deda = np.einsum('mi,mi->i', dedb, dgda)
v_npa[0] = deda * dadn
v_npa[1] = deda * dadp
v_npa[3] = deda * dadalpha
return v_npa, dedaux
def v_nonlocal(rho_data, grid, dedg, density, auxmol,
g, gr2, ovlp, l=0, a0=8.0, fac_mul=0.25,
amin=GG_AMIN, l_add=0, **kwargs):
#print(l, l_add, a0, fac_mul, amin)
# g should have shape (2l+1, N)
N = grid.weights.shape[0]
lc = get_dft_input2(rho_data)[:3]
if l == 0:
dedb = dedg.reshape(1, -1)
elif l == 1:
dedb = 2 * dedg * g
elif l == 2:
dedb = 2 * dedg * g / np.sqrt(5)
elif l == -2:
dedb = dedg
l = 2
elif l == -1:
dedb = dedg
l = 1
else:
raise ValueError('angular momentum code l=%d unknown' % l)
rho, s, alpha = lc
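    # Note: with the usual definitions of s and alpha, alpha + (5/3) s^2 = tau / tau_unif,
    # so `ratio` measures the kinetic energy density relative to the uniform-gas value.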
ratio = alpha + 5./3 * s**2
fac = fac_mul * 1.2 * (6 * np.pi**2)**(2.0/3) / np.pi
a = np.pi * (rho / 2 + 1e-16)**(2.0 / 3)
scale = a0 + (ratio-1) * fac
a = a * scale
cond = a < amin
da = np.exp(a[cond] / amin - 1)
a[cond] = amin * np.exp(a[cond] / amin - 1)
# (ngrid * (2l+1), naux)
dedb[:,rho<1e-8] = 0
dedaux = np.dot((dedb * grid.weights).T.flatten(), ovlp)
dgda = (l + l_add) / (2 * a) * g - gr2
dgda[:,rho<1e-8] = 0
dadn = 2 * a / (3 * rho + 1e-16)
dadalpha = np.pi * fac * (rho / 2 + 1e-16)**(2.0/3)
dadp = 5./3 * dadalpha
dadn[cond] *= da
dadp[cond] *= da
dadalpha[cond] *= da
# add in line 3 of dE/dn, line 2 of dE/dp and dE/dalpha
v_npa = np.zeros((4, N))
deda = np.einsum('mi,mi->i', dedb, dgda)
v_npa[0] = deda * dadn
v_npa[1] = deda * dadp
v_npa[3] = deda * dadalpha
return v_npa, dedaux
def functional_derivative_loop(mol, mlfunc, dEddesc,
raw_desc, raw_desc_r2,
rho_data, density, ovlps, grid):
"""
Core functional derivative loop for the CIDER features,
called by NLNumInt.
Args:
mol (pyscf.gto.Mole): molecule object
mlfunc (MLFunctional): Exchange functional
dEddesc (np.ndarray): ngrid x ndesc array of energy derivatives
with respect to the descriptors.
raw_desc (np.ndarray): raw CIDER descriptor vectors
raw_desc_r2 (np.ndarray): raw CIDER descriptor vectors <r^2>
for use in functional derivative with respect to the Gaussian
exponents
rho_data (np.ndarray): 6 x ngrid
density (np.ndarray): density in DF basis space
ovlps (np.ndarray): Overlaps of the CIDER descriptor functions with
the DF basis
grid: contains coords and weights of the real-space grid
"""
gg_dict = {
'a0': mlfunc.a0,
'amin': mlfunc.amin,
'fac_mul': mlfunc.fac_mul
}
N = grid.weights.shape[0]
naux = mol.auxmol.nao_nr()
sprefac = 2 * (3 * np.pi * np.pi)**(1.0/3)
n43 = rho_data[0]**(4.0/3)
svec = rho_data[1:4] / (sprefac * n43 + 1e-20)
v_npa = np.zeros((4, N))
v_aniso = np.zeros((3, N))
v_aux = np.zeros(naux)
for i, d in enumerate(mlfunc.desc_order):
if d == 0:
v_npa[0] += dEddesc[:,i]
elif d == 1:
v_npa[1] += dEddesc[:,i]
elif d == 2:
v_npa[3] += dEddesc[:,i]
else:
gg_kwargs = gg_dict
l_add = 0
if d in [3, 10, 11]:
if d == 3:
g = raw_desc[6]
ovlp = ovlps[0]
gr2 = raw_desc_r2[6:7]
elif d == 10:
g = raw_desc[15]
ovlp = ovlps[3]
gr2 = raw_desc_r2[15:16]
if mlfunc.desc_version == 'c':
l_add = 2
mul = 1.0
else:
mul = 0.25**(2./3)
gg_kwargs = {
'a0': mlfunc.a0 * mul,
'fac_mul': mlfunc.fac_mul * mul,
'amin': mlfunc.amin * mul
}
else:
g = raw_desc[16]
ovlp = ovlps[4]
gr2 = raw_desc_r2[16:17]
if mlfunc.desc_version == 'c':
mul = 2.0
else:
mul = 4**(2./3)
gg_kwargs = {
'a0': mlfunc.a0 * mul,
'fac_mul': mlfunc.fac_mul * mul,
'amin': mlfunc.amin * mul
}
l = 0
elif d == 4:
g = raw_desc[7:10]
gr2 = raw_desc_r2[7:10]
ovlp = ovlps[1]
l = 1
elif d == 6:
g = raw_desc[10:15]
gr2 = raw_desc_r2[10:15]
ovlp = ovlps[2]
l = 2
elif d == 5:
g = raw_desc[7:10]
gr2 = raw_desc_r2[7:10]
ovlp = ovlps[1]
dfmul = svec
v_aniso += dEddesc[:,i] * g
l = -1
elif d == 7:
l = -2
g = raw_desc[10:15]
gr2 = raw_desc_r2[10:15]
ovlp = ovlps[2]
dfmul = contract21_deriv(svec)
ddesc_dsvec = contract21(g, svec)
v_aniso += dEddesc[:,i] * 2 * ddesc_dsvec
elif d == 8:
g2 = raw_desc[10:15]
g2r2 = raw_desc_r2[10:15]
ovlp2 = ovlps[2]
g1 = raw_desc[7:10]
g1r2 = raw_desc_r2[7:10]
ovlp1 = ovlps[1]
dfmul = contract21_deriv(svec, g1)
ddesc_dsvec = contract21(g2, g1)
ddesc_dg1 = contract21(g2, svec)
v_aniso += dEddesc[:,i] * ddesc_dsvec
vtmp1, dedaux1 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * ddesc_dg1,
density, mol.auxmol, g1,
g1r2, ovlp1, l=-1, **gg_kwargs)
vtmp2, dedaux2 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * dfmul,
density, mol.auxmol, g2,
g2r2, ovlp2, l=-2, **gg_kwargs)
vtmp = vtmp1 + vtmp2
dedaux = dedaux1 + dedaux2
elif d == 9:
g2 = raw_desc[10:15]
g2r2 = raw_desc_r2[10:15]
ovlp2 = ovlps[2]
g1 = raw_desc[7:10]
g1r2 = raw_desc_r2[7:10]
ovlp1 = ovlps[1]
dfmul = contract21_deriv(g1)
ddesc_dg1 = 2 * contract21(g2, g1)
vtmp1, dedaux1 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * ddesc_dg1,
density, mol.auxmol, g1,
g1r2, ovlp1, l=-1, **gg_kwargs)
vtmp2, dedaux2 = v_nonlocal(rho_data, grid,
dEddesc[:,i] * dfmul,
density, mol.auxmol, g2,
g2r2, ovlp2, l=-2, **gg_kwargs)
vtmp = vtmp1 + vtmp2
dedaux = dedaux1 + dedaux2
else:
raise NotImplementedError('Cannot take derivative for code %d' % d)
if d in [5, 7]:
vtmp, dedaux = v_nonlocal(rho_data, grid,
dEddesc[:,i] * dfmul,
density, mol.auxmol, g,
gr2, ovlp, l=l, **gg_kwargs)
elif d in [8, 9]:
pass
else:
vtmp, dedaux = v_nonlocal(rho_data, grid,
dEddesc[:,i],
density, mol.auxmol, g,
gr2, ovlp, l=l, l_add=l_add,
**gg_kwargs)
v_npa += vtmp
v_aux += dedaux
vtmp = None
dedaux = None
vmol = np.einsum('a,aij->ij', v_aux, mol.ao_to_aux)
v_nst = v_basis_transform(rho_data, v_npa)
v_nst[0] += np.einsum('ap,ap->p', -4.0 * svec / (3 * rho_data[0] + 1e-20), v_aniso)
v_grad = v_aniso / (sprefac * n43 + 1e-20)
return v_nst, v_grad, vmol
def get_density_in_basis(ao_to_aux, rdm1):
return np.einsum('npq,pq->n', ao_to_aux, rdm1)
def arcsinh_deriv(x):
return 1 / np.sqrt(x * x + 1)
def get_chi(alpha):
return 1 / (1 + alpha**2)
def chi_deriv(alpha):
return -2 * alpha / (1 + alpha**2)**2
|
[
"numpy.sqrt",
"mldftdat.density.contract21_deriv",
"numpy.exp",
"numpy.zeros",
"numpy.einsum",
"mldftdat.density.contract21"
] |
[((1313, 1345), 'numpy.zeros', 'np.zeros', (['(4, rho_data.shape[1])'], {}), '((4, rho_data.shape[1]))\n', (1321, 1345), True, 'import numpy as np\n'), ((2617, 2642), 'numpy.zeros', 'np.zeros', (['v_npalpha.shape'], {}), '(v_npalpha.shape)\n', (2625, 2642), True, 'import numpy as np\n'), ((3994, 4023), 'numpy.exp', 'np.exp', (['(a[cond] / GG_AMIN - 1)'], {}), '(a[cond] / GG_AMIN - 1)\n', (4000, 4023), True, 'import numpy as np\n'), ((4624, 4640), 'numpy.zeros', 'np.zeros', (['(4, N)'], {}), '((4, N))\n', (4632, 4640), True, 'import numpy as np\n'), ((4652, 4685), 'numpy.einsum', 'np.einsum', (['"""mi,mi->i"""', 'dedb', 'dgda'], {}), "('mi,mi->i', dedb, dgda)\n", (4661, 4685), True, 'import numpy as np\n'), ((5673, 5699), 'numpy.exp', 'np.exp', (['(a[cond] / amin - 1)'], {}), '(a[cond] / amin - 1)\n', (5679, 5699), True, 'import numpy as np\n'), ((6193, 6209), 'numpy.zeros', 'np.zeros', (['(4, N)'], {}), '((4, N))\n', (6201, 6209), True, 'import numpy as np\n'), ((6221, 6254), 'numpy.einsum', 'np.einsum', (['"""mi,mi->i"""', 'dedb', 'dgda'], {}), "('mi,mi->i', dedb, dgda)\n", (6230, 6254), True, 'import numpy as np\n'), ((7667, 7683), 'numpy.zeros', 'np.zeros', (['(4, N)'], {}), '((4, N))\n', (7675, 7683), True, 'import numpy as np\n'), ((7698, 7714), 'numpy.zeros', 'np.zeros', (['(3, N)'], {}), '((3, N))\n', (7706, 7714), True, 'import numpy as np\n'), ((7727, 7741), 'numpy.zeros', 'np.zeros', (['naux'], {}), '(naux)\n', (7735, 7741), True, 'import numpy as np\n'), ((13019, 13063), 'numpy.einsum', 'np.einsum', (['"""a,aij->ij"""', 'v_aux', 'mol.ao_to_aux'], {}), "('a,aij->ij', v_aux, mol.ao_to_aux)\n", (13028, 13063), True, 'import numpy as np\n'), ((13127, 13198), 'numpy.einsum', 'np.einsum', (['"""ap,ap->p"""', '(-4.0 * svec / (3 * rho_data[0] + 1e-20))', 'v_aniso'], {}), "('ap,ap->p', -4.0 * svec / (3 * rho_data[0] + 1e-20), v_aniso)\n", (13136, 13198), True, 'import numpy as np\n'), ((13338, 13377), 'numpy.einsum', 'np.einsum', (['"""npq,pq->n"""', 'ao_to_aux', 'rdm1'], {}), "('npq,pq->n', ao_to_aux, rdm1)\n", (13347, 13377), True, 'import numpy as np\n'), ((4048, 4077), 'numpy.exp', 'np.exp', (['(a[cond] / GG_AMIN - 1)'], {}), '(a[cond] / GG_AMIN - 1)\n', (4054, 4077), True, 'import numpy as np\n'), ((5721, 5747), 'numpy.exp', 'np.exp', (['(a[cond] / amin - 1)'], {}), '(a[cond] / amin - 1)\n', (5727, 5747), True, 'import numpy as np\n'), ((13416, 13434), 'numpy.sqrt', 'np.sqrt', (['(x * x + 1)'], {}), '(x * x + 1)\n', (13423, 13434), True, 'import numpy as np\n'), ((3533, 3543), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (3540, 3543), True, 'import numpy as np\n'), ((5242, 5252), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (5249, 5252), True, 'import numpy as np\n'), ((9992, 10014), 'mldftdat.density.contract21_deriv', 'contract21_deriv', (['svec'], {}), '(svec)\n', (10008, 10014), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, contract21, GG_AMIN\n'), ((10045, 10064), 'mldftdat.density.contract21', 'contract21', (['g', 'svec'], {}), '(g, svec)\n', (10055, 10064), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, contract21, GG_AMIN\n'), ((10394, 10420), 'mldftdat.density.contract21_deriv', 'contract21_deriv', (['svec', 'g1'], {}), '(svec, g1)\n', (10410, 10420), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, contract21, GG_AMIN\n'), ((10451, 10469), 'mldftdat.density.contract21', 'contract21', (['g2', 'g1'], {}), '(g2, g1)\n', (10461, 10469), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, 
contract21, GG_AMIN\n'), ((10498, 10518), 'mldftdat.density.contract21', 'contract21', (['g2', 'svec'], {}), '(g2, svec)\n', (10508, 10518), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, contract21, GG_AMIN\n'), ((11452, 11472), 'mldftdat.density.contract21_deriv', 'contract21_deriv', (['g1'], {}), '(g1)\n', (11468, 11472), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, contract21, GG_AMIN\n'), ((11505, 11523), 'mldftdat.density.contract21', 'contract21', (['g2', 'g1'], {}), '(g2, g1)\n', (11515, 11523), False, 'from mldftdat.density import LDA_FACTOR, contract21_deriv, contract21, GG_AMIN\n')]
|
import numpy as np
import WDRT.ESSC as ESSC
import copy
import matplotlib.pyplot as plt
# Create buoy object, in this case for Station #46022
buoy46022 = ESSC.Buoy('46022', 'NDBC')
# Read data from ndbc.noaa.gov
#buoy46022.fetchFromWeb()
#buoy46022.saveAsTxt(savePath = "./Data")
#buoy46022.saveAsH5('NDBC46022.h5')
# Load data from .txt file if available
#buoy46022.loadFromTxt(r'C:\full\filepath\to\WDRT\examples\data\NDBC46022')
# Load data from .h5 file if available
buoy46022.loadFromH5(r'data\NDBC46022.h5')
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_R = 100 # Return periods (yrs) of interest
# Create PCA EA object for the buoy
pca46022 = ESSC.PCA(buoy46022)
# Calculate contour using PCA method
pca_Hs_Return, pca_T_Return = pca46022.getContours(Time_SS, Time_R)
# Show a plot of the data
pca46022.plotData()
# Sample Generation Example
num_contour_points = 20 # Number of points to be sampled for each
# contour interval.
contour_returns = np.array([0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100])
# Probabilities defining sampling contour bounds.
random_seed = 2 # Random seed for sample generation
# Get samples for a full sea state long term analysis
Hs_sampleFSS, T_sampleFSS, Weight_sampleFSS = pca46022.getSamples(num_contour_points,
contour_returns, random_seed)
# Get samples for a contour approach long term analysis
T_sampleCA = np.arange(12, 26, 2)
Hs_sampleCA = pca46022.getContourPoints(T_sampleCA)
# Save data in h5 file
#pca46022.saveContour(r'C:\full\filepath\to\WDRT\examples\NDBC%s' % (pca46022.buoy.buoyNum))
#pca46022.saveContour(r'testNDBC%s' % (pca46022.buoy.buoyNum))
pca46022.saveContour(r'data\NDBC%s' % (pca46022.buoy.buoyNum))
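# NOTE: interactive debugging breakpoint on the next line; remove it to run the
# example non-interactively.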
import ipdb; ipdb.set_trace()
# Create EA objects for remaining contour methods
Gauss46022 = ESSC.GaussianCopula(buoy46022)
Gumbel46022 = ESSC.GumbelCopula(buoy46022)
Clayton46022 = ESSC.ClaytonCopula(buoy46022)
rosen46022 = ESSC.Rosenblatt(buoy46022)
NonParaGauss46022 = ESSC.NonParaGaussianCopula(buoy46022)
NonParaClay46022 = ESSC.NonParaClaytonCopula(buoy46022)
NonParaGum46022 = ESSC.NonParaGumbelCopula(buoy46022)
BivariateKDE46022 = ESSC.BivariateKDE(buoy46022, bw = [0.23, 0.23], logTransform = False)
BivariateLogKDE46022 = ESSC.BivariateKDE(buoy46022, bw = [0.02, 0.11], logTransform = True)
# Calculate contours for all remaining contour methods
Gauss_Hs_Return, Gauss_T_Return = Gauss46022.getContours(Time_SS, Time_R)
Gumbel_Hs_Return, Gumbel_T_Return = Gumbel46022.getContours(Time_SS, Time_R)
Clayton_Hs_Return, Clayton_T_Return = Clayton46022.getContours(Time_SS, Time_R)
rosen_Hs_Return, rosen_T_Return = rosen46022.getContours(Time_SS, Time_R)
NonParaGau_Hs_Return, NonParaGau_T_Return = NonParaGauss46022.getContours(Time_SS, Time_R)
NonParaClay_Hs_Return, NonParaClay_T_Return = NonParaClay46022.getContours(Time_SS, Time_R)
NonParaGum_Hs_Return, NonParaGum_T_Return = NonParaGum46022.getContours(Time_SS, Time_R)
KDE_Hs_Return, KDE_T_Return = BivariateKDE46022.getContours(Time_SS, Time_R)
logKDE_Hs_Return, logKDE_T_Return = BivariateLogKDE46022.getContours(Time_SS, Time_R)
# Plot all contour results for comparison
f = plt.figure()
f.canvas.set_window_title('NDBC%s, %i-year contours' % (buoy46022.buoyNum, Time_R))
plt.plot(buoy46022.T, buoy46022.Hs, 'bo', alpha=0.1, label='Data')
plt.plot(pca_T_Return, pca_Hs_Return, '-', label='PCA')
plt.plot(Gauss_T_Return, Gauss_Hs_Return, '-', label='Gaussian')
plt.plot(Gumbel_T_Return, Gumbel_Hs_Return, '-', label='Gumbel')
plt.plot(Clayton_T_Return, Clayton_Hs_Return, '-', label='Clayton')
plt.plot(rosen_T_Return, rosen_Hs_Return, '-', label='Rosenblatt')
plt.plot(NonParaGau_T_Return, NonParaGau_Hs_Return, 'g--', label='Non-Parametric Gaussian')
plt.plot(NonParaGum_T_Return, NonParaGum_Hs_Return, 'r--', label='Non-Parametric Gumbel')
plt.plot(NonParaClay_T_Return, NonParaClay_Hs_Return, 'c--', label='Non-Parametric Clayton')
plt.plot(KDE_T_Return, KDE_Hs_Return, 'm--', label = 'Bivariate KDE')
plt.plot(logKDE_T_Return, logKDE_Hs_Return, 'b--', label = 'Bivariate KDE (log)')
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.grid(True)
plt.legend(loc='center right', bbox_to_anchor=(1.4,0.5),fontsize=10, fancybox=True)
plt.show()
# Modify contour by steepness curve if they intersect
# Declare required parameters
depth = 391.4 # Depth at measurement point (m)
SteepMax = 0.07 # Optional: enter estimate of breaking steepness
T_vals = np.arange(0.1, np.amax(buoy46022.T), 0.1)
#Note, if depth is not inputted manually, it will automatically be retrieved from NDBC's website
SteepH = pca46022.steepness(SteepMax, T_vals,depth = depth)
SteepH_Return = pca46022.steepness(SteepMax, pca46022.T_ReturnContours, depth = depth)
Steep_correction = np.where(SteepH_Return < pca46022.Hs_ReturnContours)
Hs_Return_Steep = copy.deepcopy(pca46022.Hs_ReturnContours)
Hs_Return_Steep[Steep_correction] = SteepH_Return[Steep_correction]
pca46022.plotSampleData()
# Take a subset of 10 years of data and calculate a 20-year contour using the subset
Time_R = 20
subsetBuoy = buoy46022.createSubsetBuoy(10)
subsetPCA = ESSC.PCA(subsetBuoy)
Subset_Hs_Return, Subset_T_Return = subsetPCA.getContours(Time_SS, Time_R)
# Plot contour and subsetted data
f = plt.figure()
f.canvas.set_window_title('NDBC%s, %i-year contours' % (subsetBuoy.buoyNum, Time_R))
plt.plot(subsetBuoy.T, subsetBuoy.Hs, 'bo', alpha=0.1, label='Data')
plt.plot(Subset_T_Return, Subset_Hs_Return, '-', label = 'PCA')
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.grid(True)
plt.legend(loc='center right', bbox_to_anchor=(1.4,0.5),fontsize=10, fancybox=True)
plt.show()
# Determine which buoy observations are outside of the contour
outsideT, outsideHs = subsetPCA.outsidePoints()
# Determine the area of the contour
subsetPCAArea = subsetPCA.contourIntegrator()
# Calculate bootstrap confidence intervals, commented out due to long run time
# Note that stable bootstrap confidence intervals require large sample sizes
# pca46022.bootStrap(boot_size=10)
# Gauss46022.bootStrap(boot_size=10)
# Gumbel46022.bootStrap(boot_size=10)
# Clayton46022.bootStrap(boot_size=10)
# rosen46022.bootStrap(boot_size=10)
# NonParaGauss46022.bootStrap(boot_size=10)
# NonParaClay46022.bootStrap(boot_size=10)
# NonParaGum46022.bootStrap(boot_size=10)
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"WDRT.ESSC.ClaytonCopula",
"numpy.array",
"WDRT.ESSC.BivariateKDE",
"copy.deepcopy",
"WDRT.ESSC.NonParaGumbelCopula",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"WDRT.ESSC.Buoy",
"WDRT.ESSC.GumbelCopula",
"WDRT.ESSC.NonParaClaytonCopula",
"ipdb.set_trace",
"WDRT.ESSC.PCA",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"WDRT.ESSC.GaussianCopula",
"WDRT.ESSC.NonParaGaussianCopula",
"matplotlib.pyplot.figure",
"WDRT.ESSC.Rosenblatt",
"numpy.amax"
] |
[((155, 181), 'WDRT.ESSC.Buoy', 'ESSC.Buoy', (['"""46022"""', '"""NDBC"""'], {}), "('46022', 'NDBC')\n", (164, 181), True, 'import WDRT.ESSC as ESSC\n'), ((686, 705), 'WDRT.ESSC.PCA', 'ESSC.PCA', (['buoy46022'], {}), '(buoy46022)\n', (694, 705), True, 'import WDRT.ESSC as ESSC\n'), ((993, 1051), 'numpy.array', 'np.array', (['[0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100]'], {}), '([0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50, 100])\n', (1001, 1051), True, 'import numpy as np\n'), ((1448, 1468), 'numpy.arange', 'np.arange', (['(12)', '(26)', '(2)'], {}), '(12, 26, 2)\n', (1457, 1468), True, 'import numpy as np\n'), ((1777, 1793), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (1791, 1793), False, 'import ipdb\n'), ((1857, 1887), 'WDRT.ESSC.GaussianCopula', 'ESSC.GaussianCopula', (['buoy46022'], {}), '(buoy46022)\n', (1876, 1887), True, 'import WDRT.ESSC as ESSC\n'), ((1902, 1930), 'WDRT.ESSC.GumbelCopula', 'ESSC.GumbelCopula', (['buoy46022'], {}), '(buoy46022)\n', (1919, 1930), True, 'import WDRT.ESSC as ESSC\n'), ((1946, 1975), 'WDRT.ESSC.ClaytonCopula', 'ESSC.ClaytonCopula', (['buoy46022'], {}), '(buoy46022)\n', (1964, 1975), True, 'import WDRT.ESSC as ESSC\n'), ((1989, 2015), 'WDRT.ESSC.Rosenblatt', 'ESSC.Rosenblatt', (['buoy46022'], {}), '(buoy46022)\n', (2004, 2015), True, 'import WDRT.ESSC as ESSC\n'), ((2036, 2073), 'WDRT.ESSC.NonParaGaussianCopula', 'ESSC.NonParaGaussianCopula', (['buoy46022'], {}), '(buoy46022)\n', (2062, 2073), True, 'import WDRT.ESSC as ESSC\n'), ((2093, 2129), 'WDRT.ESSC.NonParaClaytonCopula', 'ESSC.NonParaClaytonCopula', (['buoy46022'], {}), '(buoy46022)\n', (2118, 2129), True, 'import WDRT.ESSC as ESSC\n'), ((2148, 2183), 'WDRT.ESSC.NonParaGumbelCopula', 'ESSC.NonParaGumbelCopula', (['buoy46022'], {}), '(buoy46022)\n', (2172, 2183), True, 'import WDRT.ESSC as ESSC\n'), ((2204, 2269), 'WDRT.ESSC.BivariateKDE', 'ESSC.BivariateKDE', (['buoy46022'], {'bw': '[0.23, 0.23]', 'logTransform': '(False)'}), '(buoy46022, bw=[0.23, 0.23], logTransform=False)\n', (2221, 2269), True, 'import WDRT.ESSC as ESSC\n'), ((2297, 2361), 'WDRT.ESSC.BivariateKDE', 'ESSC.BivariateKDE', (['buoy46022'], {'bw': '[0.02, 0.11]', 'logTransform': '(True)'}), '(buoy46022, bw=[0.02, 0.11], logTransform=True)\n', (2314, 2361), True, 'import WDRT.ESSC as ESSC\n'), ((3209, 3221), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3219, 3221), True, 'import matplotlib.pyplot as plt\n'), ((3306, 3372), 'matplotlib.pyplot.plot', 'plt.plot', (['buoy46022.T', 'buoy46022.Hs', '"""bo"""'], {'alpha': '(0.1)', 'label': '"""Data"""'}), "(buoy46022.T, buoy46022.Hs, 'bo', alpha=0.1, label='Data')\n", (3314, 3372), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3428), 'matplotlib.pyplot.plot', 'plt.plot', (['pca_T_Return', 'pca_Hs_Return', '"""-"""'], {'label': '"""PCA"""'}), "(pca_T_Return, pca_Hs_Return, '-', label='PCA')\n", (3381, 3428), True, 'import matplotlib.pyplot as plt\n'), ((3429, 3493), 'matplotlib.pyplot.plot', 'plt.plot', (['Gauss_T_Return', 'Gauss_Hs_Return', '"""-"""'], {'label': '"""Gaussian"""'}), "(Gauss_T_Return, Gauss_Hs_Return, '-', label='Gaussian')\n", (3437, 3493), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3558), 'matplotlib.pyplot.plot', 'plt.plot', (['Gumbel_T_Return', 'Gumbel_Hs_Return', '"""-"""'], {'label': '"""Gumbel"""'}), "(Gumbel_T_Return, Gumbel_Hs_Return, '-', label='Gumbel')\n", (3502, 3558), True, 'import matplotlib.pyplot as plt\n'), ((3559, 3626), 'matplotlib.pyplot.plot', 'plt.plot', (['Clayton_T_Return', 'Clayton_Hs_Return', '"""-"""'], 
{'label': '"""Clayton"""'}), "(Clayton_T_Return, Clayton_Hs_Return, '-', label='Clayton')\n", (3567, 3626), True, 'import matplotlib.pyplot as plt\n'), ((3627, 3693), 'matplotlib.pyplot.plot', 'plt.plot', (['rosen_T_Return', 'rosen_Hs_Return', '"""-"""'], {'label': '"""Rosenblatt"""'}), "(rosen_T_Return, rosen_Hs_Return, '-', label='Rosenblatt')\n", (3635, 3693), True, 'import matplotlib.pyplot as plt\n'), ((3694, 3790), 'matplotlib.pyplot.plot', 'plt.plot', (['NonParaGau_T_Return', 'NonParaGau_Hs_Return', '"""g--"""'], {'label': '"""Non-Parametric Gaussian"""'}), "(NonParaGau_T_Return, NonParaGau_Hs_Return, 'g--', label=\n 'Non-Parametric Gaussian')\n", (3702, 3790), True, 'import matplotlib.pyplot as plt\n'), ((3786, 3880), 'matplotlib.pyplot.plot', 'plt.plot', (['NonParaGum_T_Return', 'NonParaGum_Hs_Return', '"""r--"""'], {'label': '"""Non-Parametric Gumbel"""'}), "(NonParaGum_T_Return, NonParaGum_Hs_Return, 'r--', label=\n 'Non-Parametric Gumbel')\n", (3794, 3880), True, 'import matplotlib.pyplot as plt\n'), ((3876, 3973), 'matplotlib.pyplot.plot', 'plt.plot', (['NonParaClay_T_Return', 'NonParaClay_Hs_Return', '"""c--"""'], {'label': '"""Non-Parametric Clayton"""'}), "(NonParaClay_T_Return, NonParaClay_Hs_Return, 'c--', label=\n 'Non-Parametric Clayton')\n", (3884, 3973), True, 'import matplotlib.pyplot as plt\n'), ((3969, 4036), 'matplotlib.pyplot.plot', 'plt.plot', (['KDE_T_Return', 'KDE_Hs_Return', '"""m--"""'], {'label': '"""Bivariate KDE"""'}), "(KDE_T_Return, KDE_Hs_Return, 'm--', label='Bivariate KDE')\n", (3977, 4036), True, 'import matplotlib.pyplot as plt\n'), ((4039, 4118), 'matplotlib.pyplot.plot', 'plt.plot', (['logKDE_T_Return', 'logKDE_Hs_Return', '"""b--"""'], {'label': '"""Bivariate KDE (log)"""'}), "(logKDE_T_Return, logKDE_Hs_Return, 'b--', label='Bivariate KDE (log)')\n", (4047, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4159), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy period, $T_e$ [s]"""'], {}), "('Energy period, $T_e$ [s]')\n", (4131, 4159), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4201), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sig. wave height, $H_s$ [m]"""'], {}), "('Sig. 
wave height, $H_s$ [m]')\n", (4170, 4201), True, 'import matplotlib.pyplot as plt\n'), ((4202, 4216), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4210, 4216), True, 'import matplotlib.pyplot as plt\n'), ((4217, 4306), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center right"""', 'bbox_to_anchor': '(1.4, 0.5)', 'fontsize': '(10)', 'fancybox': '(True)'}), "(loc='center right', bbox_to_anchor=(1.4, 0.5), fontsize=10,\n fancybox=True)\n", (4227, 4306), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4309, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4828, 4880), 'numpy.where', 'np.where', (['(SteepH_Return < pca46022.Hs_ReturnContours)'], {}), '(SteepH_Return < pca46022.Hs_ReturnContours)\n', (4836, 4880), True, 'import numpy as np\n'), ((4899, 4940), 'copy.deepcopy', 'copy.deepcopy', (['pca46022.Hs_ReturnContours'], {}), '(pca46022.Hs_ReturnContours)\n', (4912, 4940), False, 'import copy\n'), ((5190, 5210), 'WDRT.ESSC.PCA', 'ESSC.PCA', (['subsetBuoy'], {}), '(subsetBuoy)\n', (5198, 5210), True, 'import WDRT.ESSC as ESSC\n'), ((5325, 5337), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5335, 5337), True, 'import matplotlib.pyplot as plt\n'), ((5423, 5491), 'matplotlib.pyplot.plot', 'plt.plot', (['subsetBuoy.T', 'subsetBuoy.Hs', '"""bo"""'], {'alpha': '(0.1)', 'label': '"""Data"""'}), "(subsetBuoy.T, subsetBuoy.Hs, 'bo', alpha=0.1, label='Data')\n", (5431, 5491), True, 'import matplotlib.pyplot as plt\n'), ((5492, 5553), 'matplotlib.pyplot.plot', 'plt.plot', (['Subset_T_Return', 'Subset_Hs_Return', '"""-"""'], {'label': '"""PCA"""'}), "(Subset_T_Return, Subset_Hs_Return, '-', label='PCA')\n", (5500, 5553), True, 'import matplotlib.pyplot as plt\n'), ((5556, 5594), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy period, $T_e$ [s]"""'], {}), "('Energy period, $T_e$ [s]')\n", (5566, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5636), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sig. wave height, $H_s$ [m]"""'], {}), "('Sig. wave height, $H_s$ [m]')\n", (5605, 5636), True, 'import matplotlib.pyplot as plt\n'), ((5637, 5651), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5645, 5651), True, 'import matplotlib.pyplot as plt\n'), ((5652, 5741), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center right"""', 'bbox_to_anchor': '(1.4, 0.5)', 'fontsize': '(10)', 'fancybox': '(True)'}), "(loc='center right', bbox_to_anchor=(1.4, 0.5), fontsize=10,\n fancybox=True)\n", (5662, 5741), True, 'import matplotlib.pyplot as plt\n'), ((5736, 5746), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5744, 5746), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4556), 'numpy.amax', 'np.amax', (['buoy46022.T'], {}), '(buoy46022.T)\n', (4543, 4556), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# colors corresponding to initial flight, stance, second flight
colors = ['k', 'b', 'g']
### The attributes of sol are:
## sol.t : series of time-points at which the solution was calculated
## sol.y : simulation results, size 6 x times
## sol.t_events : list of the times of 7 events:
# - fall during flight
# - touchdown
# - fall during stance
# - lift-off
# - reversal during stance
# - apex during flight
# - fall during flight
### If the event did not occur then the array is empty.
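### The code below uses t_events[1] (touchdown), t_events[3] (lift-off) and
### t_events[5] (apex) for the nominal gait; the remaining entries are failures.
### (Such a `sol` object is typically produced by scipy.integrate.solve_ivp with
### seven event functions passed in the order listed above.)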
def com_visualisation(sol, leg_visibility=0.5, colors=colors, size=100, Ground=False):
'''
	Plot the centre-of-mass trajectory phase by phase; failure events are marked in red.
'''
times = sol.t
result = sol.y
t_events = sol.t_events
x_com = result[0]
y_com = result[1]
# plt.figure()
### Initial position
plt.scatter(x_com[0], y_com[0], color = colors[0], s = size)
foot_x = result[4,0]
foot_y = result[5,0]
plt.plot([foot_x,x_com[0]],[foot_y,y_com[0]], color = colors[0],
alpha = leg_visibility)
### First flight phase
if len(t_events[1]) == 0: # no touch-down
## Time of failure
if len(t_events[0]) == 0: # no fall during initial flight
print('No touch-down but no fall during flight')
else:
failure = t_events[0][0]
fail_index = np.argmax(times > failure)
plt.plot(x_com[:fail_index],y_com[:fail_index], color = colors[0])
plt.scatter(x_com[fail_index -1],y_com[fail_index-1],
color = 'r', s = size)
else:
touchdown = t_events[1][0]
index = np.argmax(times > touchdown)
foot_x = result[4,index]
plt.plot(x_com[:index],y_com[:index], color = colors[0])
plt.scatter(x_com[index-1],y_com[index-1], color = colors[1], s = size)
plt.plot([foot_x,x_com[index-1]],[0,y_com[index-1]], color = colors[1],
alpha = leg_visibility)
### Stance phase
if len(t_events[3]) == 0: # no lift-off
## Time of failure
failure = False
		if len(t_events[2]) == 0: # no fall during stance
			if len(t_events[4]) == 0: # no reversal during stance
print('No lift-off but no failure during stance')
else:
failure = t_events[4][0] # time of reversal
else:
failure = t_events[2][0] # time of fall
if failure:
fail_index = np.argmax(times > failure)
plt.plot(x_com[index:fail_index],y_com[index:fail_index],
color = colors[1])
plt.scatter(x_com[fail_index -1],y_com[fail_index-1],
color = 'r', s = size)
else:
liftoff = t_events[3][0]
lift_index = np.argmax(times > liftoff)
plt.plot(x_com[index-1:lift_index],y_com[index-1:lift_index],
color = colors[1])
plt.scatter(x_com[lift_index-1],y_com[lift_index-1],
color = colors[2], s = size)
plt.plot([foot_x,x_com[lift_index-1]],[0,y_com[lift_index-1]],
color = colors[2], alpha = leg_visibility)
### Flight phase
if len(t_events[5]) == 0: # no apex
## Time of failure
if len(t_events[6]) == 0: # no fall
print('No apex but no fall during flight')
else:
failure = t_events[6][0]
fail_index = np.argmax(times > failure)
plt.plot(x_com[lift_index-1:fail_index],y_com[lift_index-1:fail_index], color = colors[2])
plt.scatter(x_com[fail_index -1],y_com[fail_index-1], color = 'r', s = size)
else:
apex = t_events[5][0]
if times[-1] > apex:
apex_index = np.argmax(times > apex)
else:
apex_index = len(times)
plt.plot(x_com[lift_index-1:apex_index],
y_com[lift_index-1:apex_index], color = colors[2])
plt.scatter(x_com[apex_index-1],y_com[apex_index-1],
color = colors[0], s = size)
plt.plot([result[4,apex_index-1],x_com[apex_index-1]],
[result[5,apex_index-1],y_com[apex_index-1]],
color = colors[0], alpha = leg_visibility)
if Ground:
ground = result[-1]
plt.plot(x_com, ground, color = 'k')
else:
plt.axhline(y=0, color = 'k')
plt.xlabel('Horizontal position')
plt.ylabel('Vertical position')
def full_visualisation(sol, colors = colors, foot = False):
'''
This function only plots if there was no failure in the trial
'''
times = sol.t
result = sol.y
t_events = sol.t_events
labels = ['touchdown','liftoff','apex']
# If the trial was not a failure:
if len(t_events[1]) > 0 and len(t_events[3]) > 0 and len(t_events[5]) > 0:
events = [t_events[1][0],t_events[3][0],t_events[5][0]]
indices = [0]
for e in range(3):
indices.append(np.argmax(times >= events[e]))
if foot:
## Foot trajectory
foot_x = result[4]
foot_y = result[5]
plt.figure()
for e in range(3):
plt.subplot(221)
plt.plot(times[indices[e]:indices[e+1]], foot_x[indices[e]:indices[e+1]], color = colors[e])
plt.subplot(223)
plt.plot(times[indices[e]:indices[e+1]], foot_y[indices[e]:indices[e+1]], color = colors[e])
plt.subplot(122)
plt.plot(foot_x[indices[e]:indices[e+1]], foot_y[indices[e]:indices[e+1]], color = colors[e])
plt.scatter(foot_x[indices[e]], foot_y[indices[e]], color = colors[e])
## Indicate the events
for i in [3,1]:
plt.subplot(2,2,i)
plt.axvline(x = events[e], color = colors[e], label = labels[e])
## Legends and labels
plt.subplot(221)
plt.xticks([])
plt.ylabel('Horizontal position')
plt.subplot(223)
plt.ylabel('Vertical position')
plt.xlabel('Time')
plt.subplot(122)
plt.xlabel('Horizontal position')
plt.ylabel('Vertical position')
plt.title('Foot trajectory')
## CoM position
plt.figure()
for e in range(3):
for i in range(2):
for j in range(2):
plt.subplot(2,3,1+i+3*j)
plt.plot(times[indices[e]:indices[e+1]+1],
result[i+2*j,indices[e]:indices[e+1]+1],
color = colors[e])
plt.subplot(133)
plt.plot(result[0,indices[e]:indices[e+1]+1],
result[1,indices[e]:indices[e+1]+1], color = colors[e])
## Indicate the events
for i in range(2):
for j in range(2):
plt.subplot(2,3,1+i+3*j)
plt.axvline(x = events[e], color = colors[e],
label = labels[e])
plt.subplot(133)
index = np.argmax(times >= events[e])
plt.scatter(result[0,index], result[1,index], color = colors[e])
## Legends and labels
plt.subplot(231)
plt.legend(loc = 2)
plt.xticks([])
plt.ylabel('Horizontal position')
plt.subplot(232)
plt.xticks([])
plt.ylabel('Vertical position')
plt.subplot(234)
plt.xlabel('Time')
plt.ylabel('Horizontal speed')
plt.subplot(235)
plt.xlabel('Time')
plt.ylabel('Vertical speed')
plt.subplot(133)
plt.xlabel('Horizontal position')
plt.ylabel('Vertical position')
plt.title('CoM trajectory')
else:
print('The trial was a failure')
|
[
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argmax",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.legend"
] |
[((835, 891), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[0]', 'y_com[0]'], {'color': 'colors[0]', 's': 'size'}), '(x_com[0], y_com[0], color=colors[0], s=size)\n', (846, 891), True, 'import matplotlib.pyplot as plt\n'), ((941, 1033), 'matplotlib.pyplot.plot', 'plt.plot', (['[foot_x, x_com[0]]', '[foot_y, y_com[0]]'], {'color': 'colors[0]', 'alpha': 'leg_visibility'}), '([foot_x, x_com[0]], [foot_y, y_com[0]], color=colors[0], alpha=\n leg_visibility)\n', (949, 1033), True, 'import matplotlib.pyplot as plt\n'), ((3869, 3902), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal position"""'], {}), "('Horizontal position')\n", (3879, 3902), True, 'import matplotlib.pyplot as plt\n'), ((3904, 3935), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical position"""'], {}), "('Vertical position')\n", (3914, 3935), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1547), 'numpy.argmax', 'np.argmax', (['(times > touchdown)'], {}), '(times > touchdown)\n', (1528, 1547), True, 'import numpy as np\n'), ((1580, 1635), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com[:index]', 'y_com[:index]'], {'color': 'colors[0]'}), '(x_com[:index], y_com[:index], color=colors[0])\n', (1588, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1711), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[index - 1]', 'y_com[index - 1]'], {'color': 'colors[1]', 's': 'size'}), '(x_com[index - 1], y_com[index - 1], color=colors[1], s=size)\n', (1650, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1815), 'matplotlib.pyplot.plot', 'plt.plot', (['[foot_x, x_com[index - 1]]', '[0, y_com[index - 1]]'], {'color': 'colors[1]', 'alpha': 'leg_visibility'}), '([foot_x, x_com[index - 1]], [0, y_com[index - 1]], color=colors[1],\n alpha=leg_visibility)\n', (1721, 1815), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3821), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com', 'ground'], {'color': '"""k"""'}), "(x_com, ground, color='k')\n", (3795, 3821), True, 'import matplotlib.pyplot as plt\n'), ((3838, 3865), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""k"""'}), "(y=0, color='k')\n", (3849, 3865), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5453), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5451, 5453), True, 'import matplotlib.pyplot as plt\n'), ((6135, 6151), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (6146, 6151), True, 'import matplotlib.pyplot as plt\n'), ((6154, 6171), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (6164, 6171), True, 'import matplotlib.pyplot as plt\n'), ((6176, 6190), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6186, 6190), True, 'import matplotlib.pyplot as plt\n'), ((6193, 6226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Horizontal position"""'], {}), "('Horizontal position')\n", (6203, 6226), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6245), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (6240, 6245), True, 'import matplotlib.pyplot as plt\n'), ((6248, 6262), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6258, 6262), True, 'import matplotlib.pyplot as plt\n'), ((6265, 6296), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical position"""'], {}), "('Vertical position')\n", (6275, 6296), True, 'import matplotlib.pyplot as plt\n'), ((6299, 6315), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (6310, 6315), True, 'import matplotlib.pyplot as 
plt\n'), ((6318, 6336), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (6328, 6336), True, 'import matplotlib.pyplot as plt\n'), ((6339, 6369), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Horizontal speed"""'], {}), "('Horizontal speed')\n", (6349, 6369), True, 'import matplotlib.pyplot as plt\n'), ((6372, 6388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (6383, 6388), True, 'import matplotlib.pyplot as plt\n'), ((6391, 6409), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (6401, 6409), True, 'import matplotlib.pyplot as plt\n'), ((6412, 6440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical speed"""'], {}), "('Vertical speed')\n", (6422, 6440), True, 'import matplotlib.pyplot as plt\n'), ((6443, 6459), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (6454, 6459), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6495), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal position"""'], {}), "('Horizontal position')\n", (6472, 6495), True, 'import matplotlib.pyplot as plt\n'), ((6498, 6529), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical position"""'], {}), "('Vertical position')\n", (6508, 6529), True, 'import matplotlib.pyplot as plt\n'), ((6532, 6559), 'matplotlib.pyplot.title', 'plt.title', (['"""CoM trajectory"""'], {}), "('CoM trajectory')\n", (6541, 6559), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1313), 'numpy.argmax', 'np.argmax', (['(times > failure)'], {}), '(times > failure)\n', (1296, 1313), True, 'import numpy as np\n'), ((1317, 1382), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com[:fail_index]', 'y_com[:fail_index]'], {'color': 'colors[0]'}), '(x_com[:fail_index], y_com[:fail_index], color=colors[0])\n', (1325, 1382), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1463), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[fail_index - 1]', 'y_com[fail_index - 1]'], {'color': '"""r"""', 's': 'size'}), "(x_com[fail_index - 1], y_com[fail_index - 1], color='r', s=size)\n", (1398, 1463), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2521), 'numpy.argmax', 'np.argmax', (['(times > liftoff)'], {}), '(times > liftoff)\n', (2504, 2521), True, 'import numpy as np\n'), ((2525, 2613), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com[index - 1:lift_index]', 'y_com[index - 1:lift_index]'], {'color': 'colors[1]'}), '(x_com[index - 1:lift_index], y_com[index - 1:lift_index], color=\n colors[1])\n', (2533, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2700), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[lift_index - 1]', 'y_com[lift_index - 1]'], {'color': 'colors[2]', 's': 'size'}), '(x_com[lift_index - 1], y_com[lift_index - 1], color=colors[2],\n s=size)\n', (2625, 2700), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2817), 'matplotlib.pyplot.plot', 'plt.plot', (['[foot_x, x_com[lift_index - 1]]', '[0, y_com[lift_index - 1]]'], {'color': 'colors[2]', 'alpha': 'leg_visibility'}), '([foot_x, x_com[lift_index - 1]], [0, y_com[lift_index - 1]], color\n =colors[2], alpha=leg_visibility)\n', (2712, 2817), True, 'import matplotlib.pyplot as plt\n'), ((4513, 4525), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4523, 4525), True, 'import matplotlib.pyplot as plt\n'), ((5147, 5163), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (5158, 5163), True, 'import matplotlib.pyplot as plt\n'), ((5167, 5181), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', 
(5177, 5181), True, 'import matplotlib.pyplot as plt\n'), ((5185, 5218), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Horizontal position"""'], {}), "('Horizontal position')\n", (5195, 5218), True, 'import matplotlib.pyplot as plt\n'), ((5222, 5238), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (5233, 5238), True, 'import matplotlib.pyplot as plt\n'), ((5242, 5273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical position"""'], {}), "('Vertical position')\n", (5252, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (5287, 5295), True, 'import matplotlib.pyplot as plt\n'), ((5299, 5315), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (5310, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5319, 5352), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal position"""'], {}), "('Horizontal position')\n", (5329, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5356, 5387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical position"""'], {}), "('Vertical position')\n", (5366, 5387), True, 'import matplotlib.pyplot as plt\n'), ((5391, 5419), 'matplotlib.pyplot.title', 'plt.title', (['"""Foot trajectory"""'], {}), "('Foot trajectory')\n", (5400, 5419), True, 'import matplotlib.pyplot as plt\n'), ((5675, 5691), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (5686, 5691), True, 'import matplotlib.pyplot as plt\n'), ((5695, 5809), 'matplotlib.pyplot.plot', 'plt.plot', (['result[0, indices[e]:indices[e + 1] + 1]', 'result[1, indices[e]:indices[e + 1] + 1]'], {'color': 'colors[e]'}), '(result[0, indices[e]:indices[e + 1] + 1], result[1, indices[e]:\n indices[e + 1] + 1], color=colors[e])\n', (5703, 5809), True, 'import matplotlib.pyplot as plt\n'), ((5983, 5999), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (5994, 5999), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6040), 'numpy.argmax', 'np.argmax', (['(times >= events[e])'], {}), '(times >= events[e])\n', (6020, 6040), True, 'import numpy as np\n'), ((6044, 6108), 'matplotlib.pyplot.scatter', 'plt.scatter', (['result[0, index]', 'result[1, index]'], {'color': 'colors[e]'}), '(result[0, index], result[1, index], color=colors[e])\n', (6055, 6108), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2268), 'numpy.argmax', 'np.argmax', (['(times > failure)'], {}), '(times > failure)\n', (2251, 2268), True, 'import numpy as np\n'), ((2273, 2348), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com[index:fail_index]', 'y_com[index:fail_index]'], {'color': 'colors[1]'}), '(x_com[index:fail_index], y_com[index:fail_index], color=colors[1])\n', (2281, 2348), True, 'import matplotlib.pyplot as plt\n'), ((2360, 2436), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[fail_index - 1]', 'y_com[fail_index - 1]'], {'color': '"""r"""', 's': 'size'}), "(x_com[fail_index - 1], y_com[fail_index - 1], color='r', s=size)\n", (2371, 2436), True, 'import matplotlib.pyplot as plt\n'), ((3394, 3491), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com[lift_index - 1:apex_index]', 'y_com[lift_index - 1:apex_index]'], {'color': 'colors[2]'}), '(x_com[lift_index - 1:apex_index], y_com[lift_index - 1:apex_index],\n color=colors[2])\n', (3402, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3496, 3582), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[apex_index - 1]', 'y_com[apex_index - 1]'], {'color': 'colors[0]', 's': 'size'}), 
'(x_com[apex_index - 1], y_com[apex_index - 1], color=colors[0],\n s=size)\n', (3507, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3588, 3749), 'matplotlib.pyplot.plot', 'plt.plot', (['[result[4, apex_index - 1], x_com[apex_index - 1]]', '[result[5, apex_index - 1], y_com[apex_index - 1]]'], {'color': 'colors[0]', 'alpha': 'leg_visibility'}), '([result[4, apex_index - 1], x_com[apex_index - 1]], [result[5, \n apex_index - 1], y_com[apex_index - 1]], color=colors[0], alpha=\n leg_visibility)\n', (3596, 3749), True, 'import matplotlib.pyplot as plt\n'), ((4401, 4430), 'numpy.argmax', 'np.argmax', (['(times >= events[e])'], {}), '(times >= events[e])\n', (4410, 4430), True, 'import numpy as np\n'), ((4552, 4568), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (4563, 4568), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4672), 'matplotlib.pyplot.plot', 'plt.plot', (['times[indices[e]:indices[e + 1]]', 'foot_x[indices[e]:indices[e + 1]]'], {'color': 'colors[e]'}), '(times[indices[e]:indices[e + 1]], foot_x[indices[e]:indices[e + 1]\n ], color=colors[e])\n', (4581, 4672), True, 'import matplotlib.pyplot as plt\n'), ((4670, 4686), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (4681, 4686), True, 'import matplotlib.pyplot as plt\n'), ((4691, 4790), 'matplotlib.pyplot.plot', 'plt.plot', (['times[indices[e]:indices[e + 1]]', 'foot_y[indices[e]:indices[e + 1]]'], {'color': 'colors[e]'}), '(times[indices[e]:indices[e + 1]], foot_y[indices[e]:indices[e + 1]\n ], color=colors[e])\n', (4699, 4790), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4804), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (4799, 4804), True, 'import matplotlib.pyplot as plt\n'), ((4809, 4909), 'matplotlib.pyplot.plot', 'plt.plot', (['foot_x[indices[e]:indices[e + 1]]', 'foot_y[indices[e]:indices[e + 1]]'], {'color': 'colors[e]'}), '(foot_x[indices[e]:indices[e + 1]], foot_y[indices[e]:indices[e + 1\n ]], color=colors[e])\n', (4817, 4909), True, 'import matplotlib.pyplot as plt\n'), ((4907, 4975), 'matplotlib.pyplot.scatter', 'plt.scatter', (['foot_x[indices[e]]', 'foot_y[indices[e]]'], {'color': 'colors[e]'}), '(foot_x[indices[e]], foot_y[indices[e]], color=colors[e])\n', (4918, 4975), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3070), 'numpy.argmax', 'np.argmax', (['(times > failure)'], {}), '(times > failure)\n', (3053, 3070), True, 'import numpy as np\n'), ((3076, 3173), 'matplotlib.pyplot.plot', 'plt.plot', (['x_com[lift_index - 1:fail_index]', 'y_com[lift_index - 1:fail_index]'], {'color': 'colors[2]'}), '(x_com[lift_index - 1:fail_index], y_com[lift_index - 1:fail_index],\n color=colors[2])\n', (3084, 3173), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3248), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_com[fail_index - 1]', 'y_com[fail_index - 1]'], {'color': '"""r"""', 's': 'size'}), "(x_com[fail_index - 1], y_com[fail_index - 1], color='r', s=size)\n", (3183, 3248), True, 'import matplotlib.pyplot as plt\n'), ((3327, 3350), 'numpy.argmax', 'np.argmax', (['(times > apex)'], {}), '(times > apex)\n', (3336, 3350), True, 'import numpy as np\n'), ((5030, 5050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', 'i'], {}), '(2, 2, i)\n', (5041, 5050), True, 'import matplotlib.pyplot as plt\n'), ((5054, 5112), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'events[e]', 'color': 'colors[e]', 'label': 'labels[e]'}), '(x=events[e], color=colors[e], label=labels[e])\n', (5065, 5112), True, 'import 
matplotlib.pyplot as plt\n'), ((5525, 5557), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1 + i + 3 * j)'], {}), '(2, 3, 1 + i + 3 * j)\n', (5536, 5557), True, 'import matplotlib.pyplot as plt\n'), ((5555, 5673), 'matplotlib.pyplot.plot', 'plt.plot', (['times[indices[e]:indices[e + 1] + 1]', 'result[i + 2 * j, indices[e]:indices[e + 1] + 1]'], {'color': 'colors[e]'}), '(times[indices[e]:indices[e + 1] + 1], result[i + 2 * j, indices[e]\n :indices[e + 1] + 1], color=colors[e])\n', (5563, 5673), True, 'import matplotlib.pyplot as plt\n'), ((5878, 5910), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1 + i + 3 * j)'], {}), '(2, 3, 1 + i + 3 * j)\n', (5889, 5910), True, 'import matplotlib.pyplot as plt\n'), ((5908, 5966), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'events[e]', 'color': 'colors[e]', 'label': 'labels[e]'}), '(x=events[e], color=colors[e], label=labels[e])\n', (5919, 5966), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
class perceptron(object):
    #eta: learning rate
    #n_iter: number of passes over the training data
def __init__(self,eta,n_iter):
self.eta=eta
self.n_iter=n_iter
def fit(self,x,y):
'''
x=ndarray(n_samples,n_features),training data
y=ndarray(n_samples),labels
returns
self:object
w_:1darray,weights after fitting
        errors_:list, number of misclassifications in each epoch
'''
#init
self.w_=np.zeros(np.shape(x)[1]+1)
self.errors_=[]
for _ in range(self.n_iter):
errors=0
for xi,yi in zip(x,y):
                update=self.eta*(yi-self.predict(xi))  # perceptron rule: w <- w + eta*(y - y_hat)*x
                self.w_[1:]+=update*xi
                self.w_[0]+=update
                errors+=int(update!=0.0)
self.errors_.append(errors)
print(self.errors_)
return self
def net_input(self,x):
'''
calculate net input
'''
return np.dot(x,self.w_[1:])+self.w_[0]
def predict(self,x):
'''
        threshold (sign) function: return class label 1 or -1
'''
return np.where(self.net_input(x)>=0.0,1,-1)
#plotting
import matplotlib.pyplot as plt
#from perception import perceptron
#read data as DataFrame
import pandas as pd
import numpy as np
import os
import random
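# Build a small synthetic two-class dataset: columns 0-3 are random numeric
# features and column 4 holds random labels 'a'/'b', standing in for the iris
# data this classic perceptron example is usually run on.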
a=np.random.uniform(6.0,7.0,150)
b=np.random.uniform(2.0,4.0,150)
c=np.random.uniform(5.0,5.5,150)
d=np.random.uniform(1.5,2.5,150)
q=[]
for i in range(150):
e=np.random.choice(['a','b'])
q.append(e)
dic={'0':a,'1':b,'2':c,'3':d,'4':q}
df=pd.DataFrame(dic)
y=df.iloc[0:100,4].values
y=np.where(y=='b',-1,1)
x=df.iloc[0:100,[0,2]].values
plt.scatter(x[:50,0],x[:50,1],color='red',marker='o',label='setosa')
plt.scatter(x[50:100,0],x[50:100,1],color='green',marker='x',label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper right')
plt.show()
ppn=perceptron(eta=1,n_iter=10000)
ppn.fit(x,y)
plt.plot(range(1,len(ppn.errors_)+1),ppn.errors_,marker='o',color='red')
plt.xlabel('epochs')
plt.ylabel('number of miscalssifications')
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"numpy.where",
"numpy.random.choice",
"matplotlib.pyplot.xlabel",
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"pandas.DataFrame",
"numpy.shape",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1308, 1340), 'numpy.random.uniform', 'np.random.uniform', (['(6.0)', '(7.0)', '(150)'], {}), '(6.0, 7.0, 150)\n', (1325, 1340), True, 'import numpy as np\n'), ((1341, 1373), 'numpy.random.uniform', 'np.random.uniform', (['(2.0)', '(4.0)', '(150)'], {}), '(2.0, 4.0, 150)\n', (1358, 1373), True, 'import numpy as np\n'), ((1374, 1406), 'numpy.random.uniform', 'np.random.uniform', (['(5.0)', '(5.5)', '(150)'], {}), '(5.0, 5.5, 150)\n', (1391, 1406), True, 'import numpy as np\n'), ((1407, 1439), 'numpy.random.uniform', 'np.random.uniform', (['(1.5)', '(2.5)', '(150)'], {}), '(1.5, 2.5, 150)\n', (1424, 1439), True, 'import numpy as np\n'), ((1553, 1570), 'pandas.DataFrame', 'pd.DataFrame', (['dic'], {}), '(dic)\n', (1565, 1570), True, 'import pandas as pd\n'), ((1599, 1624), 'numpy.where', 'np.where', (["(y == 'b')", '(-1)', '(1)'], {}), "(y == 'b', -1, 1)\n", (1607, 1624), True, 'import numpy as np\n'), ((1651, 1725), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:50, 0]', 'x[:50, 1]'], {'color': '"""red"""', 'marker': '"""o"""', 'label': '"""setosa"""'}), "(x[:50, 0], x[:50, 1], color='red', marker='o', label='setosa')\n", (1662, 1725), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1811), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[50:100, 0]', 'x[50:100, 1]'], {'color': '"""green"""', 'marker': '"""x"""', 'label': '"""versicolor"""'}), "(x[50:100, 0], x[50:100, 1], color='green', marker='x', label=\n 'versicolor')\n", (1731, 1811), True, 'import matplotlib.pyplot as plt\n'), ((1801, 1827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""petal length"""'], {}), "('petal length')\n", (1811, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1828, 1854), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sepal length"""'], {}), "('sepal length')\n", (1838, 1854), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1884), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1865, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1895), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1893, 1895), True, 'import matplotlib.pyplot as plt\n'), ((2017, 2037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2027, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2080), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of miscalssifications"""'], {}), "('number of miscalssifications')\n", (2048, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2091), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2089, 2091), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1498), 'numpy.random.choice', 'np.random.choice', (["['a', 'b']"], {}), "(['a', 'b'])\n", (1486, 1498), True, 'import numpy as np\n'), ((942, 964), 'numpy.dot', 'np.dot', (['x', 'self.w_[1:]'], {}), '(x, self.w_[1:])\n', (948, 964), True, 'import numpy as np\n'), ((455, 466), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (463, 466), True, 'import numpy as np\n')]
|
import numpy as np
from typing import Any, Tuple, Dict
import logging
class NotDescentDirection(Exception):
pass
class ZeroDescentProduct(Exception):
pass
class ZeroUpdate(Exception):
pass
class Newton:
def __init__(self,
obj_func : Any,
gradient_func : Any,
reg_inv_hessian : Any
):
self.gradient_func = gradient_func
self.obj_func = obj_func
self.reg_inv_hessian = reg_inv_hessian
# Logging
handlerPrint = logging.StreamHandler()
handlerPrint.setLevel(logging.DEBUG)
self.log = logging.getLogger("l-bfgs")
self.log.addHandler(handlerPrint)
self.log.setLevel(logging.DEBUG)
self.line_search_c = pow(10,-4)
self.line_search_tau = 0.5
def get_descent_inner_product(self,
p : np.array,
params : np.array
) -> float:
grads = self.gradient_func(params)
inner_prod = np.dot(p, grads)
if inner_prod > -1e-16 and inner_prod <= 0:
raise ZeroDescentProduct()
elif inner_prod > 0:
self.log.error("ERROR: Positive inner product: %.16f" % inner_prod)
raise NotDescentDirection()
return inner_prod
def run_line_search(self,
p : np.array,
params : np.array
) -> float:
# Check inputs
assert self.line_search_tau < 1
assert self.line_search_tau > 0
assert self.line_search_c > 0
assert self.line_search_c < 1
inner_prod = self.get_descent_inner_product(p, params)
alpha = 1.0
fx = self.obj_func(params)
fx_new = self.obj_func(params + alpha * p)
rhs = alpha * self.line_search_c * inner_prod
self.log.debug(" Line search armijo: obj func old: %f new: %f diff: %.16f rhs: %.16f" % (fx, fx_new, fx_new - fx, rhs))
while fx_new - fx > rhs:
alpha *= self.line_search_tau
fx_new = self.obj_func(params + alpha * p)
rhs = alpha * self.line_search_c * inner_prod
self.log.debug(" Line search armijo: obj func old: %f new: %f diff: %.16f rhs: %.16f" % (fx, fx_new, fx_new - fx, rhs))
return alpha
def step(self,
k : int,
tol : float,
params : np.array
) -> Tuple[bool,np.array,np.array,float]:
update = np.zeros(len(params))
try:
self.log.debug("Iteration: %d [start]" % k)
# Get current grads
gradients = self.gradient_func(params)
# Get regularized inv hessian
rih = self.reg_inv_hessian(params)
# Calculate updates
update = - np.dot(rih, gradients)
# Line search
alpha = self.run_line_search(update, params)
update *= alpha
self.log.debug(" Line search factor: %.16f" % alpha)
# Commit update
params_new = params + update
self.log.debug(" Old params: %s" % params)
self.log.debug(" New params: %s" % params_new)
self.log.debug("Iteration: %d [finished]" % k)
# Monitor convergence
if np.max(abs(update)) < tol:
raise ZeroUpdate()
return (False, params_new, update, alpha)
except ZeroUpdate:
self.log.info("Converged because zero update")
return (True, params, update, 1.0)
except ZeroDescentProduct:
self.log.info("Converged because zero descent inner product")
return (True, params, update, 1.0)
def run(self,
no_steps : int,
params_init : np.array,
tol : float = 1e-8,
store_traj : bool = False
) -> Tuple[bool, int, np.array, Dict[int, np.array], Dict[int, float]]:
assert no_steps >= 1
params = params_init.copy()
traj = {}
line_search = {}
if store_traj:
traj[0] = params.copy()
update = np.zeros(len(params_init))
for k in range(0,no_steps):
converged, params, update, alpha = self.step(
k=k,
tol=tol,
params=params
)
if store_traj:
traj[k+1] = params.copy()
line_search[k+1] = alpha
if converged:
return (True, k, update, traj, line_search)
return (False, no_steps, update, traj, line_search)
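A minimal usage sketch for the Newton class above (the quadratic objective and variable names are illustrative assumptions, not part of the original record):
import numpy as np
# minimize f(x) = 0.5 x^T A x - b^T x for a fixed symmetric positive-definite A
A = np.array([[3.0, 0.5], [0.5, 2.0]])
b = np.array([1.0, -1.0])
obj_func = lambda x: 0.5 * x @ A @ x - b @ x
gradient_func = lambda x: A @ x - b
reg_inv_hessian = lambda x: np.linalg.inv(A)  # exact inverse Hessian; no regularization needed here
solver = Newton(obj_func, gradient_func, reg_inv_hessian)
converged, k, update, traj, line_search = solver.run(no_steps=20, params_init=np.zeros(2), store_traj=True)
# the first Newton step lands on the minimizer A^-1 b; convergence is flagged on the next iteration
print(converged, np.allclose(traj[1], np.linalg.solve(A, b)))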
|
[
"logging.getLogger",
"numpy.dot",
"logging.StreamHandler"
] |
[((505, 528), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (526, 528), False, 'import logging\n'), ((593, 620), 'logging.getLogger', 'logging.getLogger', (['"""l-bfgs"""'], {}), "('l-bfgs')\n", (610, 620), False, 'import logging\n'), ((953, 969), 'numpy.dot', 'np.dot', (['p', 'grads'], {}), '(p, grads)\n', (959, 969), True, 'import numpy as np\n'), ((2727, 2749), 'numpy.dot', 'np.dot', (['rih', 'gradients'], {}), '(rih, gradients)\n', (2733, 2749), True, 'import numpy as np\n')]
|
# Transfer functions and derivatives
# Note _all_ transfer functions and derivatives _must_ accept keyword arguments
# and handle the output keyword argument out=z correctly.
# <NAME>
import numpy as np
import scipy.special
#-------------------------------------------------------------------------------
"""
def sigval(x, **kwds):
# return 1./(1+exp(-x))
# return 0.5 * np.tanh(0.5*x) + 0.5
z = kwds["out"] if "out" in kwds else np.empty_like(x)
np.multiply(x, 0.5, out=z)
np.tanh(z, out=z)
np.multiply(z, 0.5, out=z)
np.add(z, 0.5, out=z)
return z
"""
sigval = scipy.special.expit
#-------------------------------------------------------------------------------
def sigder(x, **kwds):
#y = sigval(x); return (1.-y)*y
z = kwds["out"] if "out" in kwds else np.empty_like(x)
y = kwds["val"] if "val" in kwds else sigval(x)
np.subtract(1., y, out=z)
np.multiply(z, y, out=z)
return z
#-------------------------------------------------------------------------------
def ReLU(x, **kwds):
z = kwds["out"] if "out" in kwds else np.empty_like(x)
y = kwds["ind"] if "ind" in kwds else x < 0
np.copyto(z, x, casting='no')
  z[y] = 0.  # boolean-mask indexing returns a copy, so assign rather than fill()
return z
#-------------------------------------------------------------------------------
def ReDU(x, **kwds):
z = kwds["out"] if "out" in kwds else np.empty_like(x)
y = kwds["ind"] if "ind" in kwds else x < 0
z.fill(1.)
  z[y] = 0.  # assign through the mask; fill() on z[y] would only modify a copy
return z
#-------------------------------------------------------------------------------
TRANSFER_FUNCTION_DERIVATIVE = {'none': (None, None),
'sigm': (sigval, sigder),
'relu': (ReLU, ReDU)}
#-------------------------------------------------------------------------------
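A quick self-check sketch (illustrative, not part of the original module) comparing each analytic derivative above against a central finite difference:
if __name__ == "__main__":
  x = np.array([-2.5, -1.0, -0.3, 0.2, 1.1, 2.7])  # grid avoids the ReLU kink at x = 0
  eps = 1e-6
  for name, (f, df) in TRANSFER_FUNCTION_DERIVATIVE.items():
    if f is None:
      continue  # 'none' has no callables
    numeric = (f(x + eps) - f(x - eps)) / (2 * eps)
    print(name, np.max(np.abs(df(x) - numeric)))  # differences should be ~1e-10 or smaller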
|
[
"numpy.copyto",
"numpy.multiply",
"numpy.empty_like",
"numpy.subtract"
] |
[((852, 878), 'numpy.subtract', 'np.subtract', (['(1.0)', 'y'], {'out': 'z'}), '(1.0, y, out=z)\n', (863, 878), True, 'import numpy as np\n'), ((880, 904), 'numpy.multiply', 'np.multiply', (['z', 'y'], {'out': 'z'}), '(z, y, out=z)\n', (891, 904), True, 'import numpy as np\n'), ((1125, 1154), 'numpy.copyto', 'np.copyto', (['z', 'x'], {'casting': '"""no"""'}), "(z, x, casting='no')\n", (1134, 1154), True, 'import numpy as np\n'), ((783, 799), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (796, 799), True, 'import numpy as np\n'), ((1060, 1076), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (1073, 1076), True, 'import numpy as np\n'), ((1325, 1341), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (1338, 1341), True, 'import numpy as np\n')]
|
#------testing the trained model and ensemble weights on the test data to get the final accuracy
#importing required libraries and modules
import os
import sys
import cv2
import numpy as np
from preprocess import Preprocess
from data_split import Load
from conv_net import CNN
from ensemble import Ensemble
def load_numpy_data(arg, folder):
#loading the numpy data (.npy files) from the required directory
X_test = list(np.load('bin/'+folder+'/'+arg+'/X_test.npy'))
Y_test = list(np.load('bin/'+folder+'/'+arg+'/Y_test.npy'))
X_test = list(np.array(X_test).reshape(-1, 128, 431))
Y_test = list(np.array(Y_test).reshape(-1, 15))
return X_test, Y_test
def predict_test(arg, X_train, X_val, X_test, Y_train, Y_val, Y_test):
#loading the model and training its corresponding SVR classifier
data_size = 'full'
neural_net = CNN()
model = neural_net.create_1ConvModel()
model.load('DNN/'+data_size+'/'+arg+'.model')
#defining an ensemble class and training the SVR for the particular classifier
en = Ensemble()
en.regressor(arg, model, X_val[0], Y_val[0])
neural_net.predict_test_data(arg, model, X_test[0], Y_test[0])
if __name__ == '__main__':
feature = ['mono', 'left', 'right', 'mid', 'side', 'harmonic', 'percussive', 'mfcc'] #all the features used in the architecture
X_test = [0 for i in range(len(feature))]
Y_test = [0 for i in range(len(feature))]
for i in range(8):
X_test[i], Y_test[i] = load_numpy_data(feature[i], 'full')
en = Ensemble()
#uncomment whichever method you want to use in your ensemble(SVR or majority voting)
acc = en.result_SVR(X_test, Y_test)
#acc = en.result_majority_voting(X_test, Y_test)
print("Ensemble Test Accuracy =", acc, '%')
|
[
"numpy.array",
"numpy.load",
"conv_net.CNN",
"ensemble.Ensemble"
] |
[((865, 870), 'conv_net.CNN', 'CNN', ([], {}), '()\n', (868, 870), False, 'from conv_net import CNN\n'), ((1051, 1061), 'ensemble.Ensemble', 'Ensemble', ([], {}), '()\n', (1059, 1061), False, 'from ensemble import Ensemble\n'), ((1526, 1536), 'ensemble.Ensemble', 'Ensemble', ([], {}), '()\n', (1534, 1536), False, 'from ensemble import Ensemble\n'), ((444, 496), 'numpy.load', 'np.load', (["('bin/' + folder + '/' + arg + '/X_test.npy')"], {}), "('bin/' + folder + '/' + arg + '/X_test.npy')\n", (451, 496), True, 'import numpy as np\n'), ((506, 558), 'numpy.load', 'np.load', (["('bin/' + folder + '/' + arg + '/Y_test.npy')"], {}), "('bin/' + folder + '/' + arg + '/Y_test.npy')\n", (513, 558), True, 'import numpy as np\n'), ((570, 586), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (578, 586), True, 'import numpy as np\n'), ((626, 642), 'numpy.array', 'np.array', (['Y_test'], {}), '(Y_test)\n', (634, 642), True, 'import numpy as np\n')]
|