version (stringclasses, 24 values) | code (stringlengths 396–135k) | apis (sequence) | full_version (stringlengths 1–6) | repo_name (stringlengths 6–64) | hexsha (stringlengths 40–40) |
---|---|---|---|---|---|
1.1 | import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
def __init__(
self,
config_file_parser_class=cfargparse.YAMLConfigFileParser,
formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
**kwargs):
super(ArgumentParser, self).__init__(
config_file_parser_class=config_file_parser_class,
formatter_class=formatter_class,
**kwargs)
@classmethod
def defaults(cls, *args):
"""Get default arguments added to a parser by all ``*args``."""
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
@classmethod
def update_model_opts(cls, model_opt):
if model_opt.word_vec_size > 0:
model_opt.src_word_vec_size = model_opt.word_vec_size
model_opt.tgt_word_vec_size = model_opt.word_vec_size
if model_opt.layers > 0:
model_opt.enc_layers = model_opt.layers
model_opt.dec_layers = model_opt.layers
if model_opt.rnn_size > 0:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
model_opt.brnn = model_opt.encoder_type == "brnn"
if model_opt.copy_attn_type is None:
model_opt.copy_attn_type = model_opt.global_attention
@classmethod
def validate_model_opts(cls, model_opt):
assert model_opt.model_type in ["text", "img", "audio", "vec"], \
"Unsupported model type %s" % model_opt.model_type
# this check is here because audio allows the encoder and decoder to
# be different sizes, but other model types do not yet
same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
assert model_opt.model_type == 'audio' or same_size, \
"The encoder and decoder rnns must be the same size for now"
assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
"Using SRU requires -gpu_ranks set."
if model_opt.share_embeddings:
if model_opt.model_type != "text":
raise AssertionError(
"--share_embeddings requires --model_type text.")
@classmethod
def ckpt_model_opts(cls, ckpt_opt):
# Load default opt values, then overwrite with the opts in
# the checkpoint. That way, if there are new options added,
# the defaults are used.
opt = cls.defaults(opts.model_opts)
opt.__dict__.update(ckpt_opt.__dict__)
return opt
@classmethod
def validate_train_opts(cls, opt):
if opt.epochs:
raise AssertionError(
"-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError(
"gpuid is deprecated see world_size and gpu_ranks")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
if opt.world_size < len(opt.gpu_ranks):
raise AssertionError(
"parameter counts of -gpu_ranks must be less or equal "
"than -world_size.")
if opt.world_size == len(opt.gpu_ranks) and \
min(opt.gpu_ranks) > 0:
raise AssertionError(
"-gpu_ranks should have master(=0) rank "
"unless -world_size is greater than len(gpu_ranks).")
assert len(opt.data_ids) == len(opt.data_weights), \
"Please check -data_ids and -data_weights options!"
assert len(opt.dropout) == len(opt.dropout_steps), \
"Number of dropout values must match accum_steps values"
assert len(opt.attention_dropout) == len(opt.dropout_steps), \
"Number of attention_dropout values must match accum_steps values"
@classmethod
def validate_translate_opts(cls, opt):
if opt.beam_size != 1 and opt.random_sampling_topk != 1:
raise ValueError('Can either do beam search OR random sampling.')
@classmethod
def validate_preprocess_args(cls, opt):
assert opt.max_shard_size == 0, \
"-max_shard_size is deprecated. Please use \
-shard_size (number of examples) instead."
assert opt.shuffle == 0, \
"-shuffle is not implemented. Please shuffle \
your data before pre-processing."
assert len(opt.train_src) == len(opt.train_tgt), \
"Please provide same number of src and tgt train files!"
assert len(opt.train_src) == len(opt.train_ids), \
"Please provide proper -train_ids for your data!"
for file in opt.train_src + opt.train_tgt:
assert os.path.isfile(file), "Please check path of %s" % file
assert not opt.valid_src or os.path.isfile(opt.valid_src), \
"Please check path of your valid src file!"
assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
"Please check path of your valid tgt file!"
assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \
"Please check path of your src vocab!"
assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \
"Please check path of your tgt vocab!"
| [
"torch.cuda.is_available"
] | 1.1 | ACL2020-Submission/ACL2020 | 2a3d6e26d22c650cad823c68b65ee315aa1fe22c |
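The `defaults` classmethod in the row above recovers every option's default value by registering the option callbacks on a throwaway parser and parsing an empty argument list. A minimal, self-contained sketch of the same pattern with plain `argparse` (the `model_opts` callback here is a hypothetical stand-in for `onmt.opts.model_opts`):

```python
import argparse

def model_opts(parser):
    # hypothetical stand-in for onmt.opts.model_opts
    parser.add_argument("-word_vec_size", type=int, default=-1)
    parser.add_argument("-layers", type=int, default=-1)

dummy_parser = argparse.ArgumentParser()
model_opts(dummy_parser)
# parse an empty argv: only the registered defaults come back
defaults = dummy_parser.parse_known_args([])[0]
print(defaults.word_vec_size, defaults.layers)  # -1 -1
```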
1.4 | import time
from typing import Optional, Dict
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from utils import TensorboardWriter, AverageMeter, save_checkpoint, accuracy, \
clip_gradient, adjust_learning_rate
from metrics import Metrics
class Trainer:
"""
Encoder-decoder pipeline. Teacher forcing is used during training and validation.
Parameters
----------
caption_model : str
Type of the caption model
epochs : int
We should train the model for __ epochs
device : torch.device
Use GPU or not
word_map : Dict[str, int]
Word2id map
rev_word_map : Dict[int, str]
Id2word map
start_epoch : int
We should start training the model from __th epoch
epochs_since_improvement : int
Number of epochs since last improvement in BLEU-4 score
best_bleu4 : float
Best BLEU-4 score until now
train_loader : DataLoader
DataLoader for training data
val_loader : DataLoader
DataLoader for validation data
encoder : nn.Module
Encoder (based on CNN)
decoder : nn.Module
Decoder (based on LSTM)
encoder_optimizer : optim.Optimizer
Optimizer for encoder (Adam) (if fine-tune)
decoder_optimizer : optim.Optimizer
Optimizer for decoder (Adam)
loss_function : nn.Module
Loss function (cross entropy)
grad_clip : float
Gradient threshold in clip gradients
tau : float
Penalty term τ for doubly stochastic attention in paper: show, attend and tell
fine_tune_encoder : bool
Fine-tune encoder or not
tensorboard : bool, optional, default=False
Enable tensorboard or not?
log_dir : str, optional
Path to the folder to save logs for tensorboard
"""
def __init__(
self,
caption_model: str,
epochs: int,
device: torch.device,
word_map: Dict[str, int],
rev_word_map: Dict[int, str],
start_epoch: int,
epochs_since_improvement: int,
best_bleu4: float,
train_loader: DataLoader,
val_loader: DataLoader,
encoder: nn.Module,
decoder: nn.Module,
encoder_optimizer: optim.Optimizer,
decoder_optimizer: optim.Optimizer,
loss_function: nn.Module,
grad_clip: float,
tau: float,
fine_tune_encoder: bool,
tensorboard: bool = False,
log_dir: Optional[str] = None
) -> None:
self.device = device # GPU / CPU
self.caption_model = caption_model
self.epochs = epochs
self.word_map = word_map
self.rev_word_map = rev_word_map
self.start_epoch = start_epoch
self.epochs_since_improvement = epochs_since_improvement
self.best_bleu4 = best_bleu4
self.train_loader = train_loader
self.val_loader = val_loader
self.encoder = encoder
self.decoder = decoder
self.encoder_optimizer = encoder_optimizer
self.decoder_optimizer = decoder_optimizer
self.loss_function = loss_function
self.tau = tau
self.grad_clip = grad_clip
self.fine_tune_encoder = fine_tune_encoder
self.print_freq = 100 # print training/validation stats every __ batches
# setup visualization writer instance
self.writer = TensorboardWriter(log_dir, tensorboard)
self.len_epoch = len(self.train_loader)
def train(self, epoch: int) -> None:
"""
Train an epoch
Parameters
----------
epoch : int
Current number of epoch
"""
self.decoder.train() # train mode (dropout and batchnorm are used)
self.encoder.train()
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter(tag='loss', writer=self.writer) # loss (per word decoded)
top5accs = AverageMeter(tag='top5acc', writer=self.writer) # top5 accuracy
start = time.time()
# batches
for i, (imgs, caps, caplens) in enumerate(self.train_loader):
data_time.update(time.time() - start)
# Move to GPU, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# clear gradient of last batch
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
# backward
loss.backward()
# clip gradients
if self.grad_clip is not None:
clip_gradient(self.decoder_optimizer, self.grad_clip)
if self.encoder_optimizer is not None:
clip_gradient(self.encoder_optimizer, self.grad_clip)
# update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
# set step for tensorboard
step = (epoch - 1) * self.len_epoch + i
self.writer.set_step(step=step, mode='train')
# keep track of metrics
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
# print status
if i % self.print_freq == 0:
print(
'Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(self.train_loader),
batch_time = batch_time,
data_time = data_time,
loss = losses,
top5 = top5accs
)
)
def validate(self) -> float:
"""
Validate an epoch.
Returns
-------
bleu4 : float
BLEU-4 score
"""
self.decoder.eval() # eval mode (no dropout or batchnorm)
if self.encoder is not None:
self.encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
ground_truth = list() # ground_truth (true captions) for calculating BLEU-4 score
prediction = list() # prediction (predicted captions)
# explicitly disable gradient calculation to avoid CUDA memory error
# solves the issue #57
with torch.no_grad():
# Batches
for i, (imgs, caps, caplens, allcaps) in enumerate(self.val_loader):
# move to device, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
if self.encoder is not None:
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first = True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first = True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# keep track of metrics
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
if i % self.print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader),
batch_time = batch_time,
loss = losses,
top5 = top5accs)
)
# store ground truth captions and predicted captions of each image
# for n images, each of them has one prediction and multiple ground truths (a, b, c...):
# prediction = [ [hyp1], [hyp2], ..., [hypn] ]
# ground_truth = [ [ [ref1a], [ref1b], [ref1c] ], ..., [ [refna], [refnb] ] ]
# ground truth
allcaps = allcaps[sort_ind] # because images were sorted in the decoder
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(
map(
lambda c: [w for w in c if w not in {self.word_map['<start>'], self.word_map['<pad>']}],
img_caps
)
) # remove <start> and pads
ground_truth.append(img_captions)
# prediction
_, preds = torch.max(scores_copy, dim = 2)
preds = preds.tolist()
temp_preds = list()
for j, p in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]]) # remove pads
preds = temp_preds
prediction.extend(preds)
assert len(ground_truth) == len(prediction)
# calc BLEU-4 and CIDEr score
metrics = Metrics(ground_truth, prediction, self.rev_word_map)
bleu4 = metrics.belu[3] # BLEU-4
cider = metrics.cider # CIDEr
print(
'\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(
loss = losses,
top5 = top5accs,
bleu = bleu4,
cider = cider
)
)
return bleu4
def run_train(self) -> None:
# epochs
for epoch in range(self.start_epoch, self.epochs):
# decay learning rate if there is no improvement for 8 consecutive epochs
# terminate training if there is no improvement for 20 consecutive epochs
if self.epochs_since_improvement == 20:
break
if self.epochs_since_improvement > 0 and self.epochs_since_improvement % 8 == 0:
adjust_learning_rate(self.decoder_optimizer, 0.8)
if self.fine_tune_encoder:
adjust_learning_rate(self.encoder_optimizer, 0.8)
# train an epoch
self.train(epoch = epoch)
# validate an epoch
recent_bleu4 = self.validate()
# epochs num since last improvement
is_best = recent_bleu4 > self.best_bleu4
self.best_bleu4 = max(recent_bleu4, self.best_bleu4)
if not is_best:
self.epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (self.epochs_since_improvement,))
else:
self.epochs_since_improvement = 0
# save checkpoint
save_checkpoint(
epoch = epoch,
epochs_since_improvement = self.epochs_since_improvement,
encoder = self.encoder,
decoder = self.decoder,
encoder_optimizer = self.encoder_optimizer,
decoder_optimizer = self.decoder_optimizer,
caption_model = self.caption_model,
bleu4 = recent_bleu4,
is_best = is_best
)
| [
"torch.no_grad",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.max"
] | 1.4.0 | Renovamen/Image-Captioning | de8d4f553a22e967fa56a01d5b4a2206b9431771 |
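The training and validation loops above use `pack_padded_sequence` purely as a trick to drop padded timesteps before computing the loss. A small, self-contained sketch of that step with dummy tensors (shapes and lengths are illustrative only):

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence

batch_size, max_len, vocab_size = 2, 5, 7
scores = torch.randn(batch_size, max_len, vocab_size)          # decoder outputs
targets = torch.randint(0, vocab_size, (batch_size, max_len))  # gold word ids
decode_lengths = [5, 3]  # true decode lengths, sorted in decreasing order

# [0] takes the .data field of the returned PackedSequence, i.e. only real timesteps
packed_scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
packed_targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
assert packed_scores.shape[0] == sum(decode_lengths)

loss = torch.nn.CrossEntropyLoss()(packed_scores, packed_targets)
```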
0.3 | """
Baseline CNN, loss function and metrics
Also customizes knowledge distillation (KD) loss function here
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
"""
This is the standard way to define your own network in PyTorch. You typically choose the components
(e.g. LSTMs, linear layers etc.) of your network in the __init__ function. You then apply these layers
on the input step-by-step in the forward function. You can use torch.nn.functional to apply functions
such as F.relu, F.sigmoid, F.softmax, F.max_pool2d. Be careful to ensure your dimensions are correct after each
step. You are encouraged to have a look at the network in pytorch/nlp/model/net.py to get a better sense of how
you can go about defining your own network.
The documentation for all the various components available to you is here: http://pytorch.org/docs/master/nn.html
"""
class studentB(nn.Module):
def __init__(self, params):
"""
We define a convolutional network that predicts the label of an image. The components
required are:
Args:
params: (Params) contains num_channels
"""
super(studentB, self).__init__()
self.num_channels = params.num_channels
# each of the convolution layers below have the arguments (input_channels, output_channels, filter_size,
# stride, padding). We also include batch normalisation layers that help stabilise training.
# For more details on how to use these layers, check out the documentation.
self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
self.bn1 = nn.BatchNorm2d(32)
self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)
self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)
self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)
self.bn3 = nn.BatchNorm2d(128)
# 2 fully connected layers to transform the output of the convolution layers to the final output
self.fc1 = nn.Linear(4*4*128, 500)
self.fcbn1 = nn.BatchNorm1d(500)
self.fc2 = nn.Linear(500, 10)
self.dropout_rate = params.dropout_rate
def forward(self, s):
"""
This function defines how we use the components of our network to operate on an input batch.
Args:
s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .
Returns:
out: (Variable) dimension batch_size x 10 with the unnormalized scores (logits) for the labels of each image.
Note: the dimensions after each step are provided
"""
# -> batch_size x 3 x 32 x 32
# we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3
s = self.bn1(self.conv1(s)) # batch_size x 32 x 32 x 32
s = F.relu(F.max_pool2d(s, 2)) # batch_size x 32 x 16 x 16
s = self.conv2_1(s)
s = self.conv2_2(s)
s = self.conv2_3(s)
s = self.bn2(s) # batch_size x 64 x 16 x 16
s = F.relu(F.max_pool2d(s, 2)) # batch_size x 64 x 8 x 8
s = self.conv3_1(s)
s = self.conv3_2(s)
s = self.conv3_3(s)
s = self.bn3(s) # batch_size x 128 x 8 x 8
s = F.relu(F.max_pool2d(s, 2)) # batch_size x 128 x 4 x 4
# flatten the output for each image
s = s.view(-1, 4*4*128) # batch_size x 4*4*128
# apply 2 fully connected layers with dropout
s = F.dropout(F.relu(self.fcbn1(self.fc1(s))),
p=self.dropout_rate, training=self.training) # batch_size x 500
s = self.fc2(s) # batch_size x 10
return s
def loss_fn(outputs, labels):
"""
Compute the cross entropy loss given outputs and labels.
Args:
outputs: (Variable) dimension batch_size x 10 - output of the model
labels: (Variable) dimension batch_size, where each element is a value in [0, 1, ..., 9]
Returns:
loss (Variable): cross entropy loss for all images in the batch
Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example
demonstrates how you can easily define a custom loss function.
"""
return nn.CrossEntropyLoss()(outputs, labels)
def loss_fn_kd(outputs, labels, teacher_outputs, params):
"""
Compute the knowledge-distillation (KD) loss given outputs, labels.
"Hyperparameters": temperature and alpha
NOTE: PyTorch's KL divergence, used here to compare the softened softmax outputs of teacher
and student, expects the input tensor to be log probabilities! See Issue #2
"""
alpha = params.alpha
T = params.temperature
KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1),
F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \
F.cross_entropy(outputs, labels) * (1. - alpha)
return KD_loss
def accuracy(outputs, labels):
"""
Compute the accuracy, given the outputs and labels for all images.
Args:
outputs: (np.ndarray) output of the model
labels: (np.ndarray) [0, 1, ..., num_classes-1]
Returns: (float) accuracy in [0,1]
"""
outputs = np.argmax(outputs, axis=1)
return np.sum(outputs==labels)/float(labels.size)
# maintain all metrics required in this dictionary- these are used in the training and evaluation loops
metrics = {
'accuracy': accuracy,
# could add more metrics such as accuracy for each token type
} | [
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.functional.cross_entropy",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.nn.functional.max_pool2d",
"torch.nn.CrossEntropyLoss"
] | 0.3.0 | eungbean/knowledge-distillation-cifar10 | 683379804c8724d097a845cee85f130b6767dbd7 |
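As a quick usage sketch for `loss_fn_kd` above (run after the definitions in that file): `params` only needs `alpha` and `temperature` attributes, and both logits tensors share the shape `batch_size x num_classes`. The values below are illustrative, not the repository's actual hyperparameters.

```python
from types import SimpleNamespace
import torch

params = SimpleNamespace(alpha=0.9, temperature=4.0)  # hypothetical settings
student_logits = torch.randn(8, 10)
teacher_logits = torch.randn(8, 10)
labels = torch.randint(0, 10, (8,))

kd_loss = loss_fn_kd(student_logits, labels, teacher_logits, params)
print(kd_loss.item())
```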
1.0 | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class TripleSoftmaxLoss(nn.Module):
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
vocab,
document_coef: float = 0.4,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False):
super(TripleSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.hidden = 1000
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.document_coef = document_coef
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 2
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.relu = nn.ReLU()
self.document2hidden = nn.Linear(291868, self.hidden)
self.hidden2output = nn.Linear(self.hidden, 768)
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, document_rep: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
document_rep = self.relu(self.hidden2output(self.relu(self.document2hidden(document_rep.float()))))
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
vectors_concat.append(torch.abs(rep_a - document_rep))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = (1.0 - self.document_coef) * loss_fct(output, labels.view(-1))
loss -= self.document_coef * torch.sum(torch.cosine_similarity(document_rep, rep_b)) # todo: it would be nice to include MMI here.
return loss
else:
return reps, output | [
"torch.nn.Linear",
"torch.cosine_similarity",
"torch.cat",
"torch.nn.ReLU",
"torch.abs",
"torch.nn.CrossEntropyLoss"
] | 1.0.1 | jaimeenahn/COVID-sentence-bert | 2f47d116f7d9b774946fbf3c0724b721d1b88225 |
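The classifier input built in `forward` above concatenates both sentence embeddings, their absolute difference, and the absolute difference to the document representation. A minimal sketch of that feature construction with dummy embeddings in place of the SentenceTransformer outputs (dimensions are illustrative):

```python
import torch

batch_size, dim = 4, 768
rep_a = torch.randn(batch_size, dim)         # embedding of sentence a
rep_b = torch.randn(batch_size, dim)         # embedding of sentence b
document_rep = torch.randn(batch_size, dim)  # projected document representation

features = torch.cat(
    [rep_a, rep_b, torch.abs(rep_a - rep_b), torch.abs(rep_a - document_rep)], 1
)
print(features.shape)  # torch.Size([4, 3072]); fed into the final linear classifier
```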
1.6 |
import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import torch
import flow
from utils import cdfDiscreteLogitstic, cdfMixDiscreteLogistic
from utils import logDiscreteLogistic, logMixDiscreteLogistic
nbins = 4096
_bins = torch.arange(-nbins // 2, nbins // 2).reshape(-1, 1, 1, 1, 1)
decimal = flow.ScalingNshifting(256, -128)
def test_disLogisticCDF():
logscale = torch.tensor(
[[[[-3.6826, -3.0157, -3.6032],
[-3.7063, -3.0269, -3.5338],
[-3.5311, -2.9907, -3.3516],
[-3.9300, -3.3121, -3.8110]],
[[-3.1022, -3.0692, -3.2039],
[-2.9466, -3.0006, -3.2969],
[-2.7636, -2.5691, -2.9628],
[-3.3657, -3.2948, -3.5318]],
[[-3.9748, -3.0670, -3.2399],
[-3.9312, -3.0055, -3.1729],
[-3.8588, -2.9139, -3.1794],
[-4.1534, -3.2404, -3.5665]]]]
)
mean = torch.tensor(
[[[[ 0.0191, 0.0459, 0.0131],
[-0.0059, 0.0254, -0.0100],
[ 0.0359, 0.0406, 0.0242],
[ 0.0331, 0.0438, 0.0255]],
[[ 0.0214, 0.0502, 0.0622],
[ 0.0371, 0.0368, 0.0517],
[ 0.0217, 0.0855, 0.0874],
[ 0.0144, 0.0475, 0.0470]],
[[-0.0602, -0.0791, -0.0784],
[-0.0443, -0.0765, -0.0701],
[-0.0654, -0.0709, -0.0788],
[-0.0608, -0.0721, -0.0688]]]]
)
bins = _bins - 1 + torch.round(decimal.forward_(mean))
cdf = cdfDiscreteLogitstic(bins, mean, logscale, decimal=decimal).detach().numpy()
pList = []
for i in range(bins.shape[0]):
logp = logDiscreteLogistic(bins[i: i + 1], mean, logscale, decimal=decimal).detach().numpy()
pList.append(np.exp(logp).reshape(mean.shape))
pList = np.array(pList)
_cdf = np.cumsum(pList, 0)
assert np.allclose(cdf, _cdf)
def test_mixDixLogisticCDF():
mean = torch.tensor(
[[[[-0.2414, 0.2089, -0.0209, -0.1279]],
[[ 0.7791, 0.1031, 0.0940, 0.1678]],
[[ 0.0095, 0.0391, -0.0318, -0.2183]]],
[[[-0.1466, 0.2090, -0.0594, -0.0837]],
[[ 0.8711, 0.0540, 0.0940, 0.0859]],
[[-0.0683, -0.0204, -0.0340, -0.0587]]],
[[[-0.1994, -0.0442, -0.0307, -0.0823]],
[[ 1.0158, 0.0636, 0.0832, 0.0717]],
[[-0.1863, -0.0177, -0.0293, -0.0708]]],
[[[-0.3517, 0.1062, -0.0362, -0.1661]],
[[ 0.6567, 0.1452, 0.0294, 0.0864]],
[[-0.1384, -0.0171, -0.0195, -0.0710]]],
[[[-0.3158, 0.2068, 0.1114, -0.1251]],
[[ 0.5600, 0.1987, 0.1891, 0.1754]],
[[-0.2758, -0.1032, -0.0435, -0.1156]]]])
logscale = torch.tensor(
[[[[-3.1292, -4.0168, -3.2886, -2.5948]],
[[-2.8226, -2.3489, -2.8613, -2.3892]],
[[-3.3502, -3.4929, -2.9572, -2.7060]]],
[[[-3.4556, -4.0166, -2.7471, -3.1203]],
[[-2.6906, -3.6062, -2.8620, -3.0673]],
[[-3.2775, -3.3661, -3.2897, -4.0553]]],
[[[-3.4652, -3.3828, -3.3053, -3.6945]],
[[-2.7657, -2.9172, -3.4067, -3.7734]],
[[-3.4817, -3.0397, -2.8021, -3.1398]]],
[[[-2.7246, -3.7798, -4.1237, -2.8605]],
[[-3.0524, -2.6628, -2.4833, -3.0913]],
[[-4.0249, -3.8364, -3.7608, -2.7111]]],
[[[-3.5460, -4.0208, -2.9837, -3.1288]],
[[-3.2062, -2.1702, -2.2238, -2.6122]],
[[-3.1754, -3.0892, -2.3359, -2.4321]]]])
mixing = torch.tensor(
[[[[ 1.3161, 0.8664, 1.7648, -0.7598, -0.8658],
[-3.7472, -3.6553, 5.2783, 0.2242, -3.6304],
[-0.7378, 0.2730, 1.8044, 0.7450, -1.6218],
[-0.8105, 1.8833, 1.8243, -0.7879, -1.1211]]],
[[[ 1.3952, -0.8232, -1.0135, 1.8041, 0.9846],
[-0.4372, 1.1296, 1.5473, -0.0661, -0.5995],
[-0.5167, 1.5559, 1.2607, -0.3227, -0.8687],
[-0.6226, 1.5024, 1.4221, 1.4741, -0.4409]]],
[[[ 1.3045, 1.8551, 0.1755, -0.6253, -1.2045],
[-0.9858, 1.5529, -0.6332, 1.4569, -1.1089],
[-0.5954, 1.2305, 1.4068, 0.7919, -0.3811],
[-0.2997, 0.6804, 2.0660, 1.1353, -0.9155]]]])
bins = _bins - 1 + torch.round(decimal.forward_(mean.permute([1, 2, 3, 0])) * mixing).sum(-1).reshape(1, *mean.shape[1:])
cdf = cdfMixDiscreteLogistic(bins, mean, logscale, mixing, decimal=decimal)
pList = []
for i in range(bins.shape[0]):
logp = logMixDiscreteLogistic(bins[i: i + 1], mean, logscale, mixing, decimal=decimal).detach().numpy()
pList.append(np.exp(logp).reshape(logp.shape[1:]))
pList = np.array(pList)
_cdf = np.cumsum(pList, 0)
assert np.allclose(cdf, _cdf)
if __name__ == "__main__":
test_disLogisticCDF()
test_mixDixLogisticCDF() | [
"torch.arange",
"torch.tensor"
] | 1.6.0 | li012589/NeuralWavelet | 6e593ded5cb4ae80579cbf56eb9c346d808669cb |
1.6 | # Source: https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
from torch.optim.optimizer import Optimizer
import torch
import math
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size, exp_avg, denom)
else:
radam_step.add_(-radam_step_size, exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if 0 in (weight_norm, radam_norm):
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss | [
"torch.zeros_like"
] | 1.6.0 | achaiah/pywick | 9d663faf0c1660a9b8359a6472c164f658dfc8cb |
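A minimal usage sketch for the `Ralamb` optimizer above, following the standard `torch.optim` interface (the model and hyperparameters are illustrative; the deprecated `add_`/`addcmul_` call signatures inside the class assume an older PyTorch such as the 1.6 release this row targets):

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = Ralamb(model.parameters(), lr=1e-3, weight_decay=1e-4)

optimizer.zero_grad()
loss = model(torch.randn(8, 4)).pow(2).mean()  # dummy objective
loss.backward()
optimizer.step()
```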
1.6 | """ PyTorch MADGRAD optimizer
MADGRAD: https://arxiv.org/abs/2101.11075
Code from: https://github.com/facebookresearch/madgrad
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class MADGRAD(torch.optim.Optimizer):
"""
MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
Optimization.
.. _MADGRAD: https://arxiv.org/abs/2101.11075
MADGRAD is a general purpose optimizer that can be used in place of SGD or
Adam; it may converge faster and generalize better. Currently GPU-only.
Typically, the same learning rate schedule that is used for SGD or Adam may
be used. The overall learning rate is not comparable to either method and
should be determined by a hyper-parameter sweep.
MADGRAD requires less weight decay than other methods, often as little as
zero. Momentum values used for SGD or Adam's beta1 should work here also.
On sparse problems both weight_decay and momentum should be set to 0.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate (default: 1e-2).
momentum (float):
Momentum value in the range [0,1) (default: 0.9).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
"""
def __init__(
self,
params: _params_t,
lr: float = 1e-2,
momentum: float = 0.9,
weight_decay: float = 0,
eps: float = 1e-6,
decoupled_decay: bool = False,
):
if momentum < 0 or momentum >= 1:
raise ValueError(f"Momentum {momentum} must be in the range [0,1]")
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if weight_decay < 0:
raise ValueError(f"Weight decay {weight_decay} must be non-negative")
if eps < 0:
raise ValueError(f"Eps must be non-negative")
defaults = dict(
lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self) -> bool:
return False
@property
def supports_flat_params(self) -> bool:
return True
@torch.no_grad()
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
eps = group['eps']
lr = group['lr'] + eps
weight_decay = group['weight_decay']
momentum = group['momentum']
ck = 1 - momentum
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
if momentum != 0.0 and grad.is_sparse:
raise RuntimeError("momentum != 0 is not compatible with sparse gradients")
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['grad_sum_sq'] = torch.zeros_like(p)
state['s'] = torch.zeros_like(p)
if momentum != 0:
state['x0'] = torch.clone(p).detach()
state['step'] += 1
grad_sum_sq = state['grad_sum_sq']
s = state['s']
lamb = lr * math.sqrt(state['step'])
# Apply weight decay
if weight_decay != 0:
if group['decoupled_decay']:
p.mul_(1.0 - group['lr'] * weight_decay)
else:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad.add_(p, alpha=weight_decay)
if grad.is_sparse:
grad = grad.coalesce()
grad_val = grad._values()
p_masked = p.sparse_mask(grad)
grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
s_masked = s.sparse_mask(grad)
# Compute x_0 from other known quantities
rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)
# Dense + sparse op
grad_sq = grad * grad
grad_sum_sq.add_(grad_sq, alpha=lamb)
grad_sum_sq_masked.add_(grad_sq, alpha=lamb)
rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)
s.add_(grad, alpha=lamb)
s_masked._values().add_(grad_val, alpha=lamb)
# update masked copy of p
p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
# Copy updated masked p to dense p using an add operation
p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
p.add_(p_masked, alpha=-1)
else:
if momentum == 0:
# Compute x_0 from other known quantities
rms = grad_sum_sq.pow(1 / 3).add_(eps)
x0 = p.addcdiv(s, rms, value=1)
else:
x0 = state['x0']
# Accumulate second moments
grad_sum_sq.addcmul_(grad, grad, value=lamb)
rms = grad_sum_sq.pow(1 / 3).add_(eps)
# Update s
s.add_(grad, alpha=lamb)
# Step
if momentum == 0:
p.copy_(x0.addcdiv(s, rms, value=-1))
else:
z = x0.addcdiv(s, rms, value=-1)
# p is a moving average of z
p.mul_(1 - ck).add_(z, alpha=ck)
return loss | [
"torch.zeros_like",
"torch.no_grad",
"torch.clone",
"torch.enable_grad"
] | 1.6.0 | achaiah/pywick | 9d663faf0c1660a9b8359a6472c164f658dfc8cb |
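And a matching usage sketch for `MADGRAD` above; as its docstring notes, the learning rate is not directly comparable to SGD/Adam values, so the numbers below are purely illustrative:

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = MADGRAD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0.0)

optimizer.zero_grad()
loss = model(torch.randn(8, 4)).pow(2).mean()  # dummy objective
loss.backward()
optimizer.step()
```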
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.finite_checks import detect_nan_parameters
from pytorch_lightning.utilities.grads import grad_norm
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode: str):
self.trainer = trainer
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
self._optimizer_freq_cumsum = None
def on_trainer_init(
self,
max_epochs: Optional[int],
min_epochs: Optional[int],
max_steps: Optional[int],
min_steps: Optional[int],
num_sanity_val_steps: int,
) -> None:
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.should_stop = False
self.trainer.state = TrainerState()
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
# If neither max_epochs nor max_steps is set, then use the existing default of max_epochs = 1000
self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
# If neither min_epochs nor min_steps is set, then use the existing default of min_epochs = 1
self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
@property
def optimizer_freq_cumsum(self):
if self._optimizer_freq_cumsum is None:
self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
return self._optimizer_freq_cumsum
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might apply to all loggers.
# It might be related to xla tensors being blocked when moving to the cpu
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
# reset bookkeeping
self.trainer.state.stage = None
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.lightning_module
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.lightning_module
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]
processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)
# hook
self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model) -> None:
"""
Resets train and val dataloaders if none are attached to the trainer.
The val dataloader must be initialized before the training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
"""
if self.trainer.train_dataloader is None:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
hook_overridden = self._should_add_batch_output_to_epoch_output()
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def _should_add_batch_output_to_epoch_output(self) -> bool:
# We add to the epoch outputs if
# 1. The model defines training_epoch_end OR
# 2. The model overrides on_train_epoch_end which has `outputs` in the signature
# TODO: in v1.5 this only needs to check if training_epoch_end is overridden
lightning_module = self.trainer.lightning_module
if is_overridden("training_epoch_end", model=lightning_module):
return True
if is_overridden("on_train_epoch_end", model=lightning_module):
model_hook_fx = getattr(lightning_module, "on_train_epoch_end")
if is_param_in_hook_signature(model_hook_fx, "outputs"):
return True
return False
def get_optimizers_iterable(self, batch_idx=None):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
if batch_idx is None:
batch_idx = self.trainer.total_batch_idx
optimizers_loop_length = self.optimizer_freq_cumsum[-1]
current_place_in_loop = batch_idx % optimizers_loop_length
# find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
training_step_output.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator.training_step(args)
self.trainer.accelerator.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
if training_step_output_for_epoch_end is None:
return
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.lightning_module.automatic_optimization:
# accumulate loss. if accumulate_grad_batches==1, no effect
closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
result = self.trainer.lightning_module._results
loss = None
hiddens = None
result["extra"] = {}
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
if hiddens is not None:
hiddens = hiddens.detach()
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
# map to results under the hood
result.minimize = loss
self.trainer.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()
return training_step_output_for_epoch_end, result
@staticmethod
def _prepare_outputs(
outputs: List[List[List[Result]]],
batch_mode: bool,
) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
"""
Extract required information from batch or epoch end results.
Args:
outputs: A 3-dimensional list of ``Result`` objects with dimensions:
[optimizer outs][batch outs][tbptt steps].
batch_mode: If True, ignore the batch output dimension.
Returns:
The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will
be collapsed.
"""
processed_outputs = []
for opt_outputs in outputs:
# handle an edge case where an optimizer output is the empty list
if len(opt_outputs) == 0:
continue
processed_batch_outputs = []
if batch_mode:
opt_outputs = [opt_outputs]
for batch_outputs in opt_outputs:
processed_tbptt_outputs = []
for tbptt_output in batch_outputs:
out = tbptt_output.extra
out['loss'] = tbptt_output.minimize
processed_tbptt_outputs.append(out)
# if there was only one tbptt step then we can collapse that dimension
if len(processed_tbptt_outputs) == 1:
processed_tbptt_outputs = processed_tbptt_outputs[0]
processed_batch_outputs.append(processed_tbptt_outputs)
# batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
if batch_mode:
processed_batch_outputs = processed_batch_outputs[0]
processed_outputs.append(processed_batch_outputs)
# if there is only one optimiser then we collapse that dimension
if len(processed_outputs) == 1:
processed_outputs = processed_outputs[0]
return processed_outputs
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.lightning_module
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator.clip_gradients(
optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm
)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.lightning_module
grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm)
return grad_norm_dict
def _tbptt_split_batch(self, batch: Any) -> List[Any]:
splits = [batch]
truncated_bptt_enabled = self._truncated_bptt_enabled()
if truncated_bptt_enabled:
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
val_loop_called = False
batch_idx = None
is_last_batch = None
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
self.trainer.is_last_batch = is_last_batch
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
# hook
# TODO: add outputs to batches
self.on_train_batch_end(
epoch_output,
batch_output.training_step_output_for_epoch_end,
batch,
batch_idx,
dataloader_idx,
)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED
# -----------------------------------------
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.validating = True
self.trainer.run_evaluation()
self.trainer.training = True
val_loop_called = True
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.trainer.max_steps is not None and self.trainer.max_steps <= self.trainer.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
if batch_idx is None:
# dataloader/iterator did not produce a batch
return
# handle epoch_output on epoch end
self.on_train_epoch_end(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
# update epoch level lr_schedulers if no val loop outside train loop is triggered
if (val_loop_called and not should_check_val) or should_train_only:
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
if should_train_only:
self.check_checkpoint_callback(True)
if should_check_val:
self.trainer.validating = True
self.trainer.run_evaluation(on_epoch=True)
self.trainer.training = True
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
# prepare epoch output
processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)
# get the model and call model.training_epoch_end
model = self.trainer.lightning_module
if is_overridden('training_epoch_end', model=model):
# run training_epoch_end
# refresh the result for custom logging at the epoch level
model._current_fx_name = 'training_epoch_end'
# lightningmodule hook
training_epoch_end_output = model.training_epoch_end(processed_epoch_output)
if training_epoch_end_output is not None:
raise MisconfigurationException(
'training_epoch_end expects a return of None. '
'HINT: remove the return statement in training_epoch_end'
)
# capture logging
self.trainer.logger_connector.cache_logged_metrics()
# call train epoch end hooks
self._on_train_epoch_end_hook(processed_epoch_output)
self.trainer.call_hook('on_epoch_end')
def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:
# We cannot rely on Trainer.call_hook because the signatures might be different across
# lightning module and callback
# As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`
# This implementation is copied from Trainer.call_hook
hook_name = "on_train_epoch_end"
# set hook_name to model + reset Result obj
skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)
# always profile hooks
with self.trainer.profiler.profile(hook_name):
# first call trainer hook
if hasattr(self.trainer, hook_name):
trainer_hook = getattr(self.trainer, hook_name)
trainer_hook(processed_epoch_output)
# next call hook in lightningModule
model_ref = self.trainer.lightning_module
if is_overridden(hook_name, model_ref):
hook_fx = getattr(model_ref, hook_name)
if is_param_in_hook_signature(hook_fx, "outputs"):
self.warning_cache.warn(
"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3."
" `outputs` parameter has been deprecated."
" Support for the old signature will be removed in v1.5", DeprecationWarning
)
model_ref.on_train_epoch_end(processed_epoch_output)
else:
model_ref.on_train_epoch_end()
# if the PL module doesn't have the hook then call the accelerator
# used to auto-reduce things for the user with Results obj
elif hasattr(self.trainer.accelerator, hook_name):
accelerator_hook = getattr(self.trainer.accelerator, hook_name)
accelerator_hook()
if not skip:
self.trainer._cache_logged_metrics()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
self.trainer.hiddens = None
optimizers = self.prepare_optimizers()
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(optimizers))]
if batch is None:
self.warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
return AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self._tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in optimizers:
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# automatic_optimization=True: perform ddp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.trainer.lightning_module.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch, batch_idx, opt_idx, self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# todo: properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
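# Illustrative sketch (not part of this loop): `train_step_and_backward_closure` above
# mirrors the standard PyTorch closure pattern for optimizers that may re-evaluate the
# loss, e.g. LBFGS; `model`, `batch` and `compute_loss` below are hypothetical names:
#
#   import torch
#   optimizer = torch.optim.LBFGS(model.parameters())
#   def closure():
#       optimizer.zero_grad()
#       loss = compute_loss(model, batch)
#       loss.backward()
#       return loss
#   optimizer.step(closure)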
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.trainer.lightning_module.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
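# Illustrative sketch (assumes a DDP process group is already initialized; `compute_loss`,
# `loader` and `accumulate_grad_batches` are hypothetical names): `block_backward_sync`
# wraps the same idea as DistributedDataParallel.no_sync(), which skips the gradient
# all-reduce while accumulating:
#
#   from torch.nn.parallel import DistributedDataParallel as DDP
#   ddp_model = DDP(model)
#   for i, batch in enumerate(loader):
#       if (i + 1) % accumulate_grad_batches != 0:
#           with ddp_model.no_sync():                      # no all-reduce on this backward
#               compute_loss(ddp_model, batch).backward()
#       else:
#           compute_loss(ddp_model, batch).backward()      # gradients are synchronized here
#           optimizer.step()
#           optimizer.zero_grad()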
def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.trainer.lightning_module.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""Wrap forward, zero_grad and backward in a closure so second order methods work"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:
is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0
if is_first_batch_to_accumulate:
self.on_before_zero_grad(optimizer)
self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
# backward pass
if result is not None:
with self.trainer.profiler.profile("backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(result.loss)
else:
self.warning_cache.warn(
"training_step returned None. If this was on purpose, ignore this warning..."
)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.lightning_module.untoggle_optimizer(opt_idx)
return result
def _check_finite(self, loss: torch.Tensor) -> None:
if not torch.isfinite(loss).all():
raise ValueError(f'The loss returned in `training_step` is {loss}.')
model = self.trainer.lightning_module
detect_nan_parameters(model)
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(
interval="step",
monitor_metrics=monitor_metrics,
opt_indices=[opt_idx for opt_idx, _ in self.get_optimizers_iterable()],
)
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step = self.trainer.accelerator.update_global_step(
self.trainer.total_batch_idx, self.trainer.global_step
)
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
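# Worked example (illustrative): with accumulate_grad_batches = 4 and 10 training batches,
# should_accumulate() is True for batch_idx 0-2, 4-6 and 8 (only backward runs) and False
# at batch_idx 3, 7 and the final batch 9, where the optimizer step is taken.
# The expression below is True exactly where a step happens:
#
#   [(i + 1) % 4 == 0 or (i + 1) == 10 for i in range(10)]
#   # -> [False, False, False, True, False, False, False, True, False, True]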
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:
""" Decide if we should run validation. """
if not self.trainer.enable_validation:
return False
# check if this epoch is eligible to run validation
if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:
return False
# val_check_batch is inf for iterable datasets with no length defined
# TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = False
if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
# Note: num_training_batches is also inf for iterable datasets with no length defined
epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
if on_epoch:
return (
is_val_check_batch and epoch_end_val_check
) or self.trainer.should_stop or is_last_batch_for_infinite_dataset
else:
return is_val_check_batch and not epoch_end_val_check
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# allow training_step to omit opt_idx when it is not needed
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
if not self.trainer.lightning_module.automatic_optimization:
self.warning_cache.warn(
"`training_step` hook signature has changed in v1.3."
" `optimizer_idx` argument has been removed in case of manual optimization. Support for"
" the old signature will be removed in v1.5", DeprecationWarning
)
args.append(opt_idx)
elif not self.trainer.has_arg(
"training_step", "optimizer_idx"
) and self.trainer.lightning_module.automatic_optimization:
raise ValueError(
f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but"
' `training_step` is missing the `optimizer_idx` argument.'
)
# pass hiddens if using tbptt
if self._truncated_bptt_enabled():
args.append(hiddens)
return args
def _truncated_bptt_enabled(self) -> bool:
""" Temporary tbptt utilities until this flag is fully migrated to the lightning module. """
return self._truncated_bptt_steps() > 0
def _truncated_bptt_steps(self) -> int:
lightning_module = self.trainer.lightning_module
# Give precedence to the LightningModule as the Trainer flag will be removed in v1.5
if lightning_module.truncated_bptt_steps > 0:
return lightning_module.truncated_bptt_steps
return self.trainer.truncated_bptt_steps or 0
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.trainer.lightning_module.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.lightning_module
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| [
"torch.isfinite"
] | 1.4 | neggert/pytorch-lightning | 8208c330eb1a4e8cca243ee525882854dd366921 |
1.4 | import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
class NYUv2Segmentation(BaseDataset):
BASE_DIR = 'nyuv2'
NUM_CLASS = 40
def __init__(self, root=os.path.expanduser('~/.cvss/data'), split='train',
mode=None, transform=None, target_transform=None, **kwargs):
super(NYUv2Segmentation, self).__init__(
root, split, mode, transform, target_transform, **kwargs)
# assert exists and prepare dataset automatically
root = os.path.join(root, self.BASE_DIR)
assert os.path.exists(root), "Please setup the dataset using " + \
"cvss/scripts/prepare_nyuv2.py"
self.images, self.masks = _get_nyuv2_pairs(root, split)
if split != 'test':
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: " + root + "\n")
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def _sync_transform(self, img, mask):
# random mirror
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
w, h = img.size
min_side = min(w, h)
scale = np.random.uniform(0.5, 2.0)
if min_side * scale < 350:
scale = 350 * 1.0 / min_side
long_size = int(self.base_size*scale)
if h > w:
oh = long_size
ow = int(1.0 * w * long_size / h + 0.5)
short_size = ow
else:
ow = long_size
oh = int(1.0 * h * long_size / w + 0.5)
short_size = oh
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
# final transform
return img, self._mask_transform(mask)
def _val_sync_transform(self, img, mask):
# final transform
return img, self._mask_transform(mask)
def _mask_transform(self, mask):
target = np.array(mask).astype('int64') - 1
return torch.from_numpy(target)
def __len__(self):
return len(self.images)
@property
def pred_offset(self):
return 1
def _get_nyuv2_pairs(folder, split='train'):
def get_path_pairs(folder, split_file):
img_paths = []
mask_paths = []
with open(os.path.join(folder, split_file), 'r') as f:
for filename in f.readlines():
filename = filename.strip()
imgpath = os.path.join(folder, 'image', filename)
maskpath = os.path.join(folder, 'mask', filename)
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)
return img_paths, mask_paths
img_paths, mask_paths = get_path_pairs(folder, split_file=split+'.txt')
return img_paths, mask_paths
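# Illustrative usage sketch (assumes the dataset was prepared under ~/.cvss/data/nyuv2 with
# cvss/scripts/prepare_nyuv2.py; base_size/crop_size are assumed to be handled by BaseDataset,
# and the normalization constants are just examples):
#
#   import torchvision.transforms as transform
#   from torch.utils.data import DataLoader
#   input_transform = transform.Compose([
#       transform.ToTensor(),
#       transform.Normalize([.485, .456, .406], [.229, .224, .225])])
#   trainset = NYUv2Segmentation(split='train', mode='train', transform=input_transform,
#                                base_size=520, crop_size=480)
#   loader = DataLoader(trainset, batch_size=4, shuffle=True)
#   img, mask = next(iter(loader))   # img: (4, 3, 480, 480), mask: (4, 480, 480)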
| [
"torch.from_numpy"
] | 1.4.0 | etmwb/cvsegmentation | c283a79f4cf4e78d057f598944b1c252f6533f00 |
1.8 | from __future__ import absolute_import
import os
from collections import namedtuple
import time
from torch.nn import functional as F
from baseline.fast_rcnn.model.utils.creator_tool import AnchorTargetCreator, ProposalTargetCreator
from torch import nn
import torch as t
from baseline.fast_rcnn.utils import array_tool as at
from baseline.fast_rcnn.utils.vis_tool import Visualizer
from baseline.fast_rcnn.utils.config import opt
from torchnet.meter import ConfusionMeter, AverageValueMeter
LossTuple = namedtuple('LossTuple',
['rpn_loc_loss',
'rpn_cls_loss',
'roi_loc_loss',
'roi_cls_loss',
'total_loss'
])
class FasterRCNNTrainer(nn.Module):
"""wrapper for conveniently training. return losses
The losses include:
* :obj:`rpn_loc_loss`: The localization loss for \
Region Proposal Network (RPN).
* :obj:`rpn_cls_loss`: The classification loss for RPN.
* :obj:`roi_loc_loss`: The localization loss for the head module.
* :obj:`roi_cls_loss`: The classification loss for the head module.
* :obj:`total_loss`: The sum of 4 loss above.
Args:
faster_rcnn (model.FasterRCNN):
A Faster R-CNN model that is going to be trained.
"""
def __init__(self, faster_rcnn):
super(FasterRCNNTrainer, self).__init__()
self.faster_rcnn = faster_rcnn
self.rpn_sigma = opt.rpn_sigma
self.roi_sigma = opt.roi_sigma
# target creator create gt_bbox gt_label etc as training targets.
self.anchor_target_creator = AnchorTargetCreator()
self.proposal_target_creator = ProposalTargetCreator()
self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
self.loc_normalize_std = faster_rcnn.loc_normalize_std
self.optimizer = self.faster_rcnn.get_optimizer()
# visdom wrapper
self.vis = Visualizer(env=opt.env)
# indicators for training status
self.rpn_cm = ConfusionMeter(2)
self.roi_cm = ConfusionMeter(21)
self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss
def forward(self, imgs, bboxes, labels, scale):
"""Forward Faster R-CNN and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
Currently, only :math:`N=1` is supported.
Args:
imgs (~torch.autograd.Variable): A variable with a batch of images.
bboxes (~torch.autograd.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
labels (~torch.autograd.Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
scale (float): Amount of scaling applied to
the raw image during preprocessing.
Returns:
namedtuple of 5 losses
"""
n = bboxes.shape[0]
if n != 1:
raise ValueError('Currently only batch size 1 is supported.')
_, _, H, W = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = \
self.faster_rcnn.rpn(features, img_size, scale)
# Since batch size is one, convert variables to singular form
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# Sample RoIs and forward
# it's fine to break the computation graph of rois,
# consider them as constant input
sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(
roi,
at.tonumpy(bbox),
at.tonumpy(label),
self.loc_normalize_mean,
self.loc_normalize_std)
# NOTE: it's all zeros because only batch size 1 is supported for now
sample_roi_index = t.zeros(len(sample_roi))
roi_cls_loc, roi_score = self.faster_rcnn.head(
features,
sample_roi,
sample_roi_index)
# ------------------ RPN losses -------------------#
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
at.tonumpy(bbox),
anchor,
img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_loc,
gt_rpn_loc,
gt_rpn_label.data,
self.rpn_sigma)
# NOTE: default value of ignore_index is -100 ...
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)
_gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]
_rpn_score = at.tonumpy(rpn_score)[at.tonumpy(gt_rpn_label) > -1]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
# ------------------ ROI losses (fast rcnn loss) -------------------#
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)
roi_loc = roi_cls_loc[t.arange(0, n_sample).long().cuda(), \
at.totensor(gt_roi_label).long()]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(
roi_loc.contiguous(),
gt_roi_loc,
gt_roi_label.data,
self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = losses + [sum(losses)]
return LossTuple(*losses)
def train_step(self, imgs, bboxes, labels, scale):
self.optimizer.zero_grad()
losses = self.forward(imgs, bboxes, labels, scale)
losses.total_loss.backward()
self.optimizer.step()
self.update_meters(losses)
return losses
def save(self, save_optimizer=False, save_path=None, **kwargs):
"""serialize models include optimizer and other info
return path where the model-file is stored.
Args:
save_optimizer (bool): whether to save optimizer.state_dict().
save_path (string): where to save the model; if None, save_path
is generated using a time string and info from kwargs.
Returns:
save_path (str): the path where the model is saved.
"""
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if save_path is None:
timestr = time.strftime('%m%d%H%M')
save_path = 'checkpoints/fasterrcnn_%s' % timestr
for k_, v_ in kwargs.items():
save_path += '_%s' % v_
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
def load(self, path, load_optimizer=True, parse_opt=False, cpu_flag: bool = True):
if cpu_flag:
state_dict = t.load(path,
map_location=t.device('cpu'))
else:
state_dict = t.load(path)
if 'model' in state_dict:
self.faster_rcnn.load_state_dict(state_dict['model'])
else: # legacy way, for backward compatibility
self.faster_rcnn.load_state_dict(state_dict)
return self
if parse_opt:
opt._parse(state_dict['config'])
if 'optimizer' in state_dict and load_optimizer:
self.optimizer.load_state_dict(state_dict['optimizer'])
return self
def update_meters(self, losses):
loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}
for key, meter in self.meters.items():
meter.add(loss_d[key])
def reset_meters(self):
for key, meter in self.meters.items():
meter.reset()
self.roi_cm.reset()
self.rpn_cm.reset()
def get_meter_data(self):
return {k: v.value()[0] for k, v in self.meters.items()}
def _smooth_l1_loss(x, t, in_weight, sigma):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
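# For reference (illustrative): with d = x - t, _smooth_l1_loss computes, elementwise,
#   0.5 * sigma**2 * d**2    if |d| < 1 / sigma**2
#   |d| - 0.5 / sigma**2     otherwise
# and sums over the positions selected by in_weight; with sigma = 1 this reduces to the
# usual smooth L1 (Huber) loss with threshold 1.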
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
in_weight = t.zeros(gt_loc.shape).cuda()
# Localization loss is calculated only for positive rois.
# NOTE: unlike the original implementation,
# we don't need inside_weight and outside_weight; they can be computed from gt_label
in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
# Normalize by total number of negative and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss
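# Illustrative usage sketch (assumes a CUDA device; FasterRCNNVGG16 and the dataloader are
# assumptions about the rest of this package, not definitions made in this file):
#
#   faster_rcnn = FasterRCNNVGG16()
#   trainer = FasterRCNNTrainer(faster_rcnn).cuda()
#   for img, bbox, label, scale in dataloader:     # img: (1, 3, H, W), scale: float
#       trainer.train_step(img.cuda().float(), bbox.cuda(), label.cuda(), scale)
#   print(trainer.get_meter_data())
#   trainer.save(save_optimizer=True)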
| [
"torch.zeros",
"torch.device",
"torch.arange",
"torch.save",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.8.1 | ITMO-NSS-team/LightObjRecEnsembler | 1375400f0a681aefdd3ab484e828257fd7aed318 |
1.6 | import copy
from functools import wraps
import numpy as np
import wandb
import torchvision
import torch
import torch.nn.functional as F
from kornia import enhance, filters
from torchvision.transforms import RandomApply, RandomChoice
from atariari.methods.utils import EarlyStopping
from torch import nn
from torch.utils.data import BatchSampler, RandomSampler
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# loss fn
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
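# Note (illustrative): since x and y are L2-normalized first, this loss equals the squared
# Euclidean distance between the normalized vectors, 2 - 2*cos(x, y) = ||x_hat - y_hat||^2:
#
#   x, y = torch.randn(4, 256), torch.randn(4, 256)
#   xn, yn = F.normalize(x, dim=-1), F.normalize(y, dim=-1)
#   torch.allclose(loss_fn(x, y), (xn - yn).pow(2).sum(dim=-1))   # True (up to fp error)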
# augmentation utils
# class RandomApply(nn.Module):
# def __init__(self, fn, p):
# super().__init__()
# self.fn = fn
# self.p = p
# def forward(self, x):
# if random.random() > self.p:
# return x
# return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
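# Worked example (illustrative): with beta = 0.99, a single EMA update moves the target
# weight only 1% of the way toward the online weight:
#   EMA(0.99).update_average(torch.tensor(1.0), torch.tensor(0.0))   # -> tensor(0.9900)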
# MLP class for projector and predictor
class MLP(nn.Module):
def __init__(self, dim, projection_size, hidden_size=4096):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size)
)
def forward(self, x):
return self.net(x)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projecter and predictor nets
class NetWrapper(nn.Module):
def __init__(self, net, projection_size, projection_hidden_size, layer=-2):
super().__init__()
self.net = net
self.layer = layer # final avg-pooling layer
self.projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.hidden = None
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, __, output):
self.hidden = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
@singleton('projector')
def _get_projector(self, hidden):
_, dim = hidden.shape
projector = MLP(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x):
representation = self.get_representation(x)
projector = self._get_projector(representation)
projection = projector(representation)
return projection
# main class
class BYOL(nn.Module):
def __init__(self, net, image_size, grayscale=True, num_frame_stack=1, batch_size=64, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, augment_fn2=None, moving_average_decay=0.99, wandb=None, patience=15):
super().__init__()
# default SimCLR augmentation
#####
# IMPORTANT for kornia: parameters are often float!! e.g. 1. vs 1
# DEFAULT_AUG = nn.Sequential(
# RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
# augs.RandomHorizontalFlip(),
# RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
# input tensor: float + normalized range [0,1]
# augs.RandomResizedCrop(
# size=(image_size, image_size), scale=(0.84, 1.), ratio=(1.,1.), p=1.0)
# augs.Normalize(mean=torch.tensor(
# [0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))
# )
kernel_size = (9, 9) # has to be ODD
kernel_std = np.random.uniform(low=0.1, high=2.0)
kernel_std = (kernel_std,)*2
aug_transform = torchvision.transforms.Compose([
RandomChoice(
[enhance.AdjustBrightness(0.4),
enhance.AdjustBrightness(0.3),
enhance.AdjustBrightness(0.2),
enhance.AdjustBrightness(0.1),
enhance.AdjustBrightness(0.0)]
),
RandomChoice(
[enhance.AdjustContrast(1.0),
enhance.AdjustContrast(0.9),
enhance.AdjustContrast(0.8),
enhance.AdjustContrast(0.7),
enhance.AdjustContrast(0.6)]
),
RandomApply([filters.GaussianBlur2d(
kernel_size, kernel_std)], p=0.5)
# RandomChoice(
# [enhance.AdjustContrast(1.0),
# enhance.AdjustContrast(1.0),
# enhance.AdjustContrast(1.0),
# filters.GaussianBlur2d((1, 1), (1, 1)),
# filters.GaussianBlur2d((3, 3), (1.5, 1.5))]
# )
])
self.augment1 = default(augment_fn, aug_transform)
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(
net, projection_size, projection_hidden_size, layer=hidden_layer)
self.target_encoder = None
self.target_ema_updater = EMA(moving_average_decay)
self.online_predictor = MLP(
projection_size, projection_size, projection_hidden_size)
self.batch_size = batch_size
# get device of network and make wrapper same device
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device is {self.device.type}")
self.to(self.device)
self.wandb = wandb
self.early_stopper = EarlyStopping(
patience=patience, verbose=False, wandb=self.wandb, name="encoder-byol")
if self.wandb:
wandb.watch(self.online_encoder, self.target_encoder,
self.online_predictor)
# send a mock image tensor to instantiate singleton parameters
assert grayscale
nr_channels = num_frame_stack
self.forward(torch.rand(batch_size, nr_channels,
210, 160, device=self.device))
self.opt = torch.optim.Adam(self.parameters(), lr=3e-4)
print(
f"Finished Initialization of BYOL with model {self.online_encoder.net.__class__.__name__}")
@singleton('target_encoder')
def _get_target_encoder(self):
target_encoder = copy.deepcopy(self.online_encoder)
set_requires_grad(target_encoder, False)
return target_encoder
def reset_moving_average(self):
del self.target_encoder
self.target_encoder = None
def update_moving_average(self):
assert self.target_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.target_ema_updater,
self.target_encoder, self.online_encoder)
def forward(self, x):
image_one, image_two = self.augment1(x), self.augment2(x)
online_proj_one = self.online_encoder(image_one)
online_proj_two = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_proj_one)
online_pred_two = self.online_predictor(online_proj_two)
with torch.no_grad():
target_encoder = self._get_target_encoder()
target_proj_one = target_encoder(image_one)
target_proj_two = target_encoder(image_two)
loss_one = loss_fn(online_pred_one, target_proj_two.detach())
loss_two = loss_fn(online_pred_two, target_proj_one.detach())
loss = loss_one + loss_two
return loss.mean()
def logResults(self, epoch_idx, epoch_loss, prefix=""):
print(f"{prefix} Epoch: {epoch_idx}, Loss: {epoch_loss}")
if self.wandb:
self.wandb.log({prefix + '_loss': epoch_loss},
step=epoch_idx, commit=False)
def doOneEpoch(self, nr_epoch, episodes):
mode = "train" if self.training else "val"
data_generator = generate_batch(episodes, self.batch_size, self.device)
for steps, batch in enumerate(data_generator):
print(f"batch nr {steps} for mode {mode}")
loss = self(batch)
self.opt.zero_grad()
loss.backward()
self.opt.step()
self.update_moving_average() # update moving average of target encoder
self.logResults(nr_epoch, loss / steps, prefix=mode)
if mode == "val":
self.early_stopper(-loss / steps, self.online_encoder)
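# Illustrative training sketch (assumptions: `encoder` is any torch.nn.Module whose
# second-to-last layer yields a flat feature vector, and `train_eps` / `val_eps` are lists
# of episodes of frames, as produced elsewhere in this project):
#
#   byol = BYOL(encoder, image_size=(210, 160), num_frame_stack=1, batch_size=64)
#   for epoch in range(10):
#       byol.train()
#       byol.doOneEpoch(epoch, train_eps)
#       byol.eval()
#       byol.doOneEpoch(epoch, val_eps)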
def generate_batch(episodes, batch_size, device):
total_steps = sum([len(e) for e in episodes])
print('Total Steps: {}'.format(total_steps))
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=total_steps),
batch_size, drop_last=True)
for nr, indices in enumerate(sampler):
x = []
episodes_batch = [episodes[i] for i in indices]
# print(f"indices in sampler nr {nr} are {*indices,}")
for e in episodes_batch:
t = np.random.randint(0, len(e))
x.append(e[t])
yield torch.stack(x).float().to(device) / 255.  # scale pixel values to [0, 1]
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.rand",
"torch.stack",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available"
] | 1.6 | mariodoebler/byol-pytorch | 4c1b6d27d86e0a9a39ecef6f6888038355943cd0 |
1.4 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Optional
import torch
from monai.utils import exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
class CheckpointLoader:
"""
CheckpointLoader acts as an Ignite handler to load checkpoint data from file.
It can load variables for network, optimizer, lr_scheduler, etc.
If saving a checkpoint after `torch.nn.DataParallel`, you need to save `model.module` instead,
as PyTorch recommends, and then use this loader to load the model.
Args:
load_path: the file path of checkpoint, it should be a PyTorch `pth` file.
load_dict: target objects that load checkpoint to. examples::
{'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
map_location: when loading the module for distributed training/evaluation,
need to provide an appropriate map_location argument to prevent a process
from stepping into other processes' devices. If map_location is missing, torch.load will
first load the module to CPU and then copy each parameter to where it was
saved, which would result in all processes on the same machine using the
same set of devices.
"""
def __init__(
self, load_path: str, load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None,
) -> None:
assert load_path is not None, "must provide clear path to load checkpoint."
self.load_path = load_path
assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load."
self.logger = logging.getLogger(name)
for k, v in load_dict.items():
if hasattr(v, "module"):
load_dict[k] = v.module
self.load_dict = load_dict
self._name = name
self.map_location = map_location
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
engine.add_event_handler(Events.STARTED, self)
def __call__(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
checkpoint = torch.load(self.load_path, map_location=self.map_location)
if len(self.load_dict) == 1:
key = list(self.load_dict.keys())[0]
if not (key in checkpoint):
checkpoint = {key: checkpoint}
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f"Restored all variables from {self.load_path}")
| [
"torch.load"
] | 1.4 | BRAINSia/MONAI | 04e1c345fc840f5a1b6504ee5857d5a9feb27d84 |
0.6 | import torch
import torch.nn as nn
import torch.nn.functional as F
from segmentation_models_pytorch.base import modules as md
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
skip_channels,
out_channels,
use_batchnorm=True,
attention_type=None,
):
super().__init__()
self.conv1 = md.Conv2dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention1 = md.Attention(attention_type, in_channels=in_channels + skip_channels)
self.conv2 = md.Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention2 = md.Attention(attention_type, in_channels=out_channels)
def forward(self, x, skip=None):
x = F.interpolate(x, scale_factor=2, mode="nearest")
if skip is not None:
if skip.shape[-1] != x.shape[-1]:
skip = F.interpolate(skip, scale_factor=2, mode="nearest")
x = torch.cat([x, skip], dim=1)
x = self.attention1(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.attention2(x)
return x
class CenterBlock(nn.Sequential):
def __init__(self, in_channels, out_channels, use_batchnorm=True):
conv1 = md.Conv2dReLU(
in_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
conv2 = md.Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
super().__init__(conv1, conv2)
class UnetDecoder(nn.Module):
def __init__(
self,
encoder_channels,
decoder_channels,
n_blocks=5,
use_batchnorm=True,
attention_type=None,
center=False,
):
super().__init__()
if n_blocks != len(decoder_channels):
raise ValueError(
"Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
n_blocks, len(decoder_channels)
)
)
# remove first skip with same spatial resolution
encoder_channels = encoder_channels[1:]
# reverse channels to start from head of encoder
encoder_channels = encoder_channels[::-1]
# computing blocks input and output channels
head_channels = encoder_channels[0]
in_channels = [head_channels] + list(decoder_channels[:-1])
skip_channels = list(encoder_channels[1:]) + [0]
out_channels = decoder_channels
if center:
self.center = CenterBlock(head_channels, head_channels, use_batchnorm=use_batchnorm)
else:
self.center = nn.Identity()
# combine decoder keyword arguments
kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)
blocks = [
DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)
for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, out_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, *features):
features = features[1:] # remove first skip with same spatial resolution
features = features[::-1] # reverse channels to start from head of encoder
head = features[0]
skips = features[1:]
x = self.center(head)
for i, decoder_block in enumerate(self.blocks):
skip = skips[i] if i < len(skips) else None
x = decoder_block(x, skip)
return x
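# Shape walkthrough (illustrative; channel/spatial sizes assume a ResNet34-like encoder and
# a 224x224 input, they are not defined by this module): with
# encoder_channels=(3, 64, 64, 128, 256, 512) and decoder_channels=(256, 128, 64, 32, 16),
# the encoder features have spatial sizes 224, 112, 56, 28, 14, 7; the decoder upsamples the
# 512-channel 7x7 head five times, concatenating the 256/128/64/64-channel skips, and returns
# a 16-channel map at 224x224:
#
#   decoder = UnetDecoder(encoder_channels=(3, 64, 64, 128, 256, 512),
#                         decoder_channels=(256, 128, 64, 32, 16))
#   feats = [torch.rand(1, c, s, s) for c, s in
#            zip((3, 64, 64, 128, 256, 512), (224, 112, 56, 28, 14, 7))]
#   decoder(*feats).shape   # -> torch.Size([1, 16, 224, 224])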
| [
"torch.nn.functional.interpolate",
"torch.cat",
"torch.nn.Identity",
"torch.nn.ModuleList"
] | 0.6.3 | navivokaj/segmentation_models.pytorch | 5dbb5f6733515097cecc93f078c09e59ccbeb0c0 |
1.6 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base import Loss
class AdaCos(Loss):
"""PyTorch implementation of AdaCos. See Ref[1] for paper
This implementation differs from most open-source implementations in the following ways:
1) expects raw logits of size (bs x num_classes), not (bs, embedding_size)
2) despite AdaCos being dynamic, still adds an optional margin parameter
3) calculates running average stats of B and θ, not batch-wise stats as in the original paper
4) normalizes input logits, not embeddings and weights
Args:
margin (float): margin in radians
momentum (float): momentum for running average of B and θ
Input:
y_pred (torch.Tensor): shape BS x N_classes
y_true (torch.Tensor): one-hot encoded. shape BS x N_classes
Reference:
[1] AdaCos: Adaptively Scaling Cosine Logits for Effectively Learning Deep Face Representations
"""
def __init__(self, embedding_size, num_classes, final_criterion, margin=0, momentum=0.95):
super(AdaCos, self).__init__()
self.final_criterion = final_criterion
self.margin = margin
self.momentum = momentum
self.prev_s = 10
self.running_B = 1000 # default value is chosen so that initial S is ~10
self.running_theta = math.pi / 4
self.eps = 1e-7
self.register_parameter("weight", torch.nn.Parameter(torch.zeros(num_classes, embedding_size)))
nn.init.xavier_uniform_(self.weight)
self.idx = 0
def forward(self, embedding, y_true):
cos_theta = F.linear(F.normalize(embedding), F.normalize(self.weight)).clamp(-1 + self.eps, 1 - self.eps)
# cos_theta = torch.cos(torch.acos(cos_theta + self.margin))
if y_true.dim() != 1:
y_true_one_hot = y_true.float()
else:
y_true_one_hot = torch.zeros_like(cos_theta)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1.0)
with torch.no_grad():
B_batch = cos_theta[y_true_one_hot.eq(0)].mul(self.prev_s).exp().sum().div(embedding.size(0))
self.running_B = self.running_B * self.momentum + B_batch * (1 - self.momentum)
theta = torch.acos(cos_theta.clamp(-1 + self.eps, 1 - self.eps))
# originally authors use median, but I use mean
theta_batch = theta[y_true_one_hot.ne(0)].mean().clamp_max(math.pi / 4)
self.running_theta = self.running_theta * self.momentum + theta_batch * (1 - self.momentum)
self.prev_s = self.running_B.log() / torch.cos(self.running_theta)
self.idx += 1
if self.idx % 1000 == 0:
print(
f"\nRunning B: {self.running_B:.2f}. Running theta: {self.running_theta:.2f}. Running S: {self.prev_s:.2f}"
)
return self.final_criterion(cos_theta * self.prev_s, y_true_one_hot)
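# Illustrative usage sketch (assumption: `final_criterion` must accept one-hot float targets,
# so a soft-target cross-entropy is used here; the base `Loss` class is assumed to take no
# required constructor arguments):
#
#   soft_ce = lambda logits, target: torch.sum(-target * F.log_softmax(logits, -1), dim=-1).mean()
#   criterion = AdaCos(embedding_size=128, num_classes=10, final_criterion=soft_ce)
#   emb, labels = torch.randn(32, 128), torch.randint(0, 10, (32,))
#   loss = criterion(emb, labels)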
| [
"torch.zeros",
"torch.cos",
"torch.nn.functional.normalize",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.zeros_like"
] | 1.6 | YevheniiSemendiak/pytorch-tools | 11f895ac7af796ca786a3d94bb46de70d7fce87a |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detr.models.backbone import Backbone, Joiner
from detr.models.detr import DETR, PostProcess
from detr.models.position_encoding import PositionEmbeddingSine
from detr.models.segmentation import DETRsegm, PostProcessPanoptic
from detr.models.transformer import Transformer
dependencies = ["torch", "torchvision"]
def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
hidden_dim = 256
backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
backbone_with_pos_enc = Joiner(backbone, pos_enc)
backbone_with_pos_enc.num_channels = backbone.num_channels
transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
if mask:
return DETRsegm(detr)
return detr
def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 42/62.4 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 43.3/63.1 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
Achieves 43.5/63.8 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
"""
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
The last block of ResNet-101 has dilation to increase
output resolution.
Achieves 44.9/64.7 AP/AP50 on COCO val5k.
"""
model = _make_detr("resnet101", dilation=True, num_classes=num_classes)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
def detr_resnet50_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR R50 with 6 encoder and 6 decoder layers.
Achieves 43.4 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet50_dc5_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR-DC5 R50 with 6 encoder and 6 decoder layers.
The last block of ResNet-50 has dilation to increase
output resolution.
Achieves 44.6 on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
def detr_resnet101_panoptic(
pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False
):
"""
DETR-DC5 R101 with 6 encoder and 6 decoder layers.
Achieves 45.1 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction
"""
model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True)
is_thing_map = {i: i <= 90 for i in range(250)}
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcessPanoptic(is_thing_map, threshold=threshold)
return model
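# Illustrative usage sketch (pretrained=True downloads weights from the URLs above; the
# torch.hub path assumes the upstream facebookresearch/detr repo layout):
#
#   model, postprocessor = detr_resnet50(pretrained=True, return_postprocessor=True)
#   model.eval()
#   # or, via torch.hub:
#   # model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=True)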
| [
"torch.hub.load_state_dict_from_url"
] | 1.5.0 | kcetskcaz/detr_package | 0f5cad16c72ec37d7b596d37e12dc32cfb5ef6aa |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn
from detr.util import box_ops
from detr.util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from detr.models.backbone import build_backbone
from detr.models.matcher import build_matcher
from detr.models.segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from detr.models.transformer import build_transformer
class DETR(nn.Module):
""" This is the DETR module that performs object detection """
def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
def forward(self, samples: NestedTensor):
""" The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses
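# For reference (illustrative): box_cxcywh_to_xyxy converts (cx, cy, w, h) to corner format,
# x0 = cx - w/2, y0 = cy - h/2, x1 = cx + w/2, y1 = cy + h/2, e.g.
# (0.5, 0.5, 0.2, 0.4) -> (0.4, 0.3, 0.6, 0.7); loss_giou then averages
# 1 - GIoU(pred, target) over the matched boxes via the num_boxes normalizer.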
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(src_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'boxes': self.loss_boxes,
'masks': self.loss_masks
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depend on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augmentation, but before padding
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
return results
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def build(args):
# the `num_classes` naming here is somewhat misleading.
# it indeed corresponds to `max_obj_id + 1`, where max_obj_id
# is the maximum id for a class in your dataset. For example,
# COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
# As another example, for a dataset that has a single class with id 1,
# you should pass `num_classes` to be 2 (max_obj_id + 1).
# For more details on this, check the following discussion
# https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
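# Illustrative example (not from the original): a custom dataset whose largest label id
# is 12 would pass num_classes = 13, even if it uses fewer than 13 distinct categories,
# because index num_classes is reserved for the "no object" class.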
num_classes = 20 if args.dataset_file != 'coco' else 91
if args.dataset_file == "coco_panoptic":
# for panoptic, we just add a num_classes that is large enough to hold
# max_obj_id + 1, but the exact value doesn't really matter
num_classes = 250
device = torch.device(args.device)
backbone = build_backbone(args)
transformer = build_transformer(args)
model = DETR(
backbone,
transformer,
num_classes=num_classes,
num_queries=args.num_queries,
aux_loss=args.aux_loss,
)
if args.masks:
model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
matcher = build_matcher(args)
weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
weight_dict['loss_giou'] = args.giou_loss_coef
if args.masks:
weight_dict["loss_mask"] = args.mask_loss_coef
weight_dict["loss_dice"] = args.dice_loss_coef
# TODO this is a hack
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ['labels', 'boxes', 'cardinality']
if args.masks:
losses += ["masks"]
criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
eos_coef=args.eos_coef, losses=losses)
criterion.to(device)
postprocessors = {'bbox': PostProcess()}
if args.masks:
postprocessors['segm'] = PostProcessSegm()
if args.dataset_file == "coco_panoptic":
is_thing_map = {i: i <= 90 for i in range(201)}
postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
return model, criterion, postprocessors
| [
"torch.nn.Linear",
"torch.device",
"torch.cat",
"torch.stack",
"torch.nn.functional.l1_loss",
"torch.no_grad",
"torch.ones",
"torch.full_like",
"torch.nn.Conv2d",
"torch.full",
"torch.distributed.all_reduce",
"torch.nn.functional.softmax",
"torch.nn.Embedding"
] | 1.5.0 | kcetskcaz/detr_package | 0f5cad16c72ec37d7b596d37e12dc32cfb5ef6aa |
1.4 | import logging
import torch.nn as nn
from . import arch as archs
logger = logging.getLogger()
def build_model(cfg_model):
if cfg_model.get('pretrained', False):
info = "=> building pre-trained model {}".format(cfg_model['arch'])
model = archs.__dict__[cfg_model.arch](pretrained=True)
in_features = model.fc.in_features
model.fc = nn.Linear(in_features, cfg_model.num_classes)
else:
info = "=> building model {}".format(cfg_model.arch)
model = archs.__dict__[cfg_model.arch](num_classes=cfg_model.num_classes)
logger.info(info)
return model
| [
"torch.nn.Linear"
] | 1.4 | ChaseMonsterAway/vedacls | 91657f688dcaf3f9f4c58eb40a8f5c8f34a4bd73 |
1.10 | import os
import logging
from typing import Dict, Union
from datetime import timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import mlflow
import torch
import pytorch_lightning as pl
import pprint
pp = pprint.PrettyPrinter(indent=4)
from arnet import utils
from arnet.modeling.models import build_model
logger = logging.getLogger(__name__)
def build_test_logger(logged_learner):
logger = pl.loggers.TensorBoardLogger(
logged_learner.logger_save_dir,
name=logged_learner.logger_name,
version=logged_learner.logger_version + '_test'
)
return logger
class Learner(pl.LightningModule):
def __init__(self, cfg):
"""
model: torch.nn.Module
cfg: model-agnostic experiment configs
"""
#super(Learner, self).__init__()
super().__init__()
self.cfg = cfg
self.image = 'MAGNETOGRAM' in cfg.DATA.FEATURES
self.model = build_model(cfg)
self.save_hyperparameters()  # write to self.hparams; saved with the checkpoint and used for the tensorboard hp_metric
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def on_load_checkpoint(self, checkpoint) -> None:
# log_dev / lightning_logs / version_0 / checkpoints / epoch=0-step=4.ckpt
# =======================================
# save_dir / (name) (version)
# ------- root_dir ---------/
# ------------ log_dir ----------------/
# ckpt_list = checkpoint['hyper_parameters']['cfg']['LEARNER']['CHECKPOINT'].split('/')
# self.logger_save_dir, self.logger_name, self.logger_version = (
# ckpt_list[-5], ckpt_list[-4], ckpt_list[-3])
# I gave up modifying test log dir because it requires checkpoint['callbacks']["ModelCheckpoint{'monitor': 'validation0/tss', 'mode': 'max', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}"]['best_model_path']
pass
def grad_norm(self, norm_type: Union[float, int, str]) -> Dict[str, float]:
"""Compute each parameter's gradient's norm and their overall norm.
The overall norm is computed over all gradients together, as if they
were concatenated into a single vector.
Args:
norm_type: The type of the used p-norm, cast to float if necessary.
Can be ``'inf'`` for infinity norm.
Return:
norms: The dictionary of p-norms of each parameter's gradient and
a special entry for the total p-norm of the gradients viewed
as a single vector.
"""
#norm_type = float(norm_type)
norms, all_norms = {}, []
for name, p in self.named_parameters():
if name.split('.')[0] == 'model':
name = name[6:]
if p.grad is None:
continue
param_norm = float(p.data.norm(norm_type))
grad_norm = float(p.grad.data.norm(norm_type))
norms[f'grad_{norm_type}_norm/{name}'] = {
'param': param_norm,
'grad': grad_norm,
}
all_norms.append(grad_norm)  # accumulate gradient norms (not parameter norms) so the total below matches the docstring
total_norm = float(torch.tensor(all_norms).norm(norm_type))
norms[f'grad_{norm_type}_norm/total'] = round(total_norm, 3)
return norms
def _check_nan_loss(self, loss):
if torch.isnan(loss):
norms = self.grad_norm(1)
import json
print(json.dumps(norms, indent=2))
def training_step(self, batch, batch_idx):
loss = self.model.get_loss(batch)
self._check_nan_loss(loss)
# Scalar(s)
self.log('train/loss', loss)
mlflow.log_metric('train/loss', loss.item(), step=self.global_step)
mlflow.log_metric('train/epoch', self.trainer.current_epoch, step=self.global_step)
if self.image:
# Text
if self.global_step in [0] or batch_idx == 0:
self.log_meta(self.model.result)
# Input videos (padded)
if False: #self.global_step in [0] or batch_idx == 0:
self.log_video('train/inputs', x)
# Layer weight
# not changing fast enough within first epoch
if False: #self.current_epoch == 0 and batch_idx in [0, 1, 2, 5, 10, 20, 50, 100]:
self.log_layer_weights('weight', ['convs.conv1'])
# Middle layer features
if False: #self.global_step in [0] or batch_idx == 0:
self.log_layer_activations('train features', self.model.result['video'], self.cfg.LEARNER.VIS.ACTIVATIONS)
# Weight histograms
if True: #self.global_step in [0] or batch_idx == 0:
for layer_name in self.cfg.LEARNER.VIS.HISTOGRAM:
self.logger.experiment.add_histogram("weights/{} kernel".format(layer_name),
utils.get_layer(self.model, layer_name).weight, self.global_step)
self.logger.experiment.flush()
return {'loss': loss}
def validation_step(self, batch, batch_idx, dataloader_idx):
loss = self.model.get_loss(batch)
result = self.model.result
result.update({'val_loss': loss})
return result
def validation_epoch_end(self, outputs):
for dataloader_idx, dataloader_outputs in enumerate(outputs):
tag = f'validation{dataloader_idx}'
avg_val_loss = torch.stack([out['val_loss'] for out in dataloader_outputs]).mean()
self.log(tag + '/loss', avg_val_loss)
mlflow.log_metric(tag + '/loss', avg_val_loss.item(), step=self.global_step)
if True:
#step = -1 if self.global_step == 0 else None # before training
step = None # use global_step
self.log_layer_weights('weight', ['convs.conv1'], step=step)
y_true = torch.cat([out['y_true'] for out in dataloader_outputs])
y_prob = torch.cat([out['y_prob'] for out in dataloader_outputs])
self.trainer.datamodule.fill_prob(tag, self.global_step, y_prob.detach().cpu().numpy())
scores, cm2, _ = utils.get_metrics_probabilistic(y_true, y_prob, criterion=None)
self.log_scores(tag, scores, step=self.global_step) # pp.pprint(scores)
self.log_cm(tag + '/cm2', cm2, step=self.global_step)
self.log_eval_plots(tag, y_true, y_prob, step=self.global_step)
mlflow.log_artifacts(self.logger.log_dir, 'tensorboard/train_val')
def test_step(self, batch, batch_idx):
loss = self.model.get_loss(batch)
result = self.model.result
result.update({'test_loss': loss})
return result
def test_epoch_end(self, outputs):
avg_test_loss = torch.stack([out['test_loss'] for out in outputs]).mean()
self.log('test/loss', avg_test_loss)
y_true = torch.cat([out['y_true'] for out in outputs])
y_prob = torch.cat([out['y_prob'] for out in outputs])
self.trainer.datamodule.fill_prob('test', self.global_step, y_prob.detach().cpu().numpy())
scores, cm2, thresh = utils.get_metrics_probabilistic(y_true, y_prob, criterion=None)
#self.thresh = thresh
logger.info(scores)
logger.info(cm2)
self.log_scores('test', scores)
self.log_cm('test/cm2', cm2)
self.log_eval_plots('test', y_true, y_prob)
mlflow.log_artifacts(self.logger.log_dir, 'tensorboard/test')
def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):
_ = self.model.get_loss(batch)
y_prob = self.model.result['y_prob']
###
#self.thresh = 0.5
###
return y_prob #y_prob >= 0.5 #self.thresh
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.cfg.LEARNER.LEARNING_RATE)
def on_train_end(self):
for tag, df in self.trainer.datamodule.val_history.items():
if tag == 'test':
continue # val_history['test'] does not update every epoch.
tmp_path = 'outputs/val_predictions.csv'
df.to_csv(tmp_path)
mlflow.log_artifact(tmp_path, tag) # tag in ['validation0', ..., 'test']
def on_test_end(self):
tmp_path = 'outputs/test_predictions.csv'
self.trainer.datamodule.val_history['test'].to_csv(tmp_path)
mlflow.log_artifact(tmp_path, 'test')
def log_meta(self, outputs, step=None):
video = outputs['video']
meta = outputs['meta']
video = video.detach().cpu().numpy()
y_true = outputs['y_true'].detach().cpu().numpy()
y_prob = outputs['y_prob'].detach().cpu().numpy()
info = utils.generate_batch_info_classification(video, meta, y_true=y_true, y_prob=y_prob)
step = step or self.global_step
self.logger.experiment.add_text("batch info", info.to_markdown(), step)
return info
def log_video(self, tag, video, size=None, normalized=False, step=None):
from skimage.transform import resize
size = np.round(size.detach().cpu().numpy() * [38, 78] + [78, 157]).astype(int) if size is not None else None  # guard against the default size=None
# video: [N, C, T, H, W]
if video.shape[0] > 8:
video = video[:8]
video = video.detach().permute(0, 2, 1, 3, 4).to('cpu', non_blocking=True) # convert to numpy may not be efficient in production
# (N,C,D,H,W) -> (N,T,C,H,W)
step = step or self.global_step
if not normalized:
video = utils.array_to_float_video(video * 50, low=-200, high=200, perc=False)
self.logger.experiment.add_video(tag, video, step, fps=10)
vs = video.detach().cpu().numpy()
for i, v in enumerate(vs):
for j, image in enumerate(v):
image = image.transpose(1,2,0)
if size is not None:
image = resize(image, size[i])
mlflow.log_image(image, tag+f'/{i}_{j}.png')
def log_layer_weights(self, tag, layer_names, step=None):
step = step or self.global_step
from arnet.modeling.models import MODEL_REGISTRY
if (isinstance(self.model, MODEL_REGISTRY.get('CNN_Li2020')) or
isinstance(self.model, MODEL_REGISTRY.get('SimpleC3D'))):
for layer_name in layer_names:
layer = utils.get_layer(self.model, layer_name)
if isinstance(layer, torch.nn.Conv3d):
# Unscaled
fig = utils.draw_conv2d_weight(layer.weight)
image_tensor = utils.fig2rgb(fig)
save_name = tag + f'/unscaled/{layer_name}'
self.logger.experiment.add_image(save_name, image_tensor, step)
save_name += f'/{step}.png'
mlflow.log_figure(fig, save_name)
# Set vmin vmax
fig = utils.draw_conv2d_weight(layer.weight, vmin=-0.3, vmax=0.3) # -1/+1 for lr 1e-2
image_tensor = utils.fig2rgb(fig)
save_name = tag + f'/uniform_scaled/{layer_name}'
self.logger.experiment.add_image(save_name, image_tensor, step)
save_name += f'/{step}.png'
mlflow.log_figure(fig, save_name)
def log_layer_activations(self, tag, x, layer_names, step=None):
step = step or self.global_step
import copy
model = copy.copy(self.model) # shallow copy, the original model keeps training mode and no activation hook attached
activations = utils.register_activations(model, layer_names)
model.eval()
_ = self.model(x)
for layer_name in activations:
features = activations[layer_name].detach().cpu()
if features.shape[0] > 8:
features = features[:8]
for c in range(features.shape[1]):
features_c = features[:,[c],:,:,:].permute(0,2,1,3,4)
features_c = utils.array_to_float_video(features_c, 0.1, 99.9)
self.logger.experiment.add_video(
'{}/{}/ch{}'.format(tag, layer_name, c),
features_c,
step)
def log_scores(self, tag, scores: dict, step=None):
step = step or self.global_step
for k, v in scores.items():
#self.logger.experiment.add_scalar(tag + '/' + k, v, step)
self.log(tag + '/' + k, v)  # weird problem
mlflow.log_metrics({tag + '/' + k: v.item() for k, v in scores.items()},
step=step)
def log_cm(self, tag, cm, labels=None, step=None):
step = step or self.global_step
fig = utils.draw_confusion_matrix(cm.cpu())
image_tensor = utils.fig2rgb(fig)
self.logger.experiment.add_image(tag, image_tensor, step)
mlflow.log_figure(fig, tag + f'/{step}.png')
def log_eval_plots(self, tag, y_true, y_prob, step=None):
y_true = y_true.detach().cpu()
y_prob = y_prob.detach().cpu()
step = step or self.global_step
reliability = utils.draw_reliability_plot(y_true, y_prob, n_bins=10)
mlflow.log_figure(reliability, tag + f'/reliability/{step}.png')
reliability = utils.fig2rgb(reliability)
self.logger.experiment.add_image(tag + '/reliability', reliability, step)
roc = utils.draw_roc(y_true, y_prob)
mlflow.log_figure(roc, tag + f'/roc/{step}.png')
roc = utils.fig2rgb(roc)
self.logger.experiment.add_image(tag + '/roc', roc, step)
ssp = utils.draw_ssp(y_true, y_prob)
mlflow.log_figure(ssp, tag + f'/ssp/{step}.png')
ssp = utils.fig2rgb(ssp)
self.logger.experiment.add_image(tag + '/ssp', ssp, step)
| [
"torch.cat",
"torch.stack",
"torch.isnan",
"torch.tensor"
] | 1.10.0 | ZeyuSun/flare-prediction-smarp | ad60163eb83b47ba39e898beb387031d349e2ed6 |
1.4 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as functional
from .noisy_linear import NoisyLinear
class Enet(nn.Module):
def __init__(self) -> None:
super(Enet, self).__init__()
return
def get_max_action(self, observation: torch.Tensor) -> int:
"""
Get the action with the maximum q-value for an observation.
Args:
observation(torch.Tensor): an observation
Returns:
int: action with the maximum q-value for the current state
"""
qvals = self.forward(observation)
return int(torch.argmax(qvals, dim=-1).cpu().detach().numpy())
class Epn(Enet):
def __init__(self, states_size: np.ndarray, action_size: np.ndarray, settings: dict) -> None:
"""
Initializes the neural network.
Args:
states_size: Size of the input space.
action_size:Size of the action space.
settings: dictionary with settings
"""
super(Epn, self).__init__()
self.batch_size = settings["batch_size"]
self.noisy_net = settings['noisy_nets']
layers_size = settings["layers_sizes"][0] * 2
if not self.noisy_net:
self.FC1 = nn.Linear(int(states_size), layers_size)
self.FC2 = nn.Linear(layers_size, layers_size)
self.FC3 = nn.Linear(layers_size, int(action_size))
else:
self.FC1 = NoisyLinear(int(states_size), layers_size)
self.FC2 = NoisyLinear(layers_size, layers_size)
self.FC3 = NoisyLinear(layers_size, int(action_size))
self.reset()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward step of the neural network
Args:
x(torch.Tensor): observation or a batch of observations
Returns:
torch.Tensor: q-values for all observations and actions, size: batch_size x actions_size
"""
x = functional.relu(self.FC1(x))
x = functional.relu(self.FC2(x))
return functional.softplus(self.FC3(x))
def reset(self) -> None:
"""
Resets the weights of the neural network layers.
Returns:
None
"""
torch.nn.init.xavier_uniform_(self.FC1.weight.data)
torch.nn.init.xavier_uniform_(self.FC2.weight.data)
torch.nn.init.xavier_uniform_(self.FC3.weight.data)
if self.noisy_net:
self.reset_noise()
def reset_noise(self) -> None:
"""
Resets the noise of the noisy layers.
"""
self.FC1.reset_noise()
self.FC2.reset_noise()
self.FC3.reset_noise()
| [
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_",
"torch.argmax"
] | 1.4.0 | hbutsuak95/iv_rl | 0f72a8f077a238237027ea96b7d1160c35ac9959 |
1.1 | """Trains a hypergraph machine on MNIST and generates Figure 1 panels b and c
of Discrete and continuous learning machines
"""
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from hypergraph_machines.hypergraph_machine import HypergraphMachine
from hypergraph_machines.utils import train, test, visualise_graph
from hypergraph_machines.dataset_loader import load_dataset
from hypergraph_machines.utils import BestModelSaver, generate_timestamp, reg_loss
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context="paper", style="white")
plt.ion()
device = torch.device("cuda")
timestamp = generate_timestamp()
batch_size, num_epochs = 128, 100
train_loader, test_loader, image_size = load_dataset("MNIST", batch_size, data_folder="../../data")
model = HypergraphMachine((1,28,28), 10, number_of_classes = 10, tol = 1e-6,
limit_image_upsample = 2, prune=True).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr= 3e-3)
saver = BestModelSaver('./checkpoints' + timestamp)
for epoch in range(1, num_epochs + 1):
print("starting epoch {} of {}".format(epoch, num_epochs))
train(model, device, train_loader, optimizer, epoch,
loss_func = reg_loss, loss_inputs = [model, F.nll_loss, 1])
loss, acc = test(model, device, test_loader)
saver.save(model, optimizer, epoch, loss, acc)
if epoch % 10 == 1:
f,ax = plt.subplots()
visualise_graph(model, ax=ax)
f.suptitle("epoch {}".format(epoch))
| [
"torch.device"
] | 1.1.0 | Veos-Digital/hypergraph_machines | 0d24cd89766c45c6c1ffb2967438ef82288a5d3c |
1.4 | import time
import copy
import pickle
import warnings
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, auc
def sparse_to_tuple(sparse_mx):
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
def get_scores(edges_pos, edges_neg, A_pred, adj_label):
# get logits and labels
preds = A_pred[edges_pos.T]
preds_neg = A_pred[edges_neg.T]
logits = np.hstack([preds, preds_neg])
labels = np.hstack([np.ones(preds.size(0)), np.zeros(preds_neg.size(0))])
# logits = A_pred.view(-1)
# labels = adj_label.to_dense().view(-1)
# calc scores
roc_auc = roc_auc_score(labels, logits)
ap_score = average_precision_score(labels, logits)
precisions, recalls, thresholds = precision_recall_curve(labels, logits)
pr_auc = auc(recalls, precisions)
warnings.simplefilter('ignore', RuntimeWarning)
f1s = np.nan_to_num(2*precisions*recalls/(precisions+recalls))
best_comb = np.argmax(f1s)
f1 = f1s[best_comb]
pre = precisions[best_comb]
rec = recalls[best_comb]
thresh = thresholds[best_comb]
# calc reconstructed adj_mat and accuracy with the threshold for best f1
adj_rec = copy.deepcopy(A_pred)
adj_rec[adj_rec < thresh] = 0
adj_rec[adj_rec >= thresh] = 1
labels_all = adj_label.to_dense().view(-1).long()
preds_all = adj_rec.view(-1).long()
recon_acc = (preds_all == labels_all).sum().float() / labels_all.size(0)
results = {'roc': roc_auc,
'pr': pr_auc,
'ap': ap_score,
'pre': pre,
'rec': rec,
'f1': f1,
'acc': recon_acc,
'adj_recon': adj_rec}
return results
def train_model(args, dl, vgae):
optimizer = torch.optim.Adam(vgae.parameters(), lr=args.lr)
# weights for log_lik loss
adj_t = dl.adj_train
norm_w = adj_t.shape[0]**2 / float((adj_t.shape[0]**2 - adj_t.sum()) * 2)
pos_weight = torch.FloatTensor([float(adj_t.shape[0]**2 - adj_t.sum()) / adj_t.sum()]).to(args.device)
# move input data and label to gpu if needed
features = dl.features.to(args.device)
adj_label = dl.adj_label.to_dense().to(args.device)
best_vali_criterion = 0.0
best_state_dict = None
vgae.train()
for epoch in range(args.epochs):
t = time.time()
A_pred = vgae(features)
optimizer.zero_grad()
loss = log_lik = norm_w*F.binary_cross_entropy_with_logits(A_pred, adj_label, pos_weight=pos_weight)
if not args.gae:
kl_divergence = 0.5/A_pred.size(0) * (1 + 2*vgae.logstd - vgae.mean**2 - torch.exp(2*vgae.logstd)).sum(1).mean()
loss -= kl_divergence
A_pred = torch.sigmoid(A_pred).detach().cpu()
r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)
print('Epoch{:3}: train_loss: {:.4f} recon_acc: {:.4f} val_roc: {:.4f} val_ap: {:.4f} f1: {:.4f} time: {:.4f}'.format(
epoch+1, loss.item(), r['acc'], r['roc'], r['ap'], r['f1'], time.time()-t))
if r[args.criterion] > best_vali_criterion:
best_vali_criterion = r[args.criterion]
best_state_dict = copy.deepcopy(vgae.state_dict())
# r_test = get_scores(dl.test_edges, dl.test_edges_false, A_pred, dl.adj_label)
r_test = r
print(" test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}".format(
r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))
loss.backward()
optimizer.step()
print("Done! final results: test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}".format(
r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))
vgae.load_state_dict(best_state_dict)
return vgae
def gen_graphs(args, dl, vgae):
adj_orig = dl.adj_orig
assert adj_orig.diagonal().sum() == 0
# sp.csr_matrix
if args.gae:
pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0_gae.pkl', 'wb'))
else:
pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0.pkl', 'wb'))
# sp.lil_matrix
pickle.dump(dl.features_orig, open(f'graphs/{args.dataset}_features.pkl', 'wb'))
features = dl.features.to(args.device)
for i in range(args.gen_graphs):
with torch.no_grad():
A_pred = vgae(features)
A_pred = torch.sigmoid(A_pred).detach().cpu()
r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)
adj_recon = A_pred.numpy()
np.fill_diagonal(adj_recon, 0)
# np.ndarray
if args.gae:
filename = f'graphs/{args.dataset}_graph_{i+1}_logits_gae.pkl'
else:
filename = f'graphs/{args.dataset}_graph_{i+1}_logits.pkl'
pickle.dump(adj_recon, open(filename, 'wb'))
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.sigmoid",
"torch.no_grad",
"torch.exp"
] | 1.4.0 | coodest/GAug | ef6ab307e3dfd3e9e0a653d385dc1f41963f9ba8 |
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from starter_code.modules.networks import MLP, MinigridCNN
from mnist.embedded_mnist import MNIST_CNN
class SimpleValueFn(nn.Module):
def __init__(self, state_dim, hdim):
super(SimpleValueFn, self).__init__()
self.value_net = MLP(dims=[state_dim, *hdim, 1])
def forward(self, state):
state_values = self.value_net(state)
return state_values
class CNNValueFn(nn.Module):
def __init__(self, state_dim):
super(CNNValueFn, self).__init__()
self.state_dim = state_dim
if self.state_dim == (1, 64, 64):
self.encoder = MNIST_CNN(1)
self.decoder = lambda x: x
elif self.state_dim == (7, 7, 3):
self.encoder = MinigridCNN(*state_dim[:-1])
self.decoder = nn.Linear(self.encoder.image_embedding_size, 1)
else:
assert False
def forward(self, state):
state_values = self.decoder(self.encoder(state))
return state_values
| [
"torch.nn.Linear"
] | 1.1.0 | mbchang/societal-decision-making | 23fd6de4df33f985d360330a9d5a2c29faeb8e52 |
1.2 | # MIT License
#
# Copyright (C) IBM Corporation 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Robustness evaluation module."""
import numpy as np
from minio import Minio
import torch
import torch.utils.data
from art.classifiers.pytorch import PyTorchClassifier
from art.attacks.evasion.fast_gradient import FastGradientMethod
import zipfile
import importlib
import re
from robustness_util import get_metrics
def robustness_evaluation(object_storage_url, object_storage_username, object_storage_password,
data_bucket_name, result_bucket_name, model_id,
feature_testset_path='processed_data/X_test.npy',
label_testset_path='processed_data/y_test.npy',
clip_values=(0, 1),
nb_classes=2,
input_shape=(1, 3, 64, 64),
model_class_file='model.py',
model_class_name='model',
LossFn='',
Optimizer='',
epsilon=0.2):
url = re.compile(r"https?://")
cos = Minio(url.sub('', object_storage_url),
access_key=object_storage_username,
secret_key=object_storage_password,
secure=False)
dataset_filenamex = "X_test.npy"
dataset_filenamey = "y_test.npy"
weights_filename = "model.pt"
model_files = model_id + '/_submitted_code/model.zip'
cos.fget_object(data_bucket_name, feature_testset_path, dataset_filenamex)
cos.fget_object(data_bucket_name, label_testset_path, dataset_filenamey)
cos.fget_object(result_bucket_name, model_id + '/' + weights_filename, weights_filename)
cos.fget_object(result_bucket_name, model_files, 'model.zip')
# Load PyTorch model definition from the source code.
zip_ref = zipfile.ZipFile('model.zip', 'r')
zip_ref.extractall('model_files')
zip_ref.close()
modulename = 'model_files.' + model_class_file.split('.')[0].replace('-', '_')
'''
We require users to specify where the model class is located or to follow
a naming convention we have provided.
'''
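# Hypothetical example of the derivation above: model_class_file = 'my-model.py' with
# model_class_name = 'MyNet' resolves to importlib.import_module('model_files.my_model')
# followed by getattr(..., 'MyNet').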
model_class = getattr(importlib.import_module(modulename), model_class_name)
# load & compile model
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model_class().to(device)
model.load_state_dict(torch.load(weights_filename, map_location=device))
# Define Loss and optimizer function for the PyTorch model
if LossFn:
loss_fn = eval(LossFn)
else:
loss_fn = torch.nn.CrossEntropyLoss()
if Optimizer:
optimizer = eval(Optimizer)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# create pytorch classifier
classifier = PyTorchClassifier(clip_values, model, loss_fn, optimizer, input_shape, nb_classes)
# load test dataset
x = np.load(dataset_filenamex)
y = np.load(dataset_filenamey)
# craft adversarial samples using FGSM
crafter = FastGradientMethod(classifier, eps=epsilon)
x_samples = crafter.generate(x)
# obtain all metrics (robustness score, perturbation metric, reduction in confidence)
metrics, y_pred_orig, y_pred_adv = get_metrics(model, x, x_samples, y)
print("metrics:", metrics)
return metrics
| [
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.load"
] | 1.2.0 | virkt25/adversarial-robustness-toolbox | 3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd |
1.8 | import numpy as np
from torch import nn
import torch.optim as optim
import torch
import matplotlib.pyplot as plt
import pandas as pd
import data_loader as dl
import time
import copy
import utility
import yaml
import trainer
from PIL import Image
from os import path
Image.MAX_IMAGE_PIXELS = None
from scipy.io import savemat
from sklearn.model_selection import train_test_split
from torchvision import transforms
import os.path
from os import path
BATCH_SIZE = 32
EPOCHS = 100
LR = 0.001
ANNEAL_STRAT = "cos"
FEATURE_EXTRACT = False
APPLY_ZCA_TRANS = True
DATA_DIR = 'data/train_images'
NETS = ['resnext'] # train on resnext
IMAGE_SIZES = [64, 128, 224] # train for 4 resolutions
def main():
# Load the meta data file
df = pd.read_csv('./data/train.csv')
df, label_encoder = utility.encode_labels(df)
num_classes = len(df['label'].value_counts())
np.save('./data/label_encoder_classes.npy', label_encoder.classes_)
# Generate the ZCA matrix if enabled
for image_size in IMAGE_SIZES: # train for every res
if APPLY_ZCA_TRANS:
print("Making ZCA matrix ...")
data_loader = dl.get_full_data_loader(df, data_dir=DATA_DIR,
batch_size=BATCH_SIZE,
image_size=image_size)
train_dataset_arr = next(iter(data_loader))[0].numpy()
zca = utility.ZCA()
zca.fit(train_dataset_arr)
zca_dic = {"zca_matrix": zca.ZCA_mat, "zca_mean": zca.mean}
savemat("./data/zca_data.mat", zca_dic)
print("Completed making ZCA matrix")
# Define normalization
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
# Define specific transforms
train_transform = transforms.Compose([
utility.AddPadding(),
transforms.Resize((image_size,image_size)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=(-90, 90)),
transforms.RandomVerticalFlip(p=0.5),
transforms.ColorJitter(.4,.4,.4),
transforms.ToTensor(),
normalize
])
valid_transform = transforms.Compose([
utility.AddPadding(),
transforms.Resize((image_size,image_size)),
transforms.ToTensor(),
normalize
])
# Create a train and valid dataset
train_dataset = dl.HotelImagesDataset(df, root_dir=DATA_DIR,
transform=train_transform)
valid_dataset = dl.HotelImagesDataset(df, root_dir=DATA_DIR,
transform=valid_transform)
# Get a train and valid data loader
train_loader, valid_loader = dl.get_train_valid_loader(train_dataset,
valid_dataset,
batch_size=BATCH_SIZE,
random_seed=0)
for net_type in NETS: # train for every net
model = utility.initialize_net(num_classes, net_type,
feature_extract=FEATURE_EXTRACT)
# If old model exists, take state from it
if path.exists(f"./models/model_{net_type}.pt"):
print("Resuming training on trained model ...")
model = utility.load_latest_model(model, f'./models/model_{net_type}.pt')
# Gather the parameters to be optimized/updated in this run.
params_to_update = utility.get_model_params_to_train(model, FEATURE_EXTRACT)
# Send model to GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Make criterion
criterion = nn.CrossEntropyLoss()
# Make optimizer + scheduler
optimizer = torch.optim.SGD(params_to_update, lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
factor=0.01,
patience=3)
trained_model = trainer.train_model(device=device,
model=model,
optimizer=optimizer,
criterion=criterion,
train_loader=train_loader,
valid_loader=valid_loader,
scheduler=scheduler,
net_type=net_type,
epochs=EPOCHS,
apply_zca_trans=APPLY_ZCA_TRANS)
utility.save_current_model(trained_model,
f"./models/model_{net_type}.pt")
if __name__ == "__main__":
main()
| [
"torch.optim.SGD",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.CrossEntropyLoss"
] | 1.8.1 | micqu/hotel-challenge | 9373d5bd69a48e22b043b1410a57ec051f63dd45 |
1.5 | """
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import pytest
import torch
from examples.torch.common.distributed import configure_distributed
from examples.torch.common.execution import ExecutionMode
from examples.torch.common.execution import get_device
from examples.torch.common.execution import prepare_model_for_execution
from examples.torch.common.model_loader import load_model
from examples.torch.common.sample_config import SampleConfig
from nncf.api.compression import CompressionStage
from nncf.torch import register_default_init_args
from nncf.torch.checkpoint_loading import load_state
from nncf.common.graph.definitions import MODEL_INPUT_OP_NAME
from nncf.config import NNCFConfig
from nncf.torch.nncf_network import LEGACY_ACT_STORAGE_NAME
from nncf.torch.nncf_network import MODEL_WRAPPED_BY_NNCF_ATTR_NAME
from tests.common.helpers import TEST_ROOT
from tests.torch.helpers import create_ones_mock_dataloader
from tests.torch.helpers import register_bn_adaptation_init_args
from tests.torch.quantization.test_range_init import SingleConv2dIdentityModel
from tests.torch.test_compressed_graph import get_basic_quantization_config
from tests.torch.helpers import create_compressed_model_and_algo_for_test
from tests.torch.helpers import Command
from tests.common.helpers import get_cli_dict_args
from tests.torch.test_sanity_sample import create_command_line
GLOBAL_CONFIG = {
TEST_ROOT.joinpath("torch", "data", "configs", "squeezenet1_1_cifar10_rb_sparsity_int8.json"): [
{
'checkpoint_name': 'squeezenet1_1_custom_cifar10_rb_sparsity_int8_dp.pth',
'dataset': "cifar10",
'execution_mode': ExecutionMode.GPU_DATAPARALLEL,
},
{
'checkpoint_name': 'squeezenet1_1_custom_cifar10_rb_sparsity_int8_ddp.pth',
'dataset': "cifar10",
'execution_mode': ExecutionMode.MULTIPROCESSING_DISTRIBUTED,
},
],
}
CONFIG_PARAMS = []
for config_path_, cases_list_ in GLOBAL_CONFIG.items():
for case_params_ in cases_list_:
CONFIG_PARAMS.append((config_path_, case_params_,))
@pytest.fixture(scope='module', params=CONFIG_PARAMS,
ids=['-'.join([str(p[0]), p[1]['execution_mode']]) for p in CONFIG_PARAMS])
def _params(request, backward_compat_models_path):
if backward_compat_models_path is None:
pytest.skip('Path to models weights for backward compatibility testing is not set,'
' use --backward-compat-models option.')
config_path, case_params = request.param
checkpoint_path = str(os.path.join(backward_compat_models_path, case_params['checkpoint_name']))
return {
'sample_config_path': config_path,
'checkpoint_path': checkpoint_path,
'execution_mode': case_params['execution_mode'],
'dataset': case_params['dataset']
}
def test_model_can_be_loaded_with_resume(_params):
p = _params
sample_config_path = p['sample_config_path']
checkpoint_path = p['checkpoint_path']
config = SampleConfig.from_json(str(sample_config_path))
nncf_config = NNCFConfig.from_json(str(sample_config_path))
config.execution_mode = p['execution_mode']
config.current_gpu = 0
config.device = get_device(config)
config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
if config.distributed:
config.dist_url = "tcp://127.0.0.1:9898"
config.dist_backend = "nccl"
config.rank = 0
config.world_size = 1
configure_distributed(config)
model_name = config['model']
model = load_model(model_name,
pretrained=False,
num_classes=config.get('num_classes', 1000),
model_params=config.get('model_params'))
nncf_config = register_default_init_args(nncf_config, train_loader=create_ones_mock_dataloader(nncf_config))
model.to(config.device)
model, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)
model, _ = prepare_model_for_execution(model, config)
if config.distributed:
compression_ctrl.distributed()
checkpoint = torch.load(checkpoint_path, map_location='cpu')
load_state(model, checkpoint['state_dict'], is_resume=True)
def test_loaded_model_evals_according_to_saved_acc(_params, tmp_path, dataset_dir):
p = _params
config_path = p['sample_config_path']
checkpoint_path = p['checkpoint_path']
metrics_path = str(tmp_path.joinpath('metrics.json'))
tmp_path = str(tmp_path)
args = {}
if not dataset_dir:
dataset_dir = tmp_path
args['data'] = dataset_dir
args['dataset'] = p['dataset']
args['config'] = str(config_path)
args['mode'] = 'test'
args['log-dir'] = tmp_path
args['workers'] = 0  # Workaround for the PyTorch MultiProcessingDataLoader issue
args['seed'] = 1
args['resume'] = checkpoint_path
args['metrics-dump'] = metrics_path
if p['execution_mode'] == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:
args['multiprocessing-distributed'] = ''
else:
pytest.skip("DataParallel eval takes too long for this test to be run during pre-commit")
runner = Command(create_command_line(get_cli_dict_args(args), "classification"))
runner.run()
with open(metrics_path, encoding='utf8') as metric_file:
metrics = json.load(metric_file)
# accuracy is rounded to hundredths
assert torch.load(checkpoint_path)['best_acc1'] == pytest.approx(metrics['Accuracy'], abs=1e-2)
old_style_sd = {
f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.weight': torch.ones([3, 3, 1, 1]),
f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.bias': torch.ones([3]),
f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op._num_bits': 8 * torch.ones([1], dtype=torch.int32),
f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op.signed_tensor': torch.ones([1], dtype=torch.int32),
f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op.enabled': torch.ones([1], dtype=torch.int32),
f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op.scale': torch.ones([3, 1, 1, 1]),
f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT._num_bits': 8 * torch.ones([1], dtype=torch.int32),
f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT.signed_tensor': torch.zeros([1], dtype=torch.int32),
f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT.enabled': torch.ones([1], dtype=torch.int32),
f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT.scale': torch.ones([1]),
}
def test_renamed_activation_quantizer_storage_in_state_dict():
model = SingleConv2dIdentityModel()
config = get_basic_quantization_config(input_info={
"sample_size": [1, 3, 100, 100]
})
register_bn_adaptation_init_args(config)
compressed_model, _ = create_compressed_model_and_algo_for_test(model, config)
with pytest.deprecated_call():
_ = load_state(compressed_model, old_style_sd, is_resume=True)
def test_can_compress_with_config_and_resume_of_old_checkpoint():
model = SingleConv2dIdentityModel()
config = get_basic_quantization_config(input_info={
"sample_size": [1, 3, 100, 100]
})
register_bn_adaptation_init_args(config)
create_compressed_model_and_algo_for_test(model, config, compression_state=old_style_sd)
# BN Wrapping backward compatibility test
class ConvBNLayer(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 9, (3, 3))
self.bn = torch.nn.BatchNorm2d(9)
self.conv1 = torch.nn.Conv2d(9, 3, (3, 3))
self.bn1 = torch.nn.BatchNorm2d(3)
def forward(self, x):
x = self.bn(self.conv(x))
return self.bn1(self.conv1(x))
sd_without_nncf_bn_wrapping = {
'nncf_module.conv.weight': torch.empty([9, 3, 3, 3]),
'nncf_module.conv.bias': torch.empty([9]),
'nncf_module.conv.nncf_padding_value': torch.empty([1]),
'nncf_module.conv.pre_ops.0.op._num_bits': torch.empty([1]),
'nncf_module.conv.pre_ops.0.op.signed_tensor': torch.empty([1]),
'nncf_module.conv.pre_ops.0.op.enabled': torch.empty([1]),
'nncf_module.conv.pre_ops.0.op.scale': torch.empty([9, 1, 1, 1]),
'nncf_module.bn.weight': torch.empty([9]),
'nncf_module.bn.bias': torch.empty([9]),
'nncf_module.bn.running_mean': torch.empty([9]),
'nncf_module.bn.running_var': torch.empty([9]),
'nncf_module.bn.num_batches_tracked': torch.empty([]),
'nncf_module.conv1.weight': torch.empty([3, 9, 3, 3]),
'nncf_module.conv1.bias': torch.empty([3]),
'nncf_module.conv1.nncf_padding_value': torch.empty([1]),
'nncf_module.conv1.pre_ops.0.op._num_bits': torch.empty([1]),
'nncf_module.conv1.pre_ops.0.op.signed_tensor': torch.empty([1]),
'nncf_module.conv1.pre_ops.0.op.enabled': torch.empty([1]),
'nncf_module.conv1.pre_ops.0.op.scale': torch.empty([3, 1, 1, 1]),
'nncf_module.bn1.weight': torch.empty([3]),
'nncf_module.bn1.bias': torch.empty([3]),
'nncf_module.bn1.running_mean': torch.empty([3]),
'nncf_module.bn1.running_var': torch.empty([3]),
'nncf_module.bn1.num_batches_tracked': torch.empty([]),
'external_quantizers./nncf_model_input_0|OUTPUT._num_bits': torch.empty([1]),
'external_quantizers./nncf_model_input_0|OUTPUT.signed_tensor': torch.empty([1]),
'external_quantizers./nncf_model_input_0|OUTPUT.enabled': torch.empty([1]),
'external_quantizers./nncf_model_input_0|OUTPUT.scale': torch.empty([1]),
# Old bn layer names: |||||||||||
'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT._num_bits': torch.empty([1]),
'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT.signed_tensor': torch.empty([1]),
'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT.enabled': torch.empty([1]),
'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT.scale': torch.empty([1])
}
compression_state_without_bn_wrapping = {
'builder_state':
{'quantization':
{'quantizer_setup':
{'quantization_points':
{1: {'qip': {'target_node_name': '/nncf_model_input_0', 'input_port_id': None},
'qip_class': 'ActivationQuantizationInsertionPoint',
'qconfig':
{'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': None, 'per_channel': False},
'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv]/conv2d_0']},
# Old bn layer name: |||||||||||
2: {'qip': {'target_node_name': 'ConvBNLayer/BatchNorm2d[bn]/batch_norm_0', 'input_port_id': None},
'qip_class': 'ActivationQuantizationInsertionPoint',
'qconfig':
{'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': None, 'per_channel': False},
'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv1]/conv2d_0']},
4: {'qip': {'target_node_name': 'ConvBNLayer/NNCFConv2d[conv]/conv2d_0'},
'qip_class': 'WeightQuantizationInsertionPoint',
'qconfig':
{'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': True, 'per_channel': True},
'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv]/conv2d_0']},
5: {'qip': {'target_node_name': 'ConvBNLayer/NNCFConv2d[conv1]/conv2d_0'},
'qip_class': 'WeightQuantizationInsertionPoint',
'qconfig':
{'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': True, 'per_channel': True},
'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv1]/conv2d_0']}},
'unified_scale_groups': {}, 'shared_input_operation_set_groups': {0: [1, 4], 1: [2, 5]}},
'build_time_metric_infos': {'aq_potential_num': 3, 'wq_potential_num': 4}}},
'ctrl_state': {'quantization': {'loss_state': None, 'scheduler_state': {'current_step': -1, 'current_epoch': -1},
'compression_stage': CompressionStage.FULLY_COMPRESSED}}}
def test_quantization_ckpt_without_wrapped_bn_loading():
model = ConvBNLayer()
config = get_basic_quantization_config(input_info={
"sample_size": [1, 3, 100, 100]
})
register_bn_adaptation_init_args(config)
with pytest.deprecated_call():
compressed_model, _ = \
create_compressed_model_and_algo_for_test(model, config,
compression_state=compression_state_without_bn_wrapping)
with pytest.deprecated_call():
_ = load_state(compressed_model, sd_without_nncf_bn_wrapping, is_resume=True)
| [
"torch.zeros",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.nn.Conv2d",
"torch.load",
"torch.empty"
] | 1.5.0 | evgeniya-egupova/nncf | 39a3c5b2e5cc7d33723154d2e622d4d7882a99a4 |
1.5 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torch import nn
from examples.torch.common.sample_config import SampleConfig
from examples.torch.object_detection.layers.modules.ssd_head import MultiOutputSequential, SSDDetectionOutput
from nncf.torch.checkpoint_loading import load_state
def conv_bn(inp, oup, kernel, stride, padding):
return nn.Sequential(
nn.Conv2d(inp, oup, kernel, stride, padding, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
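# The conv_dw block below is a depthwise-separable convolution (MobileNet-v1 style): a 3x3
# depthwise conv (groups=inp, one filter per input channel) followed by a 1x1 pointwise conv
# that mixes channels, each with BatchNorm + ReLU; this costs roughly 1/k^2 + 1/oup of a
# standard kxk convolution's multiply-adds.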
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
def mobilenet(start_input_channels=3):
model = MultiOutputSequential(
[11, 13],
[
conv_bn(start_input_channels, 32, 3, 2, 1),
conv_dw(32, 64, 1),
conv_dw(64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
conv_dw(256, 256, 1),
conv_dw(256, 512, 2),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 1024, 2),
conv_dw(1024, 1024, 1)
]
)
return model
def extra_layers(start_input_channels):
return MultiOutputSequential(
[1, 3, 5, 7],
[
conv_bn(start_input_channels, 256, 1, 1, 0),
conv_bn(256, 512, 3, 2, 1),
conv_bn(512, 128, 1, 1, 0),
conv_bn(128, 256, 3, 2, 1),
conv_bn(256, 128, 1, 1, 0),
conv_bn(128, 256, 3, 2, 1),
conv_bn(256, 64, 1, 1, 0),
conv_bn(64, 128, 3, 2, 1)
]
)
class MobileNetSSD(nn.Module):
def __init__(self, num_classes, cfg):
super().__init__()
self.cfg = cfg
self.num_classes = num_classes
self.basenet = mobilenet()
self.extras = extra_layers(1024)
NUM_INPUT_FEATURES = [512, 1024, 512, 256, 256, 128]
self.detection_head = SSDDetectionOutput(NUM_INPUT_FEATURES, num_classes, cfg)
def forward(self, x):
img_tensor = x[0].clone().unsqueeze(0)
sources, x = self.basenet(x)
extra_sources, x = self.extras(x)
return self.detection_head(sources + extra_sources, img_tensor)
def build_ssd_mobilenet(cfg, size, num_classes, config):
if size != 300:
raise ValueError("Only Mobilenet-SSD with input size 300 is supported")
mobilenet_ssd = MobileNetSSD(num_classes, cfg)
if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None):
print('Loading base network...')
basenet_weights = torch.load(config.basenet)['state_dict']
new_weights = {}
for wn, wv in basenet_weights.items():
wn = wn.replace('model.', '')
new_weights[wn] = wv
load_state(mobilenet_ssd.basenet, new_weights, is_resume=False)
return mobilenet_ssd
def ssd_mobilenet():
ssd_params = SampleConfig({
"variance": [0.1, 0.1, 0.2, 0.2],
"max_sizes": [60, 111, 162, 213, 264, 315],
"min_sizes": [30, 60, 111, 162, 213, 264],
"steps": [16, 32, 64, 100, 150, 300],
"aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
"clip": False,
"flip": True,
"top_k": 200
})
return MobileNetSSD(21, ssd_params)
| [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.load"
] | 1.5.0 | evgeniya-egupova/nncf | 39a3c5b2e5cc7d33723154d2e622d4d7882a99a4 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DeepSpeedStrategy
from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_collate_checkpoint(tmpdir):
"""Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
trainer.strategy.barrier()
if trainer.is_global_zero:
# ensure function call works
output_path = os.path.join(tmpdir, "single_model.pt")
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)
_assert_checkpoint_equal(model, output_path)
def _assert_checkpoint_equal(model, output_path):
assert os.path.exists(output_path)
single_output = torch.load(output_path)
state_dict = model.state_dict()
for orig_param, saved_model_param in zip(state_dict.values(), single_output["state_dict"].values()):
if model.dtype == torch.half:
# moved model to float32 for comparison with single fp32 saved weights
saved_model_param = saved_model_param.half()
assert torch.equal(orig_param.cpu(), saved_model_param)
| [
"torch.load"
] | 1.7 | lemairecarl/pytorch-lightning | 85304d4672a9ed24a16f7f5b2abaa34148ab86f4 |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union
import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
all_reduce_mean,
barrier,
init_distributed_data_parallel_model,
is_distributed_training_run,
)
from classy_vision.generic.util import (
Timer,
copy_model_to_gpu,
load_and_broadcast_checkpoint,
master_params,
recursive_copy_to_gpu,
split_batchnorm_params,
update_classy_state,
)
from classy_vision.generic.util import get_torch_version
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
ClassyOptimizer,
build_optimizer,
build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast
from . import register_task
from .classy_task import ClassyTask
try:
import apex
apex_available = True
except ImportError:
apex_available = False
try:
from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
pass
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
fairscale_available = True
except ImportError:
fairscale_available = False
class AmpType(enum.Enum):
# Automatic Mixed Precision supported types
APEX = enum.auto()
PYTORCH = enum.auto()
class BroadcastBuffersMode(enum.Enum):
DISABLED = enum.auto()
# Enable DistributedDataParallel's broadcast_buffers option, synchronizing
# model buffers every forward pass.
FORWARD_PASS = enum.auto()
# Similar to FORWARD_PASS, but only synchronizes model buffers once
# per epoch, between train and test phases. If your motivation for
# synchronizing buffers is for buffers to be consistent during eval, use
# this instead of FORWARD_PASS to reduce training overhead.
BEFORE_EVAL = enum.auto()
class BatchNormSyncMode(enum.Enum):
DISABLED = enum.auto() # No Synchronized Batch Normalization
PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm
APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed
class LastBatchInfo(NamedTuple):
loss: torch.Tensor
output: torch.Tensor
target: torch.Tensor
sample: Dict[str, Any]
step_data: Dict[str, Any]
@register_task("classification_task")
class ClassificationTask(ClassyTask):
"""Basic classification training task.
This task encapsulates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
    Assumes a train / test phase per epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
    :var datasets: Mapping from a ``phase_type`` in ["train", "test"]
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
    :var base_model: Model to be trained, not wrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
"""
def __init__(self):
"""Constructs a ClassificationTask"""
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = -1
self.train_phase_idx = -1
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = (
BroadcastBuffersMode.BEFORE_EVAL
)
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = "spawn"
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False
def set_use_sharded_ddp(self, use_sharded_ddp: bool):
self.use_sharded_ddp = use_sharded_ddp
if self.use_sharded_ddp:
logging.info("Using Sharded DDP")
return self
def set_use_gpu(self, use_gpu: bool):
self.use_gpu = use_gpu
assert (
not self.use_gpu or torch.cuda.is_available()
), "CUDA required to train on GPUs"
return self
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
"""Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None."""
self.clip_grad_norm = clip_grad_norm
if clip_grad_norm is None:
logging.info("Disabled gradient norm clipping.")
else:
logging.info(
f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
)
return self
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
"""Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
        When enabled, the last few train steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None."""
self.simulated_global_batchsize = simulated_global_batchsize
return self
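    # Illustration: prepare() derives the accumulation period as
    # simulated_global_batchsize // global_batchsize, so assuming a hypothetical global
    # batch size of 32:
    #
    #   task.set_simulated_global_batchsize(256)
    #   # after prepare(): task.optimizer_period == 256 // 32 == 8, i.e. gradients from
    #   # 8 minibatches are accumulated before each optimizer step. The simulated value
    #   # must be a multiple of the global batch size or prepare() raises ValueError.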
def set_checkpoint(self, checkpoint_path: str):
"""Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
"""
self.checkpoint_path = checkpoint_path
return self
def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):
"""Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
"""
self.checkpoint_dict = checkpoint_dict
return self
def set_num_epochs(self, num_epochs: Union[int, float]):
"""Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
"""
self.num_epochs = num_epochs
return self
def set_test_phase_period(self, test_phase_period: int):
"""Set the period of test phase.
Args:
test_phase_period: The period of test phase
"""
self.test_phase_period = test_phase_period
return self
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
"""Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
"""
assert phase_type in [
"train",
"test",
], "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if phase_type == "train":
self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1)
else:
self._train_only = False
return self
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"""Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details."""
self.dataloader_mp_context = dataloader_mp_context
return self
def set_optimizer(self, optimizer: ClassyOptimizer):
"""Set optimizer for task
Args:
optimizer: optimizer for task
"""
self.optimizer = optimizer
return self
def set_loss(self, loss: ClassyLoss):
"""Set loss function for task
Args:
loss: loss for task
"""
self.base_loss = loss
return self
def set_meters(self, meters: List["ClassyMeter"]):
"""Set meters for task
Args:
meters: list of meters to compute during training
"""
self.meters = meters
return self
def set_distributed_options(
self,
broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,
batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,
batch_norm_sync_group_size: int = 0,
find_unused_parameters: bool = False,
bucket_cap_mb: int = 25,
fp16_grad_compress: bool = False,
):
"""Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed.
"""
self.broadcast_buffers_mode = broadcast_buffers_mode
if batch_norm_sync_group_size > 0:
if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
# this should ideally work with PyTorch Sync BN as well, but it
# fails while initializing DDP for some reason.
raise ValueError(
"batch_norm_sync_group_size can be > 0 only when "
"Apex Synchronized Batch Normalization is being used."
)
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
logging.info("Synchronized Batch Normalization is disabled")
else:
if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
raise RuntimeError("apex is not installed")
            msg = f"Using Synchronized Batch Normalization with {batch_norm_sync_mode}"
if self.batch_norm_sync_group_size > 0:
msg += f" and group size {batch_norm_sync_group_size}"
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info("Enabling find_unused_parameters in DDP")
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if get_torch_version() < [1, 8, 0]:
raise RuntimeError(
"FP16 grad compression is only supported since PyTorch 1.8"
)
logging.info("Enabling FP16 grad compression")
self.fp16_grad_compress = fp16_grad_compress
return self
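    # Illustration (assuming Apex is installed and each node has 8 GPUs):
    #
    #   task.set_distributed_options(
    #       batch_norm_sync_mode=BatchNormSyncMode.APEX,
    #       batch_norm_sync_group_size=8,
    #   )
    #
    # A non-zero batch_norm_sync_group_size is only accepted together with the Apex
    # sync-BN mode; combining it with the PyTorch mode raises a ValueError above.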
def set_hooks(self, hooks: List["ClassyHook"]):
"""Set hooks for task
Args:
hooks: List of hooks to apply during training
"""
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all(isinstance(hook, ClassyHook) for hook in hooks)
assert len({hook.name() for hook in hooks}) == len(
hooks
), "Cannot have repeated hooks of the same class"
# TODO (zyan3): we move checkpoint hook to the end of the list because some hooks
# may change the state of the model, and we want to save changed state in the checkpoint.
        # This is a temporary fix.
non_checkpoint_hooks = [
hook for hook in hooks if not isinstance(hook, CheckpointHook)
]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = non_checkpoint_hooks + checkpoint_hooks
self.hooks = hooks
return self
def set_model(self, model: ClassyModel):
"""Set model for task
Args:
model: Model to be trained
"""
self.base_model = model
return self
def set_test_only(self, test_only: bool):
"""Set test only flag
Args:
test_only: If true, only test phases will be run
"""
self.test_only = test_only
return self
def set_bn_weight_decay(self, bn_weight_decay: bool):
assert type(bn_weight_decay) == bool
self.bn_weight_decay = bn_weight_decay
return self
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
"""Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature.
"""
self.amp_args = amp_args
if amp_args is None:
logging.info("AMP disabled")
else:
# Check that the requested AMP type is known
try:
self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
except KeyError:
logging.info("AMP type not specified, defaulting to Apex")
self.amp_type = AmpType.APEX
# Check for CUDA availability, required for both Apex and Pytorch AMP
if not torch.cuda.is_available():
raise RuntimeError(
"AMP is required but CUDA is not supported, cannot enable AMP"
)
# Check for Apex availability
if self.amp_type == AmpType.APEX and not apex_available:
raise RuntimeError(
"Apex AMP is required but Apex is not installed, cannot enable AMP"
)
if self.use_sharded_ddp:
if self.amp_type == AmpType.APEX:
raise RuntimeError(
"ShardedDDP has been requested, which is incompatible with Apex AMP"
)
if not fairscale_available:
raise RuntimeError(
"ShardedDDP has been requested, but fairscale is not installed in the current environment"
)
# Set Torch AMP grad scaler, used to prevent gradient underflow
elif self.amp_type == AmpType.PYTORCH:
if self.use_sharded_ddp:
logging.info("Using ShardedGradScaler to manage Pytorch AMP")
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f"AMP enabled with args {amp_args}")
return self
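    # Illustration (assuming Apex, or a CUDA build of PyTorch with torch.cuda.amp,
    # is available):
    #
    #   task.set_amp_args({"opt_level": "O1"})      # Apex AMP; amp_type defaults to APEX
    #   task.set_amp_args({"amp_type": "pytorch"})  # torch.cuda.amp autocast + GradScaler
    #   task.set_amp_args(None)                     # disable mixed precision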
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
"""Disable / enable mixup transform for data augmentation
        Args:
mixup_transform: a callable object which performs mixup data augmentation
"""
self.mixup_transform = mixup_transform
if mixup_transform is None:
logging.info("mixup disabled")
else:
logging.info("mixup enabled")
return self
def set_optimizer_schedulers(self, schedulers):
self.optimizer_schedulers = schedulers
return self
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
"""Instantiates a ClassificationTask from a configuration.
Args:
config: A configuration for a ClassificationTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A ClassificationTask instance.
"""
test_only = config.get("test_only", False)
if not test_only:
# TODO Make distinction between epochs and phases in optimizer clear
train_phases_per_epoch = config["dataset"]["train"].get(
"phases_per_epoch", 1
)
optimizer_config = config["optimizer"]
optimizer_config["num_epochs"] = (
config["num_epochs"] * train_phases_per_epoch
)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ["train", "test"]
for phase_type in phase_types:
if phase_type in config["dataset"]:
datasets[phase_type] = build_dataset(config["dataset"][phase_type])
loss = build_loss(config["loss"])
amp_args = config.get("amp_args")
meters = build_meters(config.get("meters", {}))
model = build_model(config["model"])
mixup_transform = None
if config.get("mixup") is not None:
assert "alpha" in config["mixup"], "key alpha is missing in mixup dict"
mixup_transform = MixupTransform(
config["mixup"]["alpha"], config["mixup"].get("num_classes")
)
# hooks config is optional
hooks_config = config.get("hooks")
hooks = []
if hooks_config is not None:
hooks = build_hooks(hooks_config)
distributed_config = config.get("distributed", {})
distributed_options = {
"broadcast_buffers_mode": BroadcastBuffersMode[
distributed_config.get("broadcast_buffers", "before_eval").upper()
],
"batch_norm_sync_mode": BatchNormSyncMode[
distributed_config.get("batch_norm_sync_mode", "disabled").upper()
],
"batch_norm_sync_group_size": distributed_config.get(
"batch_norm_sync_group_size", 0
),
"find_unused_parameters": distributed_config.get(
"find_unused_parameters", False
),
"bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25),
"fp16_grad_compress": distributed_config.get("fp16_grad_compress", False),
}
task = (
cls()
.set_num_epochs(config["num_epochs"])
.set_test_phase_period(config.get("test_phase_period", 1))
.set_loss(loss)
.set_test_only(test_only)
.set_model(model)
.set_meters(meters)
.set_amp_args(amp_args)
.set_mixup_transform(mixup_transform)
.set_distributed_options(**distributed_options)
.set_hooks(hooks)
.set_bn_weight_decay(config.get("bn_weight_decay", False))
.set_clip_grad_norm(config.get("clip_grad_norm"))
.set_simulated_global_batchsize(config.get("simulated_global_batchsize"))
.set_use_sharded_ddp(config.get("use_sharded_ddp", False))
)
if not test_only:
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get("use_gpu")
if use_gpu is not None:
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
# NOTE: this is a private member and only meant to be used for
# logging/debugging purposes. See __repr__ implementation
task._config = config
return task
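    # Illustration: a minimal config accepted by from_config. The nested entries are
    # placeholders and must name components registered in your Classy Vision setup:
    #
    #   config = {
    #       "num_epochs": 2,
    #       "loss": {...},       # built via build_loss
    #       "model": {...},      # built via build_model
    #       "optimizer": {...},  # built via build_optimizer (training runs only)
    #       "dataset": {"train": {...}, "test": {...}},
    #   }
    #   task = ClassificationTask.from_config(config)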
@property
def num_batches_per_phase(self):
"""Returns number of batches in current phase iterator"""
return len(self.data_iterator)
@property
def model(self):
"""Returns model used in training (can be wrapped with DDP)"""
return (
self.distributed_model if is_distributed_training_run() else self.base_model
)
@property
def loss(self):
"""Returns loss used in training (can be wrapped with DDP)"""
return self.distributed_loss if self.distributed_loss else self.base_loss
@property
def phase_type(self):
"""Returns current phase type. String with value "train" or "test" """
return "train" if self.train else "test"
@property
def eval_phase_idx(self):
"""Returns current evaluation phase"""
return self.phase_idx - self.train_phase_idx - 1
def get_total_training_phases(self):
"""
Returns the total number of "train" phases in the task
"""
num_training_phases = 0
for phase in self.phases:
if phase["train"] is True:
num_training_phases += 1
return num_training_phases
def get_total_test_phases(self):
"""
Returns the total number of "test" phases in the task
"""
num_test_phases = 0
for phase in self.phases:
if phase["train"] is False:
num_test_phases += 1
return num_test_phases
def _build_phases(self):
"""Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases.
"""
if not self.test_only:
phases = [
{"train": True}
for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))
]
if self._train_only:
return phases
final_phases = []
for i, phase in enumerate(phases):
final_phases.append(phase)
if (i + 1) % self.test_phase_period == 0:
final_phases.append({"train": False})
if final_phases[-1]["train"]:
final_phases.append({"train": False})
return final_phases
return [{"train": False} for _ in range(self.num_epochs)]
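    # Worked example: assuming both a train and a test dataset were set, num_epochs=3,
    # train_phases_per_epoch=1 and test_phase_period=2 yield
    #
    #   [{"train": True}, {"train": True}, {"train": False},
    #    {"train": True}, {"train": False}]
    #
    # i.e. a test phase after every second train phase, plus a final test phase.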
def build_dataloader_from_dataset(self, dataset, **kwargs):
"""Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes
"""
return dataset.iterator(
phase_type=self.phase_type,
current_phase_id=self.train_phase_idx if self.train else 0,
pin_memory=self.use_gpu and torch.cuda.device_count() > 1,
multiprocessing_context=mp.get_context(self.dataloader_mp_context),
**kwargs,
)
def build_dataloaders_for_current_phase(self):
"""Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel.
"""
self.dataloader = self.build_dataloader_from_dataset(
self.datasets[self.phase_type]
)
def prepare_optimizer(self, optimizer, model, loss=None):
bn_params, other_params = split_batchnorm_params(model)
if loss is not None:
bn_params_loss, params_loss = split_batchnorm_params(loss)
bn_params = bn_params + bn_params_loss
other_params = other_params + params_loss
bn_schedulers = self.optimizer_schedulers.copy()
if not self.bn_weight_decay:
bn_schedulers["weight_decay"] = 0
param_groups = [{"params": other_params, **self.optimizer_schedulers}]
if len(bn_params) > 0:
param_groups.append({"params": bn_params, **bn_schedulers})
self.optimizer.set_param_groups(param_groups)
def prepare(self):
"""Prepares task for training, populates all derived attributes """
self.phases = self._build_phases()
self.train = False if self.test_only else self.train
if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
sync_bn_process_group = apex.parallel.create_syncbn_process_group(
self.batch_norm_sync_group_size
)
self.base_model = apex.parallel.convert_syncbn_model(
self.base_model, process_group=sync_bn_process_group
)
# move the model and loss to the right device
if self.use_gpu:
self.base_model, self.base_loss = copy_model_to_gpu(
self.base_model, self.base_loss
)
else:
self.base_loss.cpu()
self.base_model.cpu()
if self.optimizer is not None:
self.prepare_optimizer(
optimizer=self.optimizer, model=self.base_model, loss=self.base_loss
)
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
# Initialize apex.amp. This updates the model and the PyTorch optimizer (
# if training, which is wrapped by the ClassyOptimizer in self.optimizer).
                # Please note this must happen before loading the checkpoint, because
# there's amp state to be restored.
if self.optimizer is None:
self.base_model = apex.amp.initialize(
self.base_model, optimizers=None, **self.amp_args
)
else:
self.base_model, self.optimizer.optimizer = apex.amp.initialize(
self.base_model, self.optimizer.optimizer, **self.amp_args
)
if self.simulated_global_batchsize is not None:
if self.simulated_global_batchsize % self.get_global_batchsize() != 0:
raise ValueError(
f"Global batch size ({self.get_global_batchsize()}) must divide "
f"simulated_global_batchsize ({self.simulated_global_batchsize})"
)
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (
self.simulated_global_batchsize // self.get_global_batchsize()
)
if self.optimizer_period > 1:
logging.info(
f"Using gradient accumulation with a period of {self.optimizer_period}"
)
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (
None
if self.checkpoint_dict is None
else self.checkpoint_dict["classy_state_dict"]
)
if classy_state_dict is not None:
state_load_success = update_classy_state(self, classy_state_dict)
assert (
state_load_success
), "Update classy state from checkpoint was unsuccessful."
self.init_distributed_data_parallel_model()
def init_distributed_data_parallel_model(self):
"""
Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP.
"""
if not is_distributed_training_run():
return
assert (
self.distributed_model is None
), "init_ddp_non_elastic must only be called once"
broadcast_buffers = (
self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
)
if self.use_sharded_ddp:
if not isinstance(self.optimizer, ZeRO):
raise ValueError(
"ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer"
)
from fairscale.nn.data_parallel import ShardedDataParallel
# Replace the original DDP wrap by the shard-aware ShardedDDP
self.distributed_model = ShardedDataParallel(
module=self.base_model,
sharded_optimizer=self.optimizer.optimizer,
broadcast_buffers=broadcast_buffers,
)
else:
self.distributed_model = init_distributed_data_parallel_model(
self.base_model,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
# FP16 hook is stateless and only takes a process group as the state.
# We use the default process group so we set the state to None.
process_group = None
self.distributed_model.register_comm_hook(
process_group,
ddp_comm_hooks.default_hooks.fp16_compress_hook,
)
if (
isinstance(self.base_loss, ClassyLoss)
and self.base_loss.has_learned_parameters()
):
logging.info("Initializing distributed loss")
self.distributed_loss = init_distributed_data_parallel_model(
self.base_loss,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
@property
def where(self):
"""Returns the proportion of training that has completed. If in test
only mode, returns proportion of testing completed
Returned value is a float in the range [0, 1)
"""
current_step = self.num_updates / self.get_global_batchsize()
num_phases = (
self.get_total_test_phases()
if self.test_only
else self.get_total_training_phases()
)
if self.num_batches_per_phase <= 0:
raise RuntimeError("No batches to read. Is the dataset empty?")
num_steps = num_phases * self.num_batches_per_phase
where = current_step / num_steps
return where
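    # Worked example: with a global batch size of 32, 100 batches per phase and
    # 4 train phases, num_updates == 3200 gives current_step = 3200 / 32 = 100 and
    # where = 100 / (4 * 100) = 0.25.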
def get_classy_state(self, deep_copy: bool = False):
"""Returns serialiable state of task
Args:
deep_copy: If true, does a deep copy of state before returning.
"""
optimizer_state = {}
if self.optimizer is not None:
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {
"train": self.train,
"base_model": self.base_model.get_classy_state(),
"meters": [meter.get_classy_state() for meter in self.meters],
"optimizer": optimizer_state,
"phase_idx": self.phase_idx,
"train_phase_idx": self.train_phase_idx,
"num_updates": self.num_updates,
"losses": self.losses,
"hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks},
"loss": {},
}
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
classy_state_dict["train_dataset_iterator"] = self.datasets[
"train"
].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict["loss"] = self.base_loss.get_classy_state()
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
classy_state_dict["amp"] = apex.amp.state_dict()
elif self.amp_grad_scaler is not None:
classy_state_dict["amp"] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict
def set_classy_state(self, state):
"""Set task state
Args:
state: Dict containing state of a task
"""
# some settings are different in test only
self.train = False if self.test_only else state["train"]
if not self.test_only:
self.phase_idx = state["phase_idx"]
self.num_updates = state["num_updates"]
self.train_phase_idx = state["train_phase_idx"]
self.losses = state["losses"]
for meter, meter_state in zip(self.meters, state["meters"]):
meter.set_classy_state(meter_state)
self.base_model.set_classy_state(state["base_model"])
if self.optimizer is not None:
self.optimizer.set_classy_state(state["optimizer"])
if state.get("loss") and isinstance(self.base_loss, ClassyLoss):
self.base_loss.set_classy_state(state["loss"])
if "amp" in state:
if self.amp_type == AmpType.APEX:
apex.amp.load_state_dict(state["amp"])
else:
self.amp_grad_scaler.load_state_dict(state["amp"])
for hook in self.hooks:
# we still want to be able to run when new hooks are added or old
# hooks are removed
if hook.name() in state["hooks"]:
hook.set_classy_state(state["hooks"][hook.name()])
else:
logging.warning(f"No state found for hook: {hook.name()}")
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
self.datasets["train"].set_classy_state(state.get("train_dataset_iterator"))
@staticmethod
def _is_checkpointable_dataset(dataset):
return hasattr(dataset, "get_classy_state") and hasattr(
dataset, "set_classy_state"
)
def eval_step(self):
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
            + " 'target' keys"
)
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
with torch.no_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.check_inf_nan(loss)
self.losses.append(loss.data.cpu().item() * target.size(0))
self.update_meters(output, sample)
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def check_inf_nan(self, loss):
if loss == float("inf") or loss == float("-inf") or loss != loss:
raise FloatingPointError(f"Loss is infinity or NaN: {loss}")
def _should_do_step(self):
"""Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step.
"""
update_idx = self.num_updates // self.get_global_batchsize()
return (update_idx % self.optimizer_period) == self.optimizer_period - 1
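    # Worked example: with optimizer_period == 4 this returns True on update indices
    # 3, 7, 11, ... (every fourth minibatch), while run_optimizer zeroes the gradients
    # on indices 0, 4, 8, ...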
def train_step(self):
"""Train step to be executed in train loop."""
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
            + " 'target' keys"
)
# Copy sample to GPU
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if self.mixup_transform is not None:
sample = self.mixup_transform(sample)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
# only sync with DDP when we need to perform an optimizer step
# an optimizer step can be skipped if gradient accumulation is enabled
do_step = self._should_do_step()
ctx_mgr_model = (
self.distributed_model.no_sync()
if self.distributed_model is not None and not do_step
else contextlib.suppress()
)
ctx_mgr_loss = (
self.distributed_loss.no_sync()
if self.distributed_loss is not None and not do_step
else contextlib.suppress()
)
with ctx_mgr_model, ctx_mgr_loss:
# Forward pass
with torch.enable_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item() * target.size(0))
self.update_meters(output, sample)
# Backwards pass + optimizer step
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def compute_loss(self, model_output, sample):
return self.loss(model_output, sample["target"])
def run_optimizer(self, loss):
"""Runs backwards pass and update the optimizer"""
self.check_inf_nan(loss)
# Gradient accumulation logic. We always set optimizer_period, even
# if gradient accumulation is disabled. Assumes all batches have the
# same size
update_idx = self.num_updates // self.get_global_batchsize()
do_zero_grad = (update_idx % self.optimizer_period) == 0
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if self.amp_type == AmpType.APEX:
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.amp_type == AmpType.PYTORCH:
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
# Handle gradient accumulation related gradient rescaling
if self.optimizer_period != 1:
self._rescale_gradients(1 / self.optimizer_period)
# Clipping must happen after grad accumulation
if self.clip_grad_norm is not None:
self._clip_gradients(self.clip_grad_norm)
if self.amp_type == AmpType.PYTORCH:
# If using mixed precision, handle underflow-related scaling
# See https://pytorch.org/docs/stable/amp.html#gradient-scaling
# for context
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where)
def _rescale_gradients(self, scale):
for param in master_params(self.optimizer):
if param.grad is not None:
param.grad.data.mul_(scale)
def _clip_gradients(self, max_norm):
nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)
def update_meters(self, model_output, sample):
target = sample["target"].detach().cpu()
model_output = model_output.detach().cpu()
# Update meters
for meter in self.meters:
meter.update(model_output, target, is_train=self.train)
def synchronize_losses(self):
"""Average the losses across the different replicas"""
# Average losses across nodes
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist()
def advance_phase(self):
"""Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase.
"""
logging.debug("Advancing phase")
# Reset meters for next phase / epoch
for meter in self.meters:
meter.reset()
# Reset loss history for next epoch
self.losses = []
# Setup new phase
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = True if phase["train"] else False
if self.train:
self.train_phase_idx += 1
# Re-build dataloader & re-create iterator anytime membership changes.
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
# Set up pytorch module in train vs eval mode, update optimizer.
self._set_model_train_mode()
def done_training(self):
"""Stop condition for training"""
return self.phase_idx + 1 >= len(self.phases)
def create_data_iterators(self):
"""Creates data iterator(s) for the current phase."""
# Delete iterator explicitly so that all dataloader processes
# are cleaned up.
del self.data_iterator
self.data_iterator = iter(self.dataloader)
def _set_model_train_mode(self):
"""Set train mode for model"""
phase = self.phases[self.phase_idx]
self.base_model.train(phase["train"])
self.base_loss.train(phase["train"])
if (
self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL
and not self.train
):
self._broadcast_buffers()
def _broadcast_buffers(self):
"""Explicitly synchronize buffers across all devices."""
if self.distributed_model is None:
return
buffers = list(self.base_model.buffers())
if len(buffers) > 0:
logging.info("Synchronizing buffers before evaluation.")
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group)
# TODO: Functions below should be better abstracted into the dataloader
# abstraction
def get_batchsize_per_replica(self):
"""Return local replica's batchsize for dataset (e.g. batchsize per GPU)"""
return self.datasets[self.phase_type].get_batchsize_per_replica()
def get_global_batchsize(self):
"""Return global batchsize across all trainers"""
return self.datasets[self.phase_type].get_global_batchsize()
def on_start(self):
for hook in self.hooks:
hook.on_start(self)
def on_phase_start(self):
self.phase_start_time_total = time.perf_counter()
self.advance_phase()
for hook in self.hooks:
hook.on_phase_start(self)
self.phase_start_time_train = time.perf_counter()
def on_phase_end(self):
self.log_phase_end("train")
if self.train:
self.optimizer.on_epoch(where=self.where)
logging.debug("Syncing losses on phase end...")
self.synchronize_losses()
logging.debug("...losses synced")
logging.debug("Syncing meters on phase end...")
for meter in self.meters:
meter.sync_state()
logging.debug("...meters synced")
barrier()
for hook in self.hooks:
hook.on_phase_end(self)
self.perf_log = []
self.log_phase_end("total")
def on_end(self):
for hook in self.hooks:
hook.on_end(self)
def log_phase_end(self, tag):
if not self.train:
return
start_time = (
self.phase_start_time_train
if tag == "train"
else self.phase_start_time_total
)
phase_duration = time.perf_counter() - start_time
im_per_sec = (
self.get_global_batchsize() * self.num_batches_per_phase
) / phase_duration
self.perf_log.append(
{
"tag": tag,
"phase_idx": self.train_phase_idx,
"epoch_duration": phase_duration,
"im_per_sec": im_per_sec,
}
)
def __repr__(self):
if hasattr(self, "_config"):
config = json.dumps(self._config, indent=4)
return f"{super().__repr__()} initialized with config:\n{config}"
return super().__repr__()
| [
"torch.cuda.amp.autocast",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.no_grad",
"torch.enable_grad",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.tensor",
"torch.cuda.amp.GradScaler",
"torch.distributed.broadcast"
] | 1.4 | shinianzhihou/ClassyVision | b3f714ef94275b3e9753ab3f3c8256cb852b96fc |
1.1 | #!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import codecs
import os
import math
import torch
from tensorboardX import SummaryWriter
from others.utils import rouge_results_to_str, test_rouge, tile
from translate.beam import GNMTGlobalScorer
def build_predictor(args, tokenizer, symbols, model, logger=None):
    scorer = GNMTGlobalScorer(args.alpha, length_penalty='wu')
translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)
return translator
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
        global_scorer (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
cuda (bool): use cuda
beam_trace (bool): trace beam search for debugging
logger(logging.Logger): logger.
"""
def __init__(self,
args,
model,
vocab,
symbols,
global_scorer=None,
logger=None,
dump_beam=""):
self.logger = logger
self.cuda = args.visible_gpus != '-1'
self.args = args
self.model = model
self.generator = self.model.generator
self.vocab = vocab
self.symbols = symbols
self.start_token = symbols['BOS']
self.end_token = symbols['EOS']
self.global_scorer = global_scorer
self.beam_size = args.beam_size
self.min_length = args.min_length
self.max_length = args.max_length
self.dump_beam = dump_beam
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
tensorboard_log_dir = args.model_path
self.tensorboard_writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def _build_target_tokens(self, pred):
# vocab = self.fields["tgt"].vocab
tokens = []
for tok in pred:
tok = int(tok)
tokens.append(tok)
if tokens[-1] == self.end_token:
tokens = tokens[:-1]
break
tokens = [t for t in tokens if t < len(self.vocab)]
tokens = self.vocab.DecodeIds(tokens).split(' ')
return tokens
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert (len(translation_batch["gold_score"]) ==
len(translation_batch["predictions"]))
batch_size = batch.batch_size
preds, pred_score, gold_score, tgt_str, src = translation_batch["predictions"],translation_batch["scores"],translation_batch["gold_score"],batch.tgt_str, batch.src
translations = []
for b in range(batch_size):
pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])
pred_sents = ' '.join(pred_sents).replace(' ##','')
gold_sent = ' '.join(tgt_str[b].split())
# translation = Translation(fname[b],src[:, b] if src is not None else None,
# src_raw, pred_sents,
# attn[b], pred_score[b], gold_sent,
# gold_score[b])
# src = self.spm.DecodeIds([int(t) for t in translation_batch['batch'].src[0][5] if int(t) != len(self.spm)])
raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]
raw_src = ' '.join(raw_src)
translation = (pred_sents, gold_sent, raw_src)
# translation = (pred_sents[0], gold_sent)
translations.append(translation)
return translations
def translate(self,
data_iter, step,
attn_debug=False):
self.model.eval()
gold_path = self.args.result_path + '.%d.gold' % step
can_path = self.args.result_path + '.%d.candidate' % step
self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')
self.can_out_file = codecs.open(can_path, 'w', 'utf-8')
# raw_gold_path = self.args.result_path + '.%d.raw_gold' % step
# raw_can_path = self.args.result_path + '.%d.raw_candidate' % step
raw_src_path = self.args.result_path + '.%d.raw_src' % step
self.src_out_file = codecs.open(raw_src_path, 'w', 'utf-8')
# pred_results, gold_results = [], []
ct = 0
with torch.no_grad():
for batch in data_iter:
if(self.args.recall_eval):
gold_tgt_len = batch.tgt.size(1)
self.min_length = gold_tgt_len + 20
self.max_length = gold_tgt_len + 60
batch_data = self.translate_batch(batch)
translations = self.from_batch(batch_data)
for trans in translations:
pred, gold, src = trans
pred_str = pred.replace('[unused1]', '').replace('[unused4]', '').replace('[PAD]', '').replace('[unused2]', '').replace(r' +', ' ').replace(' [unused3] ', '<q>').replace('[unused3]', '').strip()
gold_str = gold.strip()
if(self.args.recall_eval):
_pred_str = ''
gap = 1e3
for sent in pred_str.split('<q>'):
can_pred_str = _pred_str+ '<q>'+sent.strip()
can_gap = math.fabs(len(_pred_str.split())-len(gold_str.split()))
# if(can_gap>=gap):
if(len(can_pred_str.split())>=len(gold_str.split())+10):
pred_str = _pred_str
break
else:
gap = can_gap
_pred_str = can_pred_str
# pred_str = ' '.join(pred_str.split()[:len(gold_str.split())])
# self.raw_can_out_file.write(' '.join(pred).strip() + '\n')
# self.raw_gold_out_file.write(' '.join(gold).strip() + '\n')
self.can_out_file.write(pred_str + '\n')
self.gold_out_file.write(gold_str + '\n')
self.src_out_file.write(src.strip() + '\n')
ct += 1
self.can_out_file.flush()
self.gold_out_file.flush()
self.src_out_file.flush()
self.can_out_file.close()
self.gold_out_file.close()
self.src_out_file.close()
if (step != -1):
rouges = self._report_rouge(gold_path, can_path)
self.logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
if self.tensorboard_writer is not None:
self.tensorboard_writer.add_scalar('test/rouge1-F', rouges['rouge_1_f_score'], step)
self.tensorboard_writer.add_scalar('test/rouge2-F', rouges['rouge_2_f_score'], step)
self.tensorboard_writer.add_scalar('test/rougeL-F', rouges['rouge_l_f_score'], step)
def _report_rouge(self, gold_path, can_path):
self.logger.info("Calculating Rouge")
results_dict = test_rouge(self.args.temp_dir, can_path, gold_path)
return results_dict
def translate_batch(self, batch, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
return self._fast_translate_batch(
batch,
self.max_length,
min_length=self.min_length)
def _fast_translate_batch(self,
batch,
max_length,
min_length=0):
# TODO: faster code path for beam_size == 1.
# TODO: support these blacklisted features.
assert not self.dump_beam
beam_size = self.beam_size
batch_size = batch.batch_size
src = batch.src
segs = batch.segs
mask_src = batch.mask_src
src_features = self.model.bert(src, segs, mask_src)
dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)
device = src_features.device
# Tile states and memory beam_size times.
dec_states.map_batch_fn(
lambda state, dim: tile(state, beam_size, dim=dim))
src_features = tile(src_features, beam_size, dim=0)
batch_offset = torch.arange(
batch_size, dtype=torch.long, device=device)
beam_offset = torch.arange(
0,
batch_size * beam_size,
step=beam_size,
dtype=torch.long,
device=device)
alive_seq = torch.full(
[batch_size * beam_size, 1],
self.start_token,
dtype=torch.long,
device=device)
# Give full probability to the first beam on the first step.
topk_log_probs = (
torch.tensor([0.0] + [float("-inf")] * (beam_size - 1),
device=device).repeat(batch_size))
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)] # noqa: F812
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["gold_score"] = [0] * batch_size
results["batch"] = batch
for step in range(max_length):
decoder_input = alive_seq[:, -1].view(1, -1)
# Decoder forward.
decoder_input = decoder_input.transpose(0,1)
dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states,
step=step)
# Generator forward.
log_probs = self.generator.forward(dec_out.transpose(0,1).squeeze(0))
vocab_size = log_probs.size(-1)
if step < min_length:
log_probs[:, self.end_token] = -1e20
# Multiply probs by the beam probability.
log_probs += topk_log_probs.view(-1).unsqueeze(1)
alpha = self.global_scorer.alpha
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
if(self.args.block_trigram):
cur_len = alive_seq.size(1)
if(cur_len>3):
for i in range(alive_seq.size(0)):
fail = False
words = [int(w) for w in alive_seq[i]]
words = [self.vocab.ids_to_tokens[w] for w in words]
words = ' '.join(words).replace(' ##','').split()
if(len(words)<=3):
continue
trigrams = [(words[i-1],words[i],words[i+1]) for i in range(1,len(words)-1)]
trigram = tuple(trigrams[-1])
if trigram in trigrams[:-1]:
fail = True
if fail:
curr_scores[i] = -10e20
curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
# Recover log probs.
topk_log_probs = topk_scores * length_penalty
# Resolve beam origin and true word ids.
topk_beam_index = topk_ids.div(vocab_size)
topk_ids = topk_ids.fmod(vocab_size)
# Map beam_index to batch_index in the flat representation.
batch_index = (
topk_beam_index
+ beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
select_indices = batch_index.view(-1)
# Append last prediction.
alive_seq = torch.cat(
[alive_seq.index_select(0, select_indices),
topk_ids.view(-1, 1)], -1)
is_finished = topk_ids.eq(self.end_token)
if step + 1 == max_length:
is_finished.fill_(1)
# End condition is top beam is finished.
end_condition = is_finished[:, 0].eq(1)
# Save finished hypotheses.
if is_finished.any():
predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
for i in range(is_finished.size(0)):
b = batch_offset[i]
if end_condition[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
hypotheses[b].append((
topk_scores[i, j],
predictions[i, j, 1:]))
# If the batch reached the end, save the n_best hypotheses.
if end_condition[i]:
best_hyp = sorted(
hypotheses[b], key=lambda x: x[0], reverse=True)
score, pred = best_hyp[0]
results["scores"][b].append(score)
results["predictions"][b].append(pred)
non_finished = end_condition.eq(0).nonzero().view(-1)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
break
# Remove finished batches for the next step.
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
alive_seq = predictions.index_select(0, non_finished) \
.view(-1, alive_seq.size(-1))
# Reorder states.
select_indices = batch_index.view(-1)
src_features = src_features.index_select(0, select_indices)
dec_states.map_batch_fn(
lambda state, dim: state.index_select(dim, select_indices))
return results
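    # Note on the scoring above: the normalisation is the GNMT ("wu") length penalty
    # lp(len) = ((5 + len) / 6) ** alpha; for example, with alpha = 0.9 a hypothesis of
    # length 10 is divided by (15 / 6) ** 0.9, about 2.28, rather than by its raw length.
    # The trigram block sets a hypothesis score to -10e20 as soon as its newest trigram
    # repeats an earlier one, effectively dropping it from the beam.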
class Translation(object):
"""
Container for a translated sentence.
Attributes:
src (`LongTensor`): src word ids
src_raw ([str]): raw src words
pred_sents ([[str]]): words from the n-best translations
pred_scores ([[float]]): log-probs of n-best translations
attns ([`FloatTensor`]) : attention dist for each translation
gold_sent ([str]): words from gold translation
gold_score ([float]): log-prob of gold translation
"""
def __init__(self, fname, src, src_raw, pred_sents,
attn, pred_scores, tgt_sent, gold_score):
self.fname = fname
self.src = src
self.src_raw = src_raw
self.pred_sents = pred_sents
self.attns = attn
self.pred_scores = pred_scores
self.gold_sent = tgt_sent
self.gold_score = gold_score
def log(self, sent_number):
"""
Log translation.
"""
output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
output += "PRED SCORE: {:.4f}\n".format(best_score)
if self.gold_sent is not None:
tgt_sent = ' '.join(self.gold_sent)
output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
output += ("GOLD SCORE: {:.4f}\n".format(self.gold_score))
if len(self.pred_sents) > 1:
output += '\nBEST HYP:\n'
for score, sent in zip(self.pred_scores, self.pred_sents):
output += "[{:.4f}] {}\n".format(score, sent)
return output
| [
"torch.no_grad",
"torch.full",
"torch.arange"
] | 1.1.0 | SebastianVeile/PreSumm | 780c340e04fd5911badb4a8b2af2284c5cdbb8b5 |
0.1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, NamedTuple, Tuple
import numpy as np
import plotly.graph_objs as go
import torch
from torch import Tensor
class SamplesSummary(NamedTuple):
num_chain: int
num_samples: int
single_sample_sz: Tensor
def _samples_info(query_samples: Tensor) -> SamplesSummary:
return SamplesSummary(
num_chain=query_samples.size(0),
num_samples=query_samples.size(1),
# pyre-fixme[6]: For 3rd param expected `Tensor` but got `Size`.
single_sample_sz=query_samples.size()[2:],
)
def trace_helper(
x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]
) -> Tuple[List[go.Scatter], List[str]]:
"""
this function gets results prepared by a plot-related function and
outputs a tuple including plotly object and its corresponding legend.
"""
all_traces = []
num_chains = len(x)
num_indices = len(x[0])
for index in range(num_indices):
trace = []
for chain in range(num_chains):
trace.append(
go.Scatter(
x=x[chain][index],
y=y[chain][index],
mode="lines",
name="chain" + str(chain),
)
)
all_traces.append(trace)
return (all_traces, labels)
def plot_helper(
query_samples: Tensor, func: Callable
) -> Tuple[List[go.Scatter], List[str]]:
"""
this function executes a plot-related function, passed as input parameter func, and
outputs a tuple including plotly object and its corresponding legend.
"""
num_chain, num_samples, single_sample_sz = _samples_info(query_samples)
x_axis, y_axis, all_labels = [], [], []
for chain in range(num_chain):
flattened_data = query_samples[chain].reshape(num_samples, -1)
numel = flattened_data[0].numel()
x_axis_data, y_axis_data, labels = [], [], []
for i in range(numel):
index = np.unravel_index(i, single_sample_sz)
data = flattened_data[:, i]
partial_label = f" for {list(index)}"
x_data, y_data = func(data.detach())
x_axis_data.append(x_data)
y_axis_data.append(y_data)
labels.append(partial_label)
x_axis.append(x_axis_data)
y_axis.append(y_axis_data)
all_labels.append(labels)
return trace_helper(x_axis, y_axis, all_labels[0])
def autocorr(x: Tensor) -> Tuple[List[int], List[float]]:
def autocorr_calculation(x: Tensor, lag: int) -> Tensor:
y1 = x[: (len(x) - lag)]
y2 = x[lag:]
sum_product = (
(y1 - (x.mean(dim=0).expand(y1.size())))
* (y2 - (x.mean(dim=0).expand(y2.size())))
).sum(0)
return sum_product / ((len(x) - lag) * torch.var(x, dim=0))
max_lag = x.size(0)
y_axis_data = [autocorr_calculation(x, lag).item() for lag in range(max_lag)]
x_axis_data = list(range(max_lag))
return (x_axis_data, y_axis_data)
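# Usage sketch: autocorr returns one normalised autocorrelation value per lag for a
# 1-D chain of samples, e.g.
#
#   lags, acf = autocorr(torch.randn(1000))
#   # lags == [0, 1, ..., 999]; acf[0] is close to 1 and, for white noise, the
#   # remaining values hover around 0.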
def trace_plot(x: Tensor) -> Tuple[List[int], Tensor]:
return (list(range(x.size(0))), x)
| [
"torch.var"
] | 0.1.0 | facebookresearch/beanmachine | 225114d9964b90c3a49adddc4387b4a47d1b4262 |
0.1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import math
import operator
from types import MethodType
from typing import Any, Callable, Dict, List, NoReturn, Optional, Set, Tuple
import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
import torch.distributions as dist
from beanmachine.ppl.compiler.beanstalk_common import allowed_functions
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_nodes import BMGNode
from beanmachine.ppl.compiler.hint import log1mexp, math_log1mexp
_in_place_operator_names = {
operator.iadd: "__iadd__",
operator.iand: "__iand__",
operator.ifloordiv: "__ifloordiv__",
operator.ilshift: "__ilshift__",
operator.imatmul: "__imatmul__",
operator.imod: "__imod__",
operator.imul: "__imul__",
operator.ior: "__ior__",
operator.ipow: "__ipow__",
operator.irshift: "__irshift__",
operator.isub: "__isub__",
operator.itruediv: "__idiv__",
operator.ixor: "__ixor__",
}
_in_place_to_regular = {
operator.iadd: operator.add,
operator.iand: operator.and_,
operator.ifloordiv: operator.floordiv,
operator.ilshift: operator.lshift,
operator.imatmul: operator.matmul,
operator.imod: operator.mod,
operator.imul: operator.mul,
operator.ior: operator.or_,
operator.ipow: operator.pow,
operator.irshift: operator.rshift,
operator.isub: operator.sub,
operator.itruediv: operator.truediv,
operator.ixor: operator.xor,
}
def _raise_unsupported(func: Any) -> NoReturn:
if inspect.ismethoddescriptor(func) or isinstance(
func, _builtin_function_or_method
):
func = func.__name__
raise ValueError(f"Function {func} is not supported by Bean Machine Graph.")
def _is_in_place_operator(func: Callable) -> bool:
return func in _in_place_to_regular
def _ordinary_arg_or_const(arg: Any) -> bool:
return isinstance(arg, bn.ConstantNode) or not isinstance(arg, BMGNode)
def only_ordinary_arguments(args, kwargs) -> bool:
if any(isinstance(arg, BMGNode) for arg in args):
return False
if any(isinstance(arg, BMGNode) for arg in kwargs.values()):
return False
return True
def _only_ordinary_arguments_or_constants(
args: List[Any], kwargs: Dict[str, Any]
) -> bool:
return all(_ordinary_arg_or_const(arg) for arg in args) and all(
_ordinary_arg_or_const(arg) for arg in kwargs.values()
)
def _get_ordinary_value(x: Any) -> Any:
return x.value if isinstance(x, bn.ConstantNode) else x
def _is_standard_normal(x: Any) -> bool:
return isinstance(x, dist.Normal) and x.mean == 0.0 and x.stddev == 1.0
def _is_phi_bound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:
# Is this Normal(0.0, 1.0).cdf(x) ?
# TODO: Support kwargs
return (
isinstance(f, MethodType)
and f.__func__ is dist.Normal.cdf
and len(arguments) == 1
and _is_standard_normal(f.__self__)
)
def _is_phi_unbound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:
# Is this Normal.cdf(Normal(0.0, 1.0), x)?
# TODO: Support kwargs
return (
f is dist.Normal.cdf
and len(arguments) == 2
and _is_standard_normal(arguments[0])
)
def _is_phi(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool:
return _is_phi_unbound(f, arguments, kwargs) or _is_phi_bound(f, arguments, kwargs)
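# For illustration, the two recognised spellings of the standard-normal CDF are
#
#   dist.Normal(0.0, 1.0).cdf(x)                 # bound form, matched by _is_phi_bound
#   dist.Normal.cdf(dist.Normal(0.0, 1.0), x)    # unbound form, matched by _is_phi_unbound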
def _flatten_all_lists(xs):
"""Takes a list-of-lists, with arbitrary nesting level;
returns an iteration of all elements."""
if isinstance(xs, list):
for x in xs:
yield from _flatten_all_lists(x)
else:
yield xs
def _list_to_zeros(xs):
"""Takes a list-of-lists, with arbitrary nesting level;
returns a list-of-lists of the same shape but with every non-list
element replaced with zero."""
if isinstance(xs, list):
return [_list_to_zeros(x) for x in xs]
return 0
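# Worked examples: _flatten_all_lists([1, [2, [3]], 4]) yields 1, 2, 3, 4 (as a
# generator), while _list_to_zeros([1, [2, [3]], 4]) returns [0, [0, [0]], 0],
# preserving the nesting but zeroing every leaf.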
def _hashable(x: Any) -> bool:
# Oddly enough, Python does not allow you to test for set inclusion
# if the object is not hashable. Since it is impossible for an unhashable
# object to be in a set, Python could simply say no when asked if a set
    # contains any unhashable object. It does not, so we have to check hashability ourselves.
# All hashable objects have a callable __hash__ attribute.
if not hasattr(x, "__hash__"):
return False
if not isinstance(x.__hash__, Callable):
return False
# It is possible that callable __hash__ exists but throws, which makes it
# unhashable. Eliminate that possibility as well.
try:
hash(x)
except Exception:
return False
return True
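# Worked examples: _hashable((1, 2)) is True; _hashable([1, 2]) is False because
# list.__hash__ is None rather than callable; and an object whose __hash__ raises is
# caught by the try/except and likewise reported as unhashable.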
_empty_args = []
_empty_kwargs = {}
# Oddly enough there does not appear to be an easy way to obtain the type
# of builtin methods.
_builtin_function_or_method = type(abs)
def _is_any_torch_function(f: Callable) -> bool:
# Torch functions we either know about or we reject them immediately;
# we do not attempt to extract a graph of a model which contains
# a call to an unknown torch function with stochastic arguments.
#
# Given a reference to a function, how can we know if it is
# a torch function? Torch does not make it very easy on us to figure
# out what module a function is from. Let's choose some typical
# methods as examples, like arccos or erf:
#
# * torch.Tensor.arccos has no __module__ attribute.
# * torch.arccos.__module__ is None but .__objclass__ has a module string.
# * torch.special.erf.__module__ is the string "torch.special.erf.__module__"
# * torch.tensor(1).arccos.__module__ is None and has no .__objclass__, but
# does have a __self__ with a module.
#
# Our first step then is to see if we have a module.
m = getattr(f, "__module__", None)
if m is None:
# We don't have a module. Do we have an __objclass__ with a module?
oc = getattr(f, "__objclass__", None)
if oc is not None:
m = getattr(oc, "__module__", None)
if m is None:
# We still don't have a module. Maybe __self__ has a module.
s = getattr(f, "__self__", None)
if s is not None:
m = getattr(s, "__module__", None)
if m is not None:
return isinstance(m, str) and (m == "torch" or m.startswith("torch."))
# We don't have a module or an objclass.
#
# If we have something like torch.arccos then we can simply
# check the torch module to see if we can find this exact reference.
return any(item is f for _, item in torch.__dict__.items())
def _is_tensor_unbound_instance_method(f: Callable) -> bool:
# This identifies if a function object is a method *descriptor*
# such as torch.Tensor.add; that is, the method before it is bound
# to a particular self. This function does NOT identify if a function
# is a bound instance method, such as torch.tensor(1.0).add. See below.
if not inspect.ismethoddescriptor(f):
return False
objc = getattr(f, "__objclass__", None)
return objc is torch.Tensor or objc in torch.Tensor.__bases__
def _is_tensor_bound_instance_method(f: Callable) -> bool:
# This identifies if a function object is an instance method of
# a tensor already bound to a particular self. All such functions
# in torch are marked as builtin.
return isinstance(f, _builtin_function_or_method) and isinstance(
getattr(f, "__self__", None), torch.Tensor
)
def _get_unbound_tensor_method(f: Callable) -> Callable:
# Given a bound-to-self tensor instance method, obtain its corresponding
# unbound descriptor. In normal Python, the protocol is that the bound
# method has attribute __func__ pointing back to the descriptor but
# torch does not follow this protocol. Rather, we'll look it up by name.
assert _is_tensor_bound_instance_method(f)
unbound = getattr(torch.Tensor, f.__name__, None)
assert _is_tensor_unbound_instance_method(unbound)
return unbound
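# Illustrative sketch of the bound/unbound distinction above (comments only):
#
#   torch.Tensor.add                      # unbound method descriptor
#   torch.tensor(1.0).add                 # builtin method bound to a tensor
#   _get_unbound_tensor_method(torch.tensor(1.0).add) is torch.Tensor.add
#   # expected: True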
def canonicalize_function(
function: Any, arguments: List[Any]
) -> Tuple[Callable, List[Any]]:
# In Python a function that is a member of a class can be in either a "bound"
# or "unbound" form. Suppose c is of type C and we are calling foo with argument
# x. We could have:
#
# bound: c.foo(x)
# unbound: C.foo(c, x)
#
# The bound version calls the unbound version. How? In the bound case the fetch
# of c.foo returns a method object with attribute __self__ set to c and attribute
# __func__ set to C.foo. The call on the method object then invokes
# __func__(__self__, x).
#
# Unfortunately, calls to torch tensor methods do not follow this convention;
# instead of returning a method object with __func__ and __self__, it returns
# a builtin method object with __self__ but no __func__, so we call special helpers
# for those.
#
# It is useful when analyzing calls to have them in a consistent form. This function
# turns bound function calls into the equivalent unbound function call.
if isinstance(function, MethodType):
f = function.__func__
args = [function.__self__] + arguments
assert isinstance(f, Callable)
elif _is_tensor_bound_instance_method(function):
f = _get_unbound_tensor_method(function)
args = [function.__self__] + arguments
elif isinstance(function, Callable):
f = function
args = arguments
else:
_raise_unsupported(function)
assert isinstance(f, Callable), ( # pyre-ignore
"_canonicalize_function should return callable "
+ f"but got {type(f)} {str(f)}" # pyre-ignore
)
return (f, args) # pyre-ignore
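# Illustrative sketch (comments only; the distribution and argument are made up):
#
#   d = dist.Normal(0.0, 1.0)
#   f, args = canonicalize_function(d.log_prob, [torch.tensor(0.5)])
#   # f is the unbound dist.Normal.log_prob and args is [d, tensor(0.5)],
#   # so f(*args) computes the same value as d.log_prob(torch.tensor(0.5)).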
# This helper class is to solve a problem in the simulated
# execution of the model during graph accumulation. Consider
# a model fragment like:
#
# n = normal()
# y = n.exp()
#
# During graph construction, n will be a SampleNode whose
# operand is a NormalNode, but SampleNode does not have a
# method "exp".
#
# The lifted program we execute will be something like:
#
# n = bmg.handle_function(normal, [])
# func = bmg.handle_dot(n, "exp")
# y = bmg.handle_function(func, [])
#
# The "func" that is returned is one of these KnownFunction
# objects, which captures the notion "I am an invocation
# of known function Tensor.exp on a receiver that is a BMG
# node". We then turn that into a exp node in handle_function.
class KnownFunction:
receiver: BMGNode
function: Callable
def __init__(self, receiver: BMGNode, function: Callable) -> None:
if not isinstance(receiver, BMGNode):
raise TypeError(
f"KnownFunction receiver must be BMGNode but is {type(receiver)}"
)
if not isinstance(function, Callable):
raise TypeError(
f"KnownFunction function must be Callable but is {type(function)}"
)
self.receiver = receiver
self.function = function
class SpecialFunctionCaller:
# As we execute the lifted program, we accumulate graph nodes in the
    # graph builder, and the program passes around graph nodes instead of
# regular values. What happens when a graph node is passed to a
# function, or used as the receiver of a function? That function will be
# expecting a regular value as its argument or receiver.
#
    # Certain function calls are special because they cause graph nodes to
# be created; we have a dictionary here that maps Python function objects
# to the graph builder method that knows how to create the appropriate
# node type.
#
# There are also some functions which we know can be passed a graph node
# and will treat it correctly even though it is a graph node and not
# a value. For example, the function which constructs a dictionary
# or the function which constructs a list. When we encounter one of
# these functions in the lifted program, we do not create a graph node
# or call a special helper function; we simply allow it to be called normally.
_bmg: BMGraphBuilder
_function_map: Dict[Callable, Callable]
_special_tensor_instance_function_names: Set[str]
def __init__(self, bmg: BMGraphBuilder) -> None:
self._bmg = bmg
self._function_map = {
#
# Built-in functions
#
float: self._builtin_float,
#
# Math functions
#
math.exp: self._math_exp,
math.log: self._math_log,
#
# Hints
#
log1mexp: self._hint_log1mexp,
math_log1mexp: self._hint_log1mexp,
#
# Operators as functions
#
operator.add: self._operator_add,
operator.and_: self._operator_and,
operator.contains: self._operator_contains,
operator.eq: self._operator_eq,
operator.floordiv: self._operator_floordiv,
operator.ge: self._operator_ge,
operator.gt: self._operator_gt,
operator.inv: self._operator_inv,
operator.is_: self._operator_is,
operator.is_not: self._operator_is_not,
operator.le: self._operator_le,
operator.lshift: self._operator_lshift,
operator.lt: self._operator_lt,
operator.matmul: self._operator_matmul,
operator.mod: self._operator_mod,
operator.mul: self._operator_mul,
operator.ne: self._operator_ne,
operator.neg: self._operator_neg,
operator.not_: self._operator_not,
operator.or_: self._operator_or,
operator.pos: self._operator_pos,
operator.pow: self._operator_pow,
operator.rshift: self._operator_rshift,
operator.sub: self._operator_sub,
operator.truediv: self._operator_truediv,
operator.xor: self._operator_xor,
#
#
# Torch distributions
#
# (Remember to add a case to distribution_to_node.)
#
dist.Bernoulli: self._dist_bernoulli,
dist.Beta: self._dist_beta,
dist.Binomial: self._dist_binomial,
dist.Categorical: self._dist_categorical,
# TODO: Cauchy
dist.Chi2: self._dist_chi2,
# TODO: ContinuousBernoulli
dist.Dirichlet: self._dist_dirichlet,
# TODO: Exponential
# TODO: FisherSnedecor
dist.Gamma: self._dist_gamma,
# TODO: Geometric
# TODO: Gumbel
dist.HalfCauchy: self._dist_halfcauchy,
dist.HalfNormal: self._dist_halfnormal,
# TODO: Independent
# TODO: Kumaraswamy
# TODO: LKJCholesky
# TODO: Laplace
# TODO: LogNormal
# TODO: LowRankMultivariateNormal
# TODO: MixtureSameFamily
# TODO: Multinomial
# TODO: MultivariateNormal
# TODO: NegativeBinomial
dist.Normal: self._dist_normal,
# TODO: OneHotCategorical
# TODO: Pareto
            dist.Poisson: self._dist_poisson,
# TODO: RelaxedBernoulli
# TODO: LogitRelaxedBernoulli
# TODO: RelaxedOneHotCategorical
dist.StudentT: self._dist_studentt,
# TODO: TransformedDistribution
dist.Uniform: self._dist_uniform,
# TODO: VonMises
# TODO: Weibull
#
# Torch functions
#
torch.Tensor.add: self._torch_add,
torch.add: self._torch_add,
torch.Tensor.bitwise_and: self._torch_bitwise_and,
torch.bitwise_and: self._torch_bitwise_and,
torch.Tensor.bitwise_not: self._torch_bitwise_not,
torch.bitwise_not: self._torch_bitwise_not,
torch.Tensor.bitwise_or: self._torch_bitwise_or,
torch.bitwise_or: self._torch_bitwise_or,
torch.Tensor.bitwise_xor: self._torch_bitwise_xor,
torch.bitwise_xor: self._torch_bitwise_xor,
torch.Tensor.bitwise_left_shift: self._torch_bitwise_left_shift,
torch.bitwise_left_shift: self._torch_bitwise_left_shift,
torch.Tensor.bitwise_right_shift: self._torch_bitwise_right_shift,
torch.bitwise_right_shift: self._torch_bitwise_right_shift,
torch.Tensor.cholesky: self._torch_cholesky,
torch.linalg.cholesky: self._torch_cholesky,
torch.Tensor.div: self._torch_div,
torch.div: self._torch_div,
torch.Tensor.divide: self._torch_div,
torch.divide: self._torch_div,
torch.Tensor.eq: self._torch_eq,
torch.eq: self._torch_eq,
torch.Tensor.equal: self._torch_eq,
torch.equal: self._torch_eq,
torch.Tensor.exp: self._torch_exp,
torch.exp: self._torch_exp,
torch.Tensor.exp2: self._torch_exp2,
torch.exp2: self._torch_exp2,
torch.special.exp2: self._torch_exp2,
torch.Tensor.expm1: self._torch_expm1,
torch.expm1: self._torch_expm1,
torch.special.expm1: self._torch_expm1,
torch.Tensor.float: self._torch_float,
# TODO: float_power
torch.Tensor.floor_divide: self._torch_floor_divide,
torch.floor_divide: self._torch_floor_divide,
torch.Tensor.fmod: self._torch_fmod,
torch.fmod: self._torch_fmod,
torch.Tensor.ge: self._torch_ge,
torch.ge: self._torch_ge,
torch.Tensor.greater: self._torch_gt,
torch.greater: self._torch_gt,
torch.Tensor.greater_equal: self._torch_ge,
torch.greater_equal: self._torch_ge,
torch.Tensor.gt: self._torch_gt,
torch.gt: self._torch_gt,
torch.Tensor.int: self._torch_int,
torch.Tensor.item: self._torch_item,
torch.Tensor.le: self._torch_le,
torch.le: self._torch_le,
torch.Tensor.less: self._torch_lt,
torch.less: self._torch_lt,
torch.Tensor.less_equal: self._torch_le,
torch.less_equal: self._torch_le,
torch.Tensor.log: self._torch_log,
torch.log: self._torch_log,
torch.Tensor.log10: self._torch_log10,
torch.log10: self._torch_log10,
torch.Tensor.log1p: self._torch_log1p,
torch.log1p: self._torch_log1p,
torch.special.log1p: self._torch_log1p,
torch.Tensor.log2: self._torch_log2,
torch.log2: self._torch_log2,
# TODO: logical_and
# TODO: special.logit
torch.Tensor.logical_not: self._torch_logical_not,
torch.logical_not: self._torch_logical_not,
# TODO: logical_or
# TODO: logical_xor
torch.Tensor.logsumexp: self._torch_logsumexp,
torch.logsumexp: self._torch_logsumexp,
torch.special.logsumexp: self._torch_logsumexp,
torch.Tensor.lt: self._torch_lt,
torch.lt: self._torch_lt,
torch.Tensor.matmul: self._torch_matmul,
torch.matmul: self._torch_matmul,
torch.Tensor.mm: self._torch_mm,
torch.mm: self._torch_mm,
torch.Tensor.mul: self._torch_mul,
torch.mul: self._torch_mul,
torch.Tensor.multiply: self._torch_mul,
torch.multiply: self._torch_mul,
torch.Tensor.ne: self._torch_ne,
torch.ne: self._torch_ne,
torch.Tensor.not_equal: self._torch_ne,
torch.not_equal: self._torch_ne,
torch.Tensor.neg: self._torch_neg,
torch.neg: self._torch_neg,
torch.Tensor.negative: self._torch_neg,
torch.negative: self._torch_neg,
torch.Tensor.pow: self._torch_pow,
torch.pow: self._torch_pow,
torch.Tensor.remainder: self._torch_fmod,
torch.remainder: self._torch_fmod,
torch.sigmoid: self._torch_sigmoid,
torch.Tensor.sigmoid: self._torch_sigmoid,
torch.special.expit: self._torch_sigmoid,
torch.Tensor.sqrt: self._torch_sqrt,
torch.sqrt: self._torch_sqrt,
torch.Tensor.sub: self._torch_sub,
torch.sub: self._torch_sub,
torch.Tensor.subtract: self._torch_sub,
torch.subtract: self._torch_sub,
torch.Tensor.sum: self._torch_sum,
torch.sum: self._torch_sum,
torch.Tensor.true_divide: self._torch_div,
torch.true_divide: self._torch_div,
}
self._special_tensor_instance_function_names = {
f.__name__
for f in self._function_map
if _is_tensor_unbound_instance_method(f)
}
def _is_special_tensor_bound_instance_method_name(self, name: str) -> bool:
return name in self._special_tensor_instance_function_names
def bind_tensor_instance_function(
self, receiver: BMGNode, name: str
) -> KnownFunction:
# TODO: What if the node represents a distribution, not a tensor?
# Should we produce a better error message?
if hasattr(torch.Tensor, name):
return KnownFunction(receiver, getattr(torch.Tensor, name))
_raise_unsupported(name)
def is_special_tensor_bound_instance_method(self, f: Callable) -> bool:
return self._is_special_tensor_bound_instance_method_name(
f.__name__
) and _is_tensor_bound_instance_method(f)
def get_special_tensor_unbound_instance_method(self, f: Callable) -> Callable:
assert self.is_special_tensor_bound_instance_method(f)
return _get_unbound_tensor_method(f)
def _make_constant(self, arg: Any) -> BMGNode:
return arg if isinstance(arg, BMGNode) else self._bmg.add_constant(arg)
def is_special_function(
self,
func: Callable,
args: List[Any] = _empty_args, # TODO: Unused
kwargs: Dict[str, Any] = _empty_kwargs, # TODO: Unused
) -> bool:
if isinstance(func, KnownFunction):
return True
if _is_any_torch_function(func):
return True
if not _hashable(func):
return False
if func in allowed_functions:
return True
if func in self._function_map:
return True
# All in-place operators are special functions.
if _is_in_place_operator(func):
return True
return False
def _canonicalize_function(
self, func: Callable, args: List[Any]
) -> Tuple[Callable, List[Any]]:
if isinstance(func, KnownFunction):
args = [func.receiver] + args
func = func.function
else:
func, args = canonicalize_function(func, args)
return func, args
def do_special_call_maybe_stochastic(
self,
func: Any,
args: List[Any],
kwargs: Dict[str, Any] = _empty_kwargs,
) -> Any:
# If we possibly can, just call the original function with ordinary arguments.
# Otherwise, convert everything to a graph node and call our helper which
# does node construction.
assert self.is_special_function(func, args, kwargs)
func, args = self._canonicalize_function(func, args)
if func is torch.tensor:
return self._tensor_constructor(*args, **kwargs)
if (
_only_ordinary_arguments_or_constants(args, kwargs)
or func in allowed_functions
):
new_args = (_get_ordinary_value(arg) for arg in args)
new_kwargs = {key: _get_ordinary_value(arg) for key, arg in kwargs.items()}
return func(*new_args, **new_kwargs)
if _is_in_place_operator(func):
return self._in_place_operator(func, *args)
return self.do_special_call_always_stochastic(func, args, kwargs)
def do_special_call_always_stochastic(
self,
func: Callable,
args: List[Any],
kwargs: Dict[str, Any] = _empty_kwargs,
) -> BMGNode:
# Never call the original function with ordinary arguments. Convert everything
# to a graph node and call our helper which does node construction.
assert self.is_special_function(func, args, kwargs)
# We should never call do_special_call_always_stochastic on (1) a tensor
# constructor, or (2) a function known to be allowed to take any values.
assert func not in allowed_functions
assert func is not torch.tensor
func, args = self._canonicalize_function(func, args)
if _is_phi_unbound(func, args, kwargs):
args = args[1:]
node_constructor = self._phi
elif _hashable(func) and func in self._function_map:
node_constructor = self._function_map[func]
else:
# We are trying to do an always-stochastic call on a function that
# we do not yet know how to handle.
_raise_unsupported(func)
new_args = (self._make_constant(arg) for arg in args)
new_kwargs = {key: self._make_constant(arg) for key, arg in kwargs.items()}
return node_constructor(*new_args, **new_kwargs) # pyre-ignore
#
# Builtins; these must have the same signature as their corresponding
# builtin functions.
#
def _builtin_float(self, input: BMGNode) -> BMGNode:
# TODO: Do we want to do this at all? Why should float(t) insert a
# TO_REAL node into the graph? We can simply insert TO_REAL where required
# by the BMG type system.
return self._bmg.add_to_real(input)
#
# Math functions
#
def _math_exp(self, input: BMGNode) -> BMGNode:
# TODO: Right signature?
return self._bmg.add_exp(input)
def _math_log(self, input: BMGNode) -> BMGNode:
return self._bmg.add_log(input)
#
# Hints
# TODO: Eliminate this hack. Write a problem fixer which detects these
# patterns and rewrites them into the more efficient operator.
#
def _hint_log1mexp(self, x: BMGNode) -> BMGNode:
return self._bmg.add_log1mexp(x)
#
# Distributions; these must have the same signature as the corresponding
# constructor.
#
def distribution_to_node( # noqa
self, distribution: dist.Distribution
) -> bn.DistributionNode:
t = type(distribution)
if isinstance(distribution, dist.Bernoulli):
args = [distribution.probs]
elif isinstance(distribution, dist.Beta):
args = [distribution.concentration1, distribution.concentration0]
elif isinstance(distribution, dist.Binomial):
args = [distribution.total_count, distribution.probs]
elif isinstance(distribution, dist.Categorical):
args = [distribution.probs]
elif isinstance(distribution, dist.Chi2):
args = [distribution.df]
elif isinstance(distribution, dist.Dirichlet):
args = [distribution.concentration]
elif isinstance(distribution, dist.Gamma):
args = [distribution.concentration, distribution.rate]
elif isinstance(distribution, dist.HalfCauchy):
args = [distribution.scale]
elif isinstance(distribution, dist.HalfNormal):
args = [distribution.scale]
elif isinstance(distribution, dist.Normal):
args = [distribution.mean, distribution.stddev]
elif isinstance(distribution, dist.Poisson):
args = [distribution.rate]
elif isinstance(distribution, dist.StudentT):
args = [distribution.df, distribution.loc, distribution.scale]
elif isinstance(distribution, dist.Uniform):
args = [distribution.low, distribution.high]
else:
# TODO: Better error
raise TypeError(
f"Distribution '{t.__name__}' is not supported by Bean Machine Graph."
)
d = self.do_special_call_always_stochastic(t, args, {})
assert isinstance(d, bn.DistributionNode)
return d
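    # For example (comments only): distribution_to_node applied to
    # dist.Beta(2.0, 3.0) extracts [concentration1, concentration0] and
    # dispatches to _dist_beta, yielding the corresponding Beta graph node.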
def _dist_bernoulli(
self,
probs: Optional[BMGNode] = None,
logits: Optional[BMGNode] = None,
validate_args: Any = None,
) -> BMGNode:
if (probs is None and logits is None) or (
probs is not None and logits is not None
):
raise ValueError("Bernoulli requires exactly one of probs or logits")
if logits is not None:
return self._bmg.add_bernoulli_logit(logits)
return self._bmg.add_bernoulli(probs)
def _dist_beta(
self,
concentration1: BMGNode,
concentration0: BMGNode,
validate_args: Any = None,
) -> BMGNode:
return self._bmg.add_beta(concentration1, concentration0)
def _dist_binomial(
self,
total_count: Optional[BMGNode] = None,
probs: Optional[BMGNode] = None,
logits: Optional[BMGNode] = None,
validate_args: Any = None,
) -> BMGNode:
if (probs is None and logits is None) or (
probs is not None and logits is not None
):
raise ValueError("Binomial requires exactly one of probs or logits")
# TODO: Create a test case for Binomial(probs=0.5) where total_count
# is omitted.
if total_count is None:
total_count = self._make_constant(1)
if logits is not None:
return self._bmg.add_binomial_logit(total_count, logits)
return self._bmg.add_binomial(total_count, probs)
def _dist_categorical(
self,
probs: Optional[BMGNode] = None,
logits: Optional[BMGNode] = None,
validate_args: Any = None,
) -> BMGNode:
if (probs is None and logits is None) or (
probs is not None and logits is not None
):
raise ValueError("Categorical requires exactly one of probs or logits")
if logits is not None:
return self._bmg.add_categorical_logit(logits)
return self._bmg.add_categorical(probs)
def _dist_chi2(self, df: BMGNode, validate_args: Any = None) -> BMGNode:
return self._bmg.add_chi2(df)
def _dist_dirichlet(self, concentration: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_dirichlet(concentration)
def _dist_gamma(
self, concentration: BMGNode, rate: BMGNode, validate_args=None
) -> BMGNode:
return self._bmg.add_gamma(concentration, rate)
def _dist_halfcauchy(self, scale: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_halfcauchy(scale)
def _dist_halfnormal(self, scale: Any, validate_args=None) -> BMGNode:
return self._bmg.add_halfnormal(scale)
def _dist_normal(self, loc: BMGNode, scale: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_normal(loc, scale)
def _dist_poisson(self, rate: BMGNode) -> BMGNode:
return self._bmg.add_poisson(rate)
def _dist_studentt(
self,
df: BMGNode,
loc: Optional[BMGNode] = None,
scale: Optional[BMGNode] = None,
validate_args=None,
) -> BMGNode:
if loc is None:
loc = self._make_constant(0)
if scale is None:
scale = self._make_constant(1)
return self._bmg.add_studentt(df, loc, scale)
def _dist_uniform(self, low: BMGNode, high: BMGNode, validate_args=None) -> BMGNode:
return self._bmg.add_uniform(low, high)
#
# Tensor constructor
#
def _tensor_constructor(self, data: Any) -> Any:
# The tensor constructor is a bit tricky because it takes a single
# argument that is either a value or a list of values. We need:
# (1) a flattened list of all the arguments, and
# (2) the size of the original tensor.
flattened_args = list(_flatten_all_lists(data))
if not any(isinstance(arg, BMGNode) for arg in flattened_args):
# None of the arguments are graph nodes. We can just
# construct the tensor normally.
return torch.tensor(data)
# At least one of the arguments is a graph node.
#
# If we're constructing a singleton tensor and the single value
# is a graph node, we can just keep it as that graph node.
if len(flattened_args) == 1:
return flattened_args[0]
# We have two or more arguments and at least one is a graph node.
# Convert them all to graph nodes.
for index, arg in enumerate(flattened_args):
if not isinstance(arg, BMGNode):
flattened_args[index] = self._bmg.add_constant(arg)
# What shape is this tensor? Rather than duplicating the logic in the
# tensor class, let's just construct the same shape made of entirely
# zeros and then ask what shape it is.
size = torch.tensor(_list_to_zeros(data)).size()
return self._bmg.add_tensor(size, *flattened_args)
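    # Illustrative sketch of the shape trick above (comments only; `n` stands
    # for some graph node):
    #
    #   data = [[n, 1.5], [2.5, n]]
    #   torch.tensor(_list_to_zeros(data)).size()  # -> torch.Size([2, 2])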
#
# Tensor functions; these must have the same signature as the
# corresponding torch function.
#
# TODO: We do not support mutation of stochastic tensors; we should produce an
# error if there are any "out" values.
def _phi(self, value: BMGNode) -> BMGNode:
return self._bmg.add_phi(value)
def _torch_add(
self,
input: BMGNode,
other: BMGNode,
alpha: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: tensor add has the semantics input + alpha * other; if alpha is present
# then we need to generate a multiply and an addition.
return self._bmg.add_addition(input, other)
def _torch_bitwise_and(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
return self._bmg.add_bitand(input, other)
def _torch_bitwise_left_shift(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
        # TODO: In torch, a << b is not bitwise at all. Rather it is simply
        # an alias for a * (2 ** b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_lshift(input, other)
def _torch_bitwise_not(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_invert(input)
def _torch_bitwise_or(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
return self._bmg.add_bitor(input, other)
def _torch_bitwise_right_shift(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
        # TODO: In torch, a >> b is not bitwise at all. Rather it is simply
        # an alias for a * (2 ** -b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_rshift(input, other)
def _torch_bitwise_xor(
self, input: BMGNode, other: BMGNode, out: Any = None
) -> BMGNode:
return self._bmg.add_bitxor(input, other)
def _torch_cholesky(
self,
input: BMGNode,
upper: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: What to do with upper?
return self._bmg.add_cholesky(input)
def _torch_div(
self,
input: BMGNode,
other: BMGNode,
rounding_mode: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: Should we give an error if there is a rounding mode?
return self._bmg.add_division(input, other)
def _torch_eq(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_equal(input, other)
def _torch_exp(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_exp(input)
def _torch_exp2(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_exp2(input)
def _torch_expm1(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_expm1(input)
def _torch_float(
self, input: BMGNode, memory_format: Optional[BMGNode] = None
) -> BMGNode:
# TODO: Do we want to do this at all? Why should t.float() insert a
# TO_REAL node into the graph? We can simply insert TO_REAL where required
# by the BMG type system.
# TODO: If we do keep this, what should we do with memory_format?
return self._bmg.add_to_real(input)
def _torch_floor_divide(
self,
input: BMGNode,
other: BMGNode,
out: Any = None,
) -> BMGNode:
return self._bmg.add_floordiv(input, other)
def _torch_fmod(
self,
input: BMGNode,
other: BMGNode,
out: Any = None,
) -> BMGNode:
return self._bmg.add_mod(input, other)
def _torch_ge(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_greater_than_equal(input, other)
def _torch_gt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_greater_than(input, other)
def _torch_int(
self, input: BMGNode, memory_format: Optional[BMGNode] = None
) -> BMGNode:
# TODO: What should we do with memory_format?
return self._bmg.add_to_int(input)
def _torch_item(self, input: BMGNode) -> Any:
return self._bmg.add_item(input)
def _torch_le(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_less_than_equal(input, other)
def _torch_log(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log(input)
def _torch_log10(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log10(input)
def _torch_log1p(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log1p(input)
def _torch_log2(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_log2(input)
def _torch_logical_not(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_not(input)
def _torch_logsumexp(
self,
input: BMGNode,
dim: BMGNode,
keepdim: Optional[BMGNode] = None,
out: Any = None,
) -> Any:
if keepdim is None:
keepdim = self._make_constant(False)
return self._bmg.add_logsumexp_torch(input, dim, keepdim)
def _torch_lt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_less_than(input, other)
def _torch_matmul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
# TODO: mm and matmul have different behavior; we probably need to make
# a distinction here.
return self._bmg.add_matrix_multiplication(input, other)
def _torch_mm(self, input: BMGNode, mat2: BMGNode, out: Any = None) -> BMGNode:
# TODO: mm and matmul have different behavior; we probably need to make
# a distinction here.
return self._bmg.add_matrix_multiplication(input, mat2)
def _torch_mul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_multiplication(input, other)
def _torch_ne(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_not_equal(input, other)
def _torch_neg(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_negate(input)
def _torch_pow(self, input: BMGNode, exponent: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_power(input, exponent)
def _torch_sigmoid(self, input: BMGNode, out: Any = None) -> BMGNode:
return self._bmg.add_logistic(input)
def _torch_sqrt(self, input: BMGNode, out: Any = None) -> Any:
return self._bmg.add_squareroot(input)
def _torch_sub(
self,
input: BMGNode,
other: BMGNode,
alpha: Optional[BMGNode] = None,
out: Any = None,
) -> BMGNode:
# TODO: tensor sub has the semantics input - alpha * other; if alpha is present
        # then we need to generate a multiply and a subtraction.
return self._bmg.add_subtraction(input, other)
def _torch_sum(
self,
input: BMGNode,
dtype: Any = None,
) -> Any:
return self._bmg.add_sum(input)
#
# Operators as functions
#
def _operator_add(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_addition(a, b)
def _operator_and(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_bitand(a, b)
def _operator_contains(self, a: BMGNode, b: BMGNode) -> BMGNode:
# Note that "a" is the container and "b" is the query. That is,
# this means "b in a", NOT "a in b"
return self._bmg.add_in(b, a)
def _operator_eq(self, a: Any, b: Any) -> Any:
return self._bmg.add_equal(a, b)
def _operator_floordiv(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_floordiv(a, b)
def _operator_ge(self, a: Any, b: Any) -> Any:
return self._bmg.add_greater_than_equal(a, b)
def _operator_gt(self, a: Any, b: Any) -> Any:
return self._bmg.add_greater_than(a, b)
def _operator_inv(self, obj: BMGNode) -> BMGNode:
return self._bmg.add_invert(obj)
def _operator_is(self, a: Any, b: Any) -> Any:
return self._bmg.add_is(a, b)
def _operator_is_not(self, a: Any, b: Any) -> Any:
return self._bmg.add_is_not(a, b)
def _operator_le(self, a: Any, b: Any) -> Any:
return self._bmg.add_less_than_equal(a, b)
def _operator_lshift(self, a: BMGNode, b: BMGNode) -> BMGNode:
        # TODO: In torch, a << b is not bitwise at all. Rather it is simply
        # an alias for a * (2 ** b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_lshift(a, b)
def _operator_lt(self, a: Any, b: Any) -> Any:
return self._bmg.add_less_than(a, b)
def _operator_matmul(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_matrix_multiplication(a, b)
def _operator_mod(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_mod(a, b)
def _operator_mul(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_multiplication(a, b)
def _operator_ne(self, a: Any, b: Any) -> Any:
return self._bmg.add_not_equal(a, b)
def _operator_neg(self, obj: BMGNode) -> BMGNode:
return self._bmg.add_negate(obj)
def _operator_not(self, obj: BMGNode) -> BMGNode:
return self._bmg.add_not(obj)
def _operator_or(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_bitor(a, b)
def _operator_pos(self, obj: BMGNode) -> BMGNode:
# unary + is an identity on graph nodes
return obj
def _operator_pow(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_power(a, b)
def _operator_rshift(self, a: BMGNode, b: BMGNode) -> BMGNode:
        # TODO: In torch, a >> b is not bitwise at all. Rather it is simply
        # an alias for a * (2 ** -b). Make a rewriter that turns shifts into
# this operation.
return self._bmg.add_rshift(a, b)
def _operator_sub(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_subtraction(a, b)
def _operator_truediv(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_division(a, b)
def _operator_xor(self, a: BMGNode, b: BMGNode) -> BMGNode:
return self._bmg.add_bitxor(a, b)
#
# Augmented assignment operators
#
def _in_place_operator(
self,
native_in_place: Callable, # operator.iadd, for example
left: Any,
right: Any,
) -> Any:
# Handling augmented assignments (+=, -=, *=, and so on) has a lot of cases;
# to cut down on code duplication we call this higher-level method. Throughout
# the comments below we assume that we're handling a +=; the logic is the same
# for all the operators.
# TODO: We have a problem that we need to resolve regarding compilation of models
        # which have mutations of aliased tensors. Compare the action of these two
        # similar models in the original Bean Machine implementation:
#
# @functional def foo():
# x = flip() # 0 or 1
# y = x # y is an alias for x
# y += 1 # y is mutated in place and continues to alias x
# return x # returns 1 or 2
#
# vs
#
# @functional def foo():
# x = flip() # 0 or 1
# y = x # y is an alias for x
# y = y + 1 # y no longer aliases x; y is 1 or 2
# return x # returns 0 or 1
#
# Suppose we are asked to compile the first model; how should we execute
# the rewritten form of it so as to accumulate the correct graph? Unlike
# tensors, graph nodes are not mutable!
#
# Here's what we're going to do for now:
#
# If neither operand is a graph node then do exactly what the model would
# normally do:
#
if not isinstance(left, BMGNode) and not isinstance(right, BMGNode):
return native_in_place(left, right)
assert native_in_place in _in_place_to_regular
native_regular = _in_place_to_regular[native_in_place]
# At least one operand is a graph node. If we have tensor += graph_node
# or graph_node += anything then optimistically assume that there
# is NOT any alias of the mutated left side, and treat the += as though
# it is a normal addition.
#
# TODO: Should we produce some sort of warning here telling the user that
# the compiled model semantics might be different than the original model?
# Or is that too noisy? There are going to be a lot of models with += where
# one of the operands is an ordinary tensor and one is a graph node, but which
# do not have any aliasing problem.
if isinstance(left, torch.Tensor) or isinstance(left, BMGNode):
return self.do_special_call_always_stochastic(
native_regular, [left, right], {}
)
# If we've made it here then we have x += graph_node, where x is not a
# tensor. There are two possibilities: either x is some type which implements
# mutating in-place +=, or it is not. If it is, then just call the mutator
# and hope for the best.
#
# TODO: This scenario is another opportunity for a warning or error, since
# the model is probably not one that can be compiled if it is depending on
# in-place mutation of an object which has a stochastic quantity added to it.
assert isinstance(right, BMGNode)
assert native_in_place in _in_place_operator_names
if hasattr(left, _in_place_operator_names[native_in_place]):
# It is possible that the operator exists but either returns
# NotImplemented or raises NotImplementedError. In either case,
# assume that we can fall back to non-mutating addition.
try:
result = native_in_place(left, right)
if result is not NotImplemented:
return result
except NotImplementedError:
pass
# We have x += graph_node, and x is not mutating in place, so just
# do x + graph_node:
return self.do_special_call_maybe_stochastic(native_regular, [left, right], {})
| [
"torch.__dict__.items",
"torch.tensor"
] | 0.1.0 | facebookresearch/beanmachine | 225114d9964b90c3a49adddc4387b4a47d1b4262 |
1.7 | from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from .SubLayers import MultiHeadAttention, PositionwiseFeedForward
class FFTBlock(torch.nn.Module):
"""FFT Block"""
def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1):
super(FFTBlock, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, kernel_size, dropout=dropout
)
def forward(self, enc_input, mask=None, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask
)
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
enc_output = self.pos_ffn(enc_output)
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output, enc_slf_attn
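# Shape sketch for FFTBlock.forward (an assumption based on the code above):
# enc_input is (batch, seq_len, d_model) and mask is a boolean (batch, seq_len)
# padding mask; the block returns an output of the same shape as enc_input
# together with the self-attention weights.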
class ConvNorm(torch.nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class PostNet(nn.Module):
"""
    PostNet: Five 1-d convolutions with 512 channels and kernel size 5
"""
def __init__(
self,
n_mel_channels=80,
postnet_embedding_dim=512,
postnet_kernel_size=5,
postnet_n_convolutions=5,
):
super(PostNet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(
n_mel_channels,
postnet_embedding_dim,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
),
nn.BatchNorm1d(postnet_embedding_dim),
)
)
for i in range(1, postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(
postnet_embedding_dim,
postnet_embedding_dim,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
),
nn.BatchNorm1d(postnet_embedding_dim),
)
)
self.convolutions.append(
nn.Sequential(
ConvNorm(
postnet_embedding_dim,
n_mel_channels,
kernel_size=postnet_kernel_size,
stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1,
w_init_gain="linear",
),
nn.BatchNorm1d(n_mel_channels),
)
)
def forward(self, x):
x = x.contiguous().transpose(1, 2)
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)
x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
x = x.contiguous().transpose(1, 2)
return x
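if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the original
    # module): push a random mel batch through PostNet. The shapes are
    # assumptions matching the default arguments (80 mel channels).
    postnet = PostNet()
    mel = torch.randn(2, 50, 80)  # (batch, frames, n_mel_channels)
    residual = postnet(mel)
    print(residual.shape)  # expected: torch.Size([2, 50, 80])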
| [
"torch.nn.BatchNorm1d",
"torch.nn.ModuleList",
"torch.nn.Conv1d"
] | 1.7.1 | richarai9/FastSpeech2 | d044c00a44cbfa3e1c89a22c8285a374a00e27a9 |
1.1 | import argparse
import os
import os.path as osp
import shutil
import tempfile
import json
import pdb
import numpy as np
import pickle
import pandas as pd
import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, load_checkpoint
from mmdet.apis import init_dist
from mmdet.core import lvis_eval, results2json, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from mmdet.core import build_assigner
from utils import filter_logits_by_gt
TEMP_DATASET_SIZE = 5000
def single_gpu_test(model, data_loader, show=False, cfg=None, index=0, img_meta=None):
model.eval()
results = []
logits_list = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
class_instances = pickle.load(open('train_instances_list.p', 'rb'))
normalized_classes = np.zeros(1231)
for i, c in enumerate(class_instances):
if c:
normalized_classes[i] = 1/np.sqrt(c)
for i, data in enumerate(data_loader):
# if i < TEMP_DATASET_SIZE*index:
# continue
if i >= TEMP_DATASET_SIZE*(index+1): # temporary condition for testing
break
with torch.no_grad():
bbox_results, det_bboxes, det_labels, scores = model(return_loss=False, rescale=not show, **data, img_id=i, norm_cls=normalized_classes)
det_bboxes = det_bboxes.detach().cpu()
det_labels = det_labels.detach().cpu()
scores = scores.detach().cpu()
# save original logits:
# filename = data['img_meta'][0].data[0][0]['filename'].split('/')[-1] # get the file name, e.g: '000000397133.jpg'
# with open(f'test_logits/logits_per_img/{filename}.p', 'wb') as outfile:
# pickle.dump(scores, outfile)
results.append(bbox_results)
logits_list.append((det_bboxes, det_labels, scores))
if show:
model.module.show_result(data, bbox_results)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results, logits_list # return also class. logits and labels
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--json_out',
help='output result file name without extension',
type=str)
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--tau', type=float, default=0.0)
parser.add_argument('--data_index', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def reweight_cls(model, tauuu):
if tauuu == 0:
return model
model_dict = model.state_dict()
def pnorm(weights, tau):
normB = torch.norm(weights, 2, 1)
ws = weights.clone()
for i in range(0, weights.shape[0]):
ws[i] = ws[i] / torch.pow(normB[i], tau)
return ws
reweight_set = ['bbox_head.fc_cls.weight']
tau = tauuu
for k in reweight_set:
weight = model_dict[k] # ([1231, 1024])
weight = pnorm(weight, tau)
model_dict[k].copy_(weight)
print('Reweight param {:<30} with tau={}'.format(k, tau))
return model
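# A worked example of the tau-normalisation in pnorm above (made-up numbers):
# a classifier row with L2 norm 4.0 and tau = 0.5 is divided by 4.0 ** 0.5 = 2.0,
# while a row with norm 1.0 is left unchanged, so classes whose weights grew
# large during training (typically the frequent ones) are scaled down the most.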
def logits_process(logits):
"""
    Get the logits as a tuple of softmax logits, bounding boxes and labels.
    Output: the following matrices:
    logits_mat in size (dataset, 300, 1231) - top 300 logits for each image.
    bboxes_mat in size (dataset, 300, 4) - top 300 bboxes for each image.
    labels_mat in size (dataset, 300) - corresponding labels, 300 per image.
    proposal_num in size (dataset, 300, 1) - proposal number for each detection.
"""
# all_bboxes_logits = []
# for image in logits:
# image_bboxes_logits = []
# for i, bbox in enumerate(image[0]):
# bboxes_logits_dict = dict() # image[0] = tensor including 300 bboxes
# index = int(bbox[5].item()) # bbox[6] specifies the relevant line in the logits matrix
# logits_vector = image[1][index]
# bboxes_logits_dict['bbox'] = bbox[:4]
# bboxes_logits_dict['score'] = bbox[4]
# bboxes_logits_dict['logits'] = logits_vector
# image_bboxes_logits.append(bboxes_logits_dict)
# all_bboxes_logits.append(image_bboxes_logits)
# for idx in range(len(dataset)):
# img_id = dataset.img_ids[idx]
logits_mat = np.zeros((TEMP_DATASET_SIZE, 300, 1231))
bboxes_mat = np.zeros((TEMP_DATASET_SIZE, 300, 4))
labels_mat = np.zeros((TEMP_DATASET_SIZE, 300))
proposal_num = np.zeros((TEMP_DATASET_SIZE, 300, 1))
for i, image in enumerate(logits):
for j, bbox in enumerate(image[0]): # image[0] = tensor including 300 bboxes
# bboxes_logits_dict = dict()
index = int(bbox[5].item()) # bbox[5] specifies the relevant line in the logits matrix
logits_vector = image[2][index] # image[2] includes the scores
# bbox_arr = np.array(bbox[:4])
bboxes_mat[i][j][:] = bbox[:4]
logits_mat[i][j] = np.array(logits_vector)
# added this to compute proposal numbers
proposal_num[i][j] = bbox[-1]
labels_mat[i] = image[1] # image[1] includes the labels
return bboxes_mat, labels_mat, logits_mat, proposal_num
def main():
args = parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.data_index % 2)
assert args.out or args.show or args.json_out, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out" or "--show" or "--json_out"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
if args.json_out is not None and args.json_out.endswith('.json'):
args.json_out = args.json_out[:-5]
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test) # original - test | changed to test_with_train_data
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=0, # cfg.data.workers_per_gpu
dist=distributed,
shuffle=False)
# save gt boxes and labels for learning nms
# for i, data in enumerate(data_loader):
# img_id = dataset.img_infos[i]['id']
# gt = dataset.get_ann_info(i)
# gt_boxes = gt['bboxes']
# gt_labels = gt['labels']
# filename = f'test_logits/learning_nms_data/{i}/gt_boxes.p' # file name for new directory
# os.makedirs(os.path.dirname(filename), exist_ok=True)
# with open(f'test_logits/learning_nms_data/{i}/gt_boxes.p', 'wb') as outfile: # possible to include img_id
# pickle.dump(gt_boxes, outfile)
# with open(f'test_logits/learning_nms_data/{i}/gt_labels.p', 'wb') as outfile:
# pickle.dump(gt_boxes, outfile)
#
# # filename = dataset.img_infos[i]['filename']
# # with open(f'test_gt/{filename}.p', 'wb') as outfile:
# # pickle.dump(gt_labels, outfile)
# save gt instances per class
# instances_list = np.zeros(1231)
# for i, data in enumerate(data_loader): # original script in test_lvis_tnorm.py
# gt = dataset.get_ann_info(i)
# print(i)
# for label in gt['labels']:
# instances_list[label] += 1
# with open('train_instances_list.p', 'wb') as outfile:
# pickle.dump(instances_list, outfile)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
model = reweight_cls(model, args.tau)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs, logits = single_gpu_test(model, data_loader, args.show, cfg, args.data_index)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
# save outputs as csv:
# pd.DataFrame(outputs).to_csv("original_outputs_full.csv")
# preprocess logits and save them on json file
# otp = np.asarray(outputs) # temp
# df = pd.DataFrame(otp)
# df.to_csv('otp.csv', index=False)
bboxes_mat, labels_mat, logits_mat, proposal_num = logits_process(logits)
# save labels, boxes and logits
# with open('test_logits/dragon_test_bboxes_mat.p', 'wb') as outfile:
# pickle.dump(bboxes_mat, outfile)
# with open('test_logits/dragon_labels_mat.p', 'wb') as outfile:
# pickle.dump(labels_mat, outfile)
# with open('logits_mat1.p', 'wb') as outfile:
# pickle.dump(logits_mat[:1000], outfile)
# with open('logits_mat2.p', 'wb') as outfile:
# pickle.dump(logits_mat[1000:2000], outfile)
# with open('logits_mat3.p', 'wb') as outfile:
# pickle.dump(logits_mat[2000:3000], outfile)
# with open('logits_mat4.p', 'wb') as outfile:
# pickle.dump(logits_mat[3000:4000], outfile)
# with open('logits_mat5.p', 'wb') as outfile:
# pickle.dump(logits_mat[4000:], outfile)
# filter detections by iou with gt (for dragon training)
gt_list = []
results_per_image = []
for i, data in enumerate(data_loader): # original script in test_lvis_tnorm.py
# if i < TEMP_DATASET_SIZE*args.data_index:
# continue
if i >= TEMP_DATASET_SIZE: # temporary condition for testing
break
print(i)
img_id = dataset.img_infos[i]['id']
gt = dataset.get_ann_info(i)
gt_dict = dict()
gt_dict['id'] = img_id
gt_dict['bboxes'] = gt['bboxes']
gt_dict['labels'] = gt['labels']
gt_list.append(gt_dict)
# filter logits according to equivalent ground truth.
# after filtering, for each image we get a list in length of classes and detections belongs to this class.
results = filter_logits_by_gt(bboxes_mat[i], logits_mat[i], gt_list[i], proposal_num[i], i)
results_per_image.append(results)
with open(f'dragon_bboxes_logits_map24.p', 'wb') as outfile:
pickle.dump(results_per_image, outfile)
print('saved')
# evaluation:
rank, _ = get_dist_info()
if args.out and rank == 0:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
eval_types = args.eval
if eval_types:
print('Starting evaluate {}'.format(' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = args.out
lvis_eval(result_file, eval_types, dataset.lvis)
else:
if not isinstance(outputs[0], dict):
result_files = results2json(dataset, outputs, args.out, args.data_index)
lvis_eval(result_files, eval_types, dataset.lvis, max_dets=300)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
result_file = args.out + '.{}'.format(name)
result_files = results2json(dataset, outputs_,
result_file)
lvis_eval(result_files, eval_types, dataset.lvis)
# Save predictions in the COCO json format
if args.json_out and rank == 0:
if not isinstance(outputs[0], dict):
results2json(dataset, outputs, args.json_out)
else:
for name in outputs[0]:
outputs_ = [out[name] for out in outputs]
result_file = args.json_out + '.{}'.format(name)
results2json(dataset, outputs_, result_file)
if __name__ == '__main__':
main()
| [
"torch.norm",
"torch.no_grad",
"torch.pow",
"torch.full",
"torch.distributed.barrier",
"torch.distributed.broadcast"
] | 1.1 | ydiller/BalancedGroupSoftmax | 6fecf9fbb8ed1f54540787188e212ab39cd2b501 |
1.3 | """A training script of TD3 on OpenAI Gym Mujoco environments.
This script follows the settings of http://arxiv.org/abs/1802.09477 as much
as possible.
"""
import argparse
import logging
import sys
import gym
import gym.wrappers
import numpy as np
import torch
from torch import nn
import pfrl
from pfrl import experiments, explorers, replay_buffers, utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--outdir",
type=str,
default="results",
help=(
"Directory path to save output files."
" If it does not exist, it will be created."
),
)
parser.add_argument(
"--env",
type=str,
default="Hopper-v2",
help="OpenAI Gym MuJoCo env to perform algorithm on.",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
parser.add_argument(
"--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
)
parser.add_argument(
"--load", type=str, default="", help="Directory to load agent from."
)
parser.add_argument(
"--steps",
type=int,
default=10**6,
help="Total number of timesteps to train the agent.",
)
parser.add_argument(
"--eval-n-runs",
type=int,
default=10,
help="Number of episodes run for each evaluation.",
)
parser.add_argument(
"--eval-interval",
type=int,
default=5000,
help="Interval in timesteps between evaluations.",
)
parser.add_argument(
"--replay-start-size",
type=int,
default=10000,
help="Minimum replay buffer size before " + "performing gradient updates.",
)
parser.add_argument("--batch-size", type=int, default=100, help="Minibatch size")
parser.add_argument(
"--render", action="store_true", help="Render env states in a GUI window."
)
parser.add_argument(
"--demo", action="store_true", help="Just run evaluation, not training."
)
parser.add_argument("--load-pretrained", action="store_true", default=False)
parser.add_argument(
"--pretrained-type", type=str, default="best", choices=["best", "final"]
)
parser.add_argument(
"--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
)
parser.add_argument(
"--log-level", type=int, default=logging.INFO, help="Level of the root logger."
)
args = parser.parse_args()
logging.basicConfig(level=args.log_level)
args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
print("Output files are saved in {}".format(args.outdir))
# Set a random seed used in PFRL
utils.set_random_seed(args.seed)
def make_env(test):
env = gym.make(args.env)
# Unwrap TimeLimit wrapper
assert isinstance(env, gym.wrappers.TimeLimit)
env = env.env
# Use different random seeds for train and test envs
env_seed = 2**32 - 1 - args.seed if test else args.seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = pfrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = pfrl.wrappers.Monitor(env, args.outdir)
if args.render and not test:
env = pfrl.wrappers.Render(env)
return env
env = make_env(test=False)
timestep_limit = env.spec.max_episode_steps
obs_space = env.observation_space
action_space = env.action_space
print("Observation space:", obs_space)
print("Action space:", action_space)
obs_size = obs_space.low.size
action_size = action_space.low.size
policy = nn.Sequential(
nn.Linear(obs_size, 400),
nn.ReLU(),
nn.Linear(400, 300),
nn.ReLU(),
nn.Linear(300, action_size),
nn.Tanh(),
pfrl.policies.DeterministicHead(),
)
policy_optimizer = torch.optim.Adam(policy.parameters())
def make_q_func_with_optimizer():
q_func = nn.Sequential(
pfrl.nn.ConcatObsAndAction(),
nn.Linear(obs_size + action_size, 400),
nn.ReLU(),
nn.Linear(400, 300),
nn.ReLU(),
nn.Linear(300, 1),
)
q_func_optimizer = torch.optim.Adam(q_func.parameters())
return q_func, q_func_optimizer
q_func1, q_func1_optimizer = make_q_func_with_optimizer()
q_func2, q_func2_optimizer = make_q_func_with_optimizer()
rbuf = replay_buffers.ReplayBuffer(10**6)
explorer = explorers.AdditiveGaussian(
scale=0.1, low=action_space.low, high=action_space.high
)
def burnin_action_func():
"""Select random actions until model is updated one or more times."""
return np.random.uniform(action_space.low, action_space.high).astype(np.float32)
# Hyperparameters in http://arxiv.org/abs/1802.09477
agent = pfrl.agents.TD3(
policy,
q_func1,
q_func2,
policy_optimizer,
q_func1_optimizer,
q_func2_optimizer,
rbuf,
gamma=0.99,
soft_update_tau=5e-3,
explorer=explorer,
replay_start_size=args.replay_start_size,
gpu=args.gpu,
minibatch_size=args.batch_size,
burnin_action_func=burnin_action_func,
)
if len(args.load) > 0 or args.load_pretrained:
# either load or load_pretrained must be false
assert not len(args.load) > 0 or not args.load_pretrained
if len(args.load) > 0:
agent.load(args.load)
else:
agent.load(
utils.download_model("TD3", args.env, model_type=args.pretrained_type)[
0
]
)
eval_env = make_env(test=True)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit,
)
print(
"n_runs: {} mean: {} median: {} stdev {}".format(
args.eval_n_runs,
eval_stats["mean"],
eval_stats["median"],
eval_stats["stdev"],
)
)
import json
import os
with open(os.path.join(args.outdir, "demo_scores.json"), "w") as f:
json.dump(eval_stats, f)
else:
experiments.train_agent_with_evaluation(
agent=agent,
env=env,
steps=args.steps,
eval_env=eval_env,
eval_n_steps=None,
eval_n_episodes=args.eval_n_runs,
eval_interval=args.eval_interval,
outdir=args.outdir,
train_max_episode_len=timestep_limit,
)
if __name__ == "__main__":
main()
| [
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.nn.ReLU"
] | 1.3.0 | yhisaki/pfrl | d89ddf66201bcfaaae6130bdee704d56ee4b7b76 |
1.10 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
from solo.utils.misc import gather
def test_gather_layer():
X = torch.randn(10, 30, requires_grad=True)
X_gathered = gather(X)
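    # gather() collects features across processes; the backward pass below checks that
    # gradients still flow back to the local tensor X.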
assert isinstance(X, torch.Tensor)
dummy_loss = torch.mm(X_gathered, X_gathered.T).sum()
dummy_loss.backward()
assert X.grad is not None
| [
"torch.randn",
"torch.mm"
] | 1.10.0 | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 |
1.10 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.deepclusterv2 import deepclusterv2_loss_func
from solo.methods.base import BaseMethod
from solo.utils.kmeans import KMeans
class DeepClusterV2(BaseMethod):
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
num_prototypes: Sequence[int],
temperature: float,
kmeans_iters: int,
**kwargs,
):
"""Implements DeepCluster V2 (https://arxiv.org/abs/2006.09882).
Args:
proj_output_dim (int): number of dimensions of the projected features.
proj_hidden_dim (int): number of neurons in the hidden layers of the projector.
num_prototypes (Sequence[int]): number of prototypes.
temperature (float): temperature for the softmax.
kmeans_iters (int): number of iterations for k-means clustering.
"""
super().__init__(**kwargs)
self.proj_output_dim = proj_output_dim
self.temperature = temperature
self.num_prototypes = num_prototypes
self.kmeans_iters = kmeans_iters
# projector
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
)
# prototypes
self.prototypes = nn.ModuleList(
[nn.Linear(proj_output_dim, np, bias=False) for np in num_prototypes]
)
# normalize and set requires grad to false
for proto in self.prototypes:
for params in proto.parameters():
params.requires_grad = False
proto.weight.copy_(F.normalize(proto.weight.data.clone(), dim=-1))
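        # The prototypes are excluded from optimization; on_train_epoch_start overwrites
        # their weights with the k-means centroids instead.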
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(DeepClusterV2, DeepClusterV2).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("deepclusterv2")
# projector
parser.add_argument("--proj_output_dim", type=int, default=128)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# parameters
parser.add_argument("--temperature", type=float, default=0.1)
parser.add_argument("--num_prototypes", type=int, nargs="+", default=[3000, 3000, 3000])
parser.add_argument("--kmeans_iters", type=int, default=10)
return parent_parser
@property
    def learnable_params(self) -> List[dict]:
        """Adds projector parameters to the parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [{"params": self.projector.parameters()}]
return super().learnable_params + extra_learnable_params
def on_train_start(self):
"""Gets the world size and initializes the memory banks."""
# k-means needs the world size and the dataset size
self.world_size = self.trainer.world_size if self.trainer else 1
self.dataset_size = getattr(self, "dali_epoch_size", None) or len(
self.trainer.train_dataloader.dataset
)
# build k-means helper object
self.kmeans = KMeans(
world_size=self.world_size,
rank=self.global_rank,
num_large_crops=self.num_large_crops,
dataset_size=self.dataset_size,
proj_features_dim=self.proj_output_dim,
num_prototypes=self.num_prototypes,
kmeans_iters=self.kmeans_iters,
)
# initialize memory banks
size_memory_per_process = len(self.trainer.train_dataloader) * self.batch_size
self.register_buffer(
"local_memory_index",
torch.zeros(size_memory_per_process).long().to(self.device, non_blocking=True),
)
self.register_buffer(
"local_memory_embeddings",
F.normalize(
torch.randn(self.num_large_crops, size_memory_per_process, self.proj_output_dim),
dim=-1,
).to(self.device, non_blocking=True),
)
    def on_train_epoch_start(self) -> None:
        """Prepares assignments and prototype centroids for the next epoch."""
if self.current_epoch == 0:
self.assignments = -torch.ones(
len(self.num_prototypes), self.dataset_size, device=self.device
).long()
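            # No cluster assignments exist in the first epoch, so they are filled with -1
            # (no valid targets for the clustering loss yet).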
else:
self.assignments, centroids = self.kmeans.cluster_memory(
self.local_memory_index, self.local_memory_embeddings
)
for proto, centro in zip(self.prototypes, centroids):
proto.weight.copy_(centro)
def update_memory_banks(self, idxs: torch.Tensor, z: torch.Tensor, batch_idx: int) -> None:
"""Updates DeepClusterV2's memory banks of indices and features.
Args:
idxs (torch.Tensor): set of indices of the samples of the current batch.
z (torch.Tensor): projected features of the samples of the current batch.
batch_idx (int): batch index relative to the current epoch.
"""
start_idx, end_idx = batch_idx * self.batch_size, (batch_idx + 1) * self.batch_size
self.local_memory_index[start_idx:end_idx] = idxs
for c, z_c in enumerate(z):
self.local_memory_embeddings[c][start_idx:end_idx] = z_c.detach()
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs the forward pass of the backbone, the projector and the prototypes.
Args:
X (torch.Tensor): a batch of images in the tensor format.
Returns:
Dict[str, Any]:
a dict containing the outputs of the parent,
the projected features and the logits.
"""
out = super().forward(X, *args, **kwargs)
z = F.normalize(self.projector(out["feats"]))
p = torch.stack([p(z) for p in self.prototypes])
return {**out, "z": z, "p": p}
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
"""Training step for DeepClusterV2 reusing BaseMethod training step.
Args:
batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
[X] is a list of size num_crops containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of DeepClusterV2 loss and classification loss.
"""
idxs = batch[0]
out = super().training_step(batch, batch_idx)
class_loss = out["loss"]
feats1, feats2 = out["feats"]
z1 = F.normalize(self.projector(feats1))
z2 = F.normalize(self.projector(feats2))
p1 = torch.stack([proto(z1) for proto in self.prototypes])
p2 = torch.stack([proto(z2) for proto in self.prototypes])
# ------- deepclusterv2 loss -------
preds = torch.stack([p1.unsqueeze(1), p2.unsqueeze(1)], dim=1)
assignments = self.assignments[:, idxs]
deepcluster_loss = deepclusterv2_loss_func(preds, assignments, self.temperature)
# ------- update memory banks -------
self.update_memory_banks(idxs, [z1, z2], batch_idx)
self.log("train_deepcluster_loss", deepcluster_loss, on_epoch=True, sync_dist=True)
return deepcluster_loss + class_loss
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.randn"
] | 1.10.0 | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 |
0.4 | import torch.nn as nn
import torch.nn.functional as F
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
if not args.levin_flag_quantile:
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
else:
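            # With quantile regression enabled, the head outputs N_QUANT quantile estimates
            # per action instead of a single Q-value.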
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions * args.N_QUANT)
def init_hidden(self):
# make hidden states on same device as model
        # mainly used in the controllers
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state):
mb_size = inputs.size(0)
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
if not self.args.levin_flag_quantile:
q = self.fc2(h)
else:
q = self.fc2(h).view(mb_size, self.args.n_actions, self.args.N_QUANT)
return q, h
| [
"torch.nn.Linear",
"torch.nn.GRUCell"
] | 0.4.1 | halleanwoo/AGMA | a1c4980e05150a9cfa1be338e7c8cbd8ccd6b002 |
1.0 | import unittest
import torch
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
BertConfig,
BertForSequenceClassification,
GlueDataset,
GlueDataTrainingArguments,
Trainer,
TrainingArguments,
)
from transformers.adapters.composition import Fuse
from transformers.testing_utils import slow
class TestAdapterTrainer(unittest.TestCase):
def test_resume_training(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
model.set_active_adapters("adapter")
training_args = TrainingArguments(
output_dir="./examples",
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
do_save_adapters=True,
do_save_full_model=False,
)
trainer.train()
# create second model that should resume the training of the first
model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
model_resume.set_active_adapters("adapter")
trainer_resume = Trainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir="./examples"),
train_dataset=train_dataset,
do_save_adapters=True,
do_save_full_model=False,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
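        # After resuming from the checkpoint, the adapter weights of both runs must match exactly.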
for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
self.assertEqual(k1, k2)
if "adapter" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_resume_training_with_fusion(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.add_adapter("additional_adapter")
model.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
model.set_active_adapters(Fuse("adapter", "additional_adapter"))
training_args = TrainingArguments(
output_dir="./examples",
do_train=True,
learning_rate=0.1,
logging_steps=1,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
do_save_adapters=True,
do_save_full_model=False,
do_save_adapter_fusion=True,
)
trainer.train()
model_resume = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model_resume.add_adapter("adapter")
model_resume.add_adapter("additional_adapter")
model_resume.add_adapter_fusion(Fuse("adapter", "additional_adapter"))
model_resume.set_active_adapters(Fuse("adapter", "additional_adapter"))
trainer_resume = Trainer(
model=model_resume,
args=TrainingArguments(do_train=True, max_steps=1, output_dir="./examples"),
train_dataset=train_dataset,
do_save_full_model=False,
do_save_adapters=True,
)
trainer_resume.train(resume_from_checkpoint=True)
self.assertEqual(model.config.adapters.adapters, model_resume.config.adapters.adapters)
for ((k1, v1), (k2, v2)) in zip(trainer.model.state_dict().items(), trainer_resume.model.state_dict().items()):
self.assertEqual(k1, k2)
if "adapter" in k1:
self.assertTrue(torch.equal(v1, v2), k1)
def test_auto_set_save_adapters(self):
model = BertForSequenceClassification(
BertConfig(
hidden_size=32,
num_hidden_layers=4,
num_attention_heads=4,
intermediate_size=37,
)
)
model.add_adapter("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./examples",
)
trainer = Trainer(
model=model,
args=training_args,
)
self.assertFalse(trainer.do_save_full_model)
self.assertTrue(trainer.do_save_adapters)
self.assertTrue(trainer.do_save_adapter_fusion)
@slow
def test_training_load_best_model_at_end_full_model(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.add_adapter("adapter")
model.train_adapter("adapter")
training_args = TrainingArguments(
output_dir="./examples",
do_train=True,
learning_rate=0.001,
max_steps=1,
save_steps=1,
remove_unused_columns=False,
load_best_model_at_end=True,
evaluation_strategy="epoch",
num_train_epochs=2,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
do_save_adapters=False,
do_save_full_model=True,
)
trainer.train()
self.assertIsNotNone(trainer.model.active_adapters)
if __name__ == "__main__":
unittest.main()
| [
"torch.equal"
] | 1.0 | AngadSethi/adapter-transformers | b147bba9107a5a561aca28c99f4e4ec2816a6e4f |
4 | import torch.nn as nn
import math
import torch
import torch.nn.functional as F
def conv_bn(inp, oup, stride, k_size=3):
return nn.Sequential(
nn.Conv2d(inp, oup, k_size, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.PReLU()
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.PReLU()
)
class DWC(nn.Module):
def __init__(self, in_channels, out_channels):
super(DWC, self).__init__()
#self.depthwise = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=(7,6),
#stride=1, padding=0, groups=in_channels, bias=False)
self.batch_norm_in = nn.BatchNorm2d(in_channels)
self.depthwise = nn.AvgPool2d((7, 6), stride=1, padding=0)
self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
stride=1, padding=0, bias=False)
def forward(self, x):
x = self.depthwise(x)
#x = self.batch_norm_in(x)
x = self.pointwise(x)
return x
class Max_AvgPool(nn.Module):
def __init__(self, kernel_size=(3,3), stride=2, padding=1, dim=128):
super(Max_AvgPool, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
def forward(self, x):
x = self.Maxpool(x) + self.Avgpool(x) # add some channelwise gating?
return x
class gated_conv1x1(nn.Module):
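    # Splits the input channels into two halves; each half goes through a 1x1 convolution
    # modulated by a learned sigmoid gate, and the gated halves are concatenated back together.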
def __init__(self, inc=128, outc=128):
super(gated_conv1x1, self).__init__()
self.inp = int(inc/2)
self.oup = int(outc/2)
self.conv1x1_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False)
self.gate_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True)
self.conv1x1_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False)
self.gate_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True)
def forward(self, x):
x_1 = x[:, :self.inp, :, :]
x_2 = x[:, self.inp:, :, :]
a_1 = self.conv1x1_1(x_1)
g_1 = F.sigmoid(self.gate_1(x_1))
a_2 = self.conv1x1_2(x_2)
g_2 = F.sigmoid(self.gate_2(x_2))
ret = torch.cat((a_1*g_1, a_2*g_2), 1)
return ret
class InvertedResidual_dwc(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual_dwc, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
self.conv = []
if expand_ratio == 1:
self.conv.append(nn.Conv2d(inp, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
#self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1))
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
else:
#self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim))
self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
self.conv.append(nn.Conv2d(hidden_dim, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
self.conv = nn.Sequential(*self.conv)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
self.conv = []
if expand_ratio == 1:
self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1))
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
else:
#self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim))
self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1))
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
self.conv = nn.Sequential(*self.conv)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class Net(nn.Module): #mobileNet v2
def __init__(self, embedding_size=128, input_size=224, width_mult=1.):
super(Net, self).__init__()
block = InvertedResidual
block_dwc = InvertedResidual_dwc
input_channel = 64
last_channel = 256
interverted_residual_setting = [
# t, c, n, s
[1, 48, 1, 1], # depthwise conv for first row
[2, 48, 2, 1],
[4, 48, 2, 2],
[2, 48, 2, 1],
[4, 48, 5, 1],
[2, 48, 2, 2],
[2, 48, 6, 2],
]
# building first layer
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual
cnt = 0
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if cnt>1:
if i == n - 1: # reduce the featuremap in the last.
self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
else:
if i == n - 1: # reduce the featuremap in the last.
self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
cnt+=1
# building last several layers
self.features.append(gated_conv1x1(input_channel, self.last_channel))
# make it nn.Sequential
self.features_sequential = nn.Sequential(*self.features)
# Global depthwise conv
#self.GDCconv = DWC(self.last_channel, embedding_size)
self._initialize_weights()
def forward(self, x):
x = self.features_sequential(x).view(-1, 256*4)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_() | [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.PReLU"
] | 4 | GzuPark/EXTD_Pytorch | e99af10f282d07054c1cf7c4b8c035084daaff78 |
1.2 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
#-------------------------------------------------#
# Mish activation function
#-------------------------------------------------#
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
return x * torch.tanh(F.softplus(x))
#---------------------------------------------------#
# Convolution block -> convolution + batch normalization + activation
# Conv2d + BatchNormalization + Mish
#---------------------------------------------------#
class CBM(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1):
super(CBM, self).__init__()
        # pad = kernel_size//2: a 1x1 convolution gets no padding and a 3x3 convolution gets one ring of zeros, so the output spatial size is unchanged
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, kernel_size//2, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = Mish()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.activation(x)
return x
#---------------------------------------------------#
# Component of the CSPdarknet structural block:
# the residual block that is stacked inside it
#---------------------------------------------------#
class Resblock(nn.Module):
def __init__(self, channels, hidden_channels=None):
super(Resblock, self).__init__()
if hidden_channels is None:
hidden_channels = channels
self.block = nn.Sequential(
CBM(channels, hidden_channels, 1),
CBM(hidden_channels, channels, 3)
)
def forward(self, x):
return x + self.block(x)
#--------------------------------------------------------------------#
# Structural block of CSPdarknet.
# First, ZeroPadding2D and a convolution block with a 2x2 stride compress the height and width.
# Then a large residual shortcut (shortconv) is built; it bypasses many of the residual structures.
# The main branch loops num_blocks times, and each iteration is a residual structure.
# The whole CSPdarknet block is therefore one large residual block containing several small residual blocks.
#--------------------------------------------------------------------#
class Resblock_body(nn.Module):
def __init__(self, in_channels, out_channels, num_blocks, first):
super(Resblock_body, self).__init__()
#----------------------------------------------------------------#
        # Use a convolution block with a 2x2 stride to compress the height and width
#----------------------------------------------------------------#
self.downsample_conv = CBM(in_channels, out_channels, 3, stride=2)
if first:
#--------------------------------------------------------------------------#
            # Then build a large residual shortcut, self.split_conv0, which bypasses many of the residual structures
#--------------------------------------------------------------------------#
self.split_conv0 = CBM(out_channels, out_channels, 1)
#----------------------------------------------------------------#
            # The main branch loops num_blocks times, and each iteration is a residual structure.
#----------------------------------------------------------------#
self.split_conv1 = CBM(out_channels, out_channels, 1)
self.blocks_conv = nn.Sequential(
Resblock(channels=out_channels, hidden_channels=out_channels//2),
CBM(out_channels, out_channels, 1)
)
self.concat_conv = CBM(out_channels*2, out_channels, 1)
else:
#--------------------------------------------------------------------------#
            # Then build a large residual shortcut, self.split_conv0, which bypasses many of the residual structures
#--------------------------------------------------------------------------#
self.split_conv0 = CBM(out_channels, out_channels//2, 1)
#----------------------------------------------------------------#
            # The main branch loops num_blocks times, and each iteration is a residual structure.
#----------------------------------------------------------------#
self.split_conv1 = CBM(out_channels, out_channels//2, 1)
self.blocks_conv = nn.Sequential(
*[Resblock(out_channels//2) for _ in range(num_blocks)],
CBM(out_channels//2, out_channels//2, 1)
)
self.concat_conv = CBM(out_channels, out_channels, 1)
def forward(self, x):
x = self.downsample_conv(x)
x0 = self.split_conv0(x)
x1 = self.split_conv1(x)
x1 = self.blocks_conv(x1)
#------------------------------------#
        # Concatenate the large residual shortcut back in
#------------------------------------#
x = torch.cat([x1, x0], dim=1)
#------------------------------------#
        # Finally, integrate the channel dimension with a 1x1 convolution
#------------------------------------#
x = self.concat_conv(x)
return x
#---------------------------------------------------#
# Main body of CSPdarknet53.
# The input is a 416x416x3 image.
# The output is three effective feature layers.
#---------------------------------------------------#
class CSPDarkNet(nn.Module):
def __init__(self, layers):
super(CSPDarkNet, self).__init__()
self.inplanes = 32
# 416,416,3 -> 416,416,32
self.conv1 = CBM(3, self.inplanes, kernel_size=3, stride=1)
self.feature_channels = [64, 128, 256, 512, 1024]
self.stages = nn.ModuleList([
# 416,416,32 -> 208,208,64
Resblock_body(self.inplanes, self.feature_channels[0], layers[0], first=True),
# 208,208,64 -> 104,104,128
Resblock_body(self.feature_channels[0], self.feature_channels[1], layers[1], first=False),
# 104,104,128 -> 52,52,256
Resblock_body(self.feature_channels[1], self.feature_channels[2], layers[2], first=False),
# 52,52,256 -> 26,26,512
Resblock_body(self.feature_channels[2], self.feature_channels[3], layers[3], first=False),
# 26,26,512 -> 13,13,1024
Resblock_body(self.feature_channels[3], self.feature_channels[4], layers[4], first=False)
])
self.num_features = 1
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
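        # Returns the stride-8, stride-16 and stride-32 feature maps
        # (52x52, 26x26 and 13x13 for a 416x416 input).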
x = self.conv1(x)
x = self.stages[0](x)
x = self.stages[1](x)
out3 = self.stages[2](x)
out4 = self.stages[3](out3)
out5 = self.stages[4](out4)
return out3, out4, out5
def darknet53(pretrained, **kwargs):
model = CSPDarkNet([1, 2, 8, 8, 4])
if pretrained:
if isinstance(pretrained, str):
model.load_state_dict(torch.load(pretrained))
else:
raise Exception("darknet request a pretrained path. got [{}]".format(pretrained))
return model
| [
"torch.cat",
"torch.nn.functional.softplus",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.load"
] | 1.2.0 | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 |
1.2 | from collections import OrderedDict
import torch
import torch.nn as nn
from nets.CSPdarknet import darknet53
#---------------------------------------------------#
# Convolution block -> convolution + batch normalization + activation
# Conv2d + BatchNormalization + LeakyRelu
#---------------------------------------------------#
def CBL(filter_in, filter_out, kernel_size, stride=1):
pad = (kernel_size - 1) // 2 if kernel_size else 0
return nn.Sequential(OrderedDict([
("conv", nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size, stride=stride, padding=pad, bias=False)),
("bn", nn.BatchNorm2d(filter_out)),
("relu", nn.LeakyReLU(0.1)),
]))
#---------------------------------------------------#
# SPP structure: max-pool with kernels of different sizes,
# then concatenate the pooled feature maps with the input
#---------------------------------------------------#
class SpatialPyramidPooling(nn.Module):
def __init__(self, pool_sizes=[5, 9, 13]):
super(SpatialPyramidPooling, self).__init__()
self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size//2) for pool_size in pool_sizes])
def forward(self, x):
features = [maxpool(x) for maxpool in self.maxpools[::-1]]
features = torch.cat(features + [x], dim=1)
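        # With the default pool sizes [5, 9, 13], concatenating the three pooled maps with the
        # input multiplies the channel count by 4 (e.g. 512 -> 2048).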
return features
#---------------------------------------------------#
# Convolution + upsampling
#---------------------------------------------------#
class Upsample(nn.Module):
def __init__(self, in_channels, out_channels):
super(Upsample, self).__init__()
self.upsample = nn.Sequential(
CBL(in_channels, out_channels, 1),
nn.Upsample(scale_factor=2, mode='nearest')
)
def forward(self, x,):
x = self.upsample(x)
return x
#---------------------------------------------------#
# Block of three convolutions
#---------------------------------------------------#
def make_three_conv(filters_list, in_filters):
m = nn.Sequential(
CBL(in_filters, filters_list[0], 1),
CBL(filters_list[0], filters_list[1], 3),
CBL(filters_list[1], filters_list[0], 1),
)
return m
#---------------------------------------------------#
# Block of five convolutions
#---------------------------------------------------#
def make_five_conv(filters_list, in_filters):
m = nn.Sequential(
CBL(in_filters, filters_list[0], 1),
CBL(filters_list[0], filters_list[1], 3),
CBL(filters_list[1], filters_list[0], 1),
CBL(filters_list[0], filters_list[1], 3),
CBL(filters_list[1], filters_list[0], 1),
)
return m
#---------------------------------------------------#
# Finally, obtain the YOLOv4 output heads
#---------------------------------------------------#
def yolo_head(filters_list, in_filters):
m = nn.Sequential(
CBL(in_filters, filters_list[0], 3),
nn.Conv2d(filters_list[0], filters_list[1], 1),
)
return m
#---------------------------------------------------#
# yolo_body
#---------------------------------------------------#
class YoloBody(nn.Module):
def __init__(self, num_anchors, num_classes):
super(YoloBody, self).__init__()
#---------------------------------------------------#
        # Build the CSPdarknet53 backbone model.
        # It returns three effective feature layers whose shapes are:
# 52,52,256
# 26,26,512
# 13,13,1024
#---------------------------------------------------#
self.backbone = darknet53(None)
self.conv1 = make_three_conv([512,1024],1024)
self.SPP = SpatialPyramidPooling()
self.conv2 = make_three_conv([512,1024],2048)
self.upsample1 = Upsample(512,256)
self.conv_for_P4 = CBL(512,256,1)
self.make_five_conv1 = make_five_conv([256, 512],512)
self.upsample2 = Upsample(256,128)
self.conv_for_P3 = CBL(256,128,1)
self.make_five_conv2 = make_five_conv([128, 256],256)
# 3*(5+num_classes) = 3*(5+20) = 3*(4+1+20)=75
final_out_filter2 = num_anchors * (5 + num_classes)
self.yolo_head3 = yolo_head([256, final_out_filter2],128)
self.down_sample1 = CBL(128,256,3,stride=2)
self.make_five_conv3 = make_five_conv([256, 512],512)
# 3*(5+num_classes) = 3*(5+20) = 3*(4+1+20)=75
final_out_filter1 = num_anchors * (5 + num_classes)
self.yolo_head2 = yolo_head([512, final_out_filter1],256)
self.down_sample2 = CBL(256,512,3,stride=2)
self.make_five_conv4 = make_five_conv([512, 1024],1024)
# 3*(5+num_classes)=3*(5+20)=3*(4+1+20)=75
final_out_filter0 = num_anchors * (5 + num_classes)
self.yolo_head1 = yolo_head([1024, final_out_filter0],512)
def forward(self, x):
# backbone
x2, x1, x0 = self.backbone(x)
# 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512 -> 13,13,2048
P5 = self.conv1(x0)
P5 = self.SPP(P5)
# 13,13,2048 -> 13,13,512 -> 13,13,1024 -> 13,13,512
P5 = self.conv2(P5)
# 13,13,512 -> 13,13,256 -> 26,26,256
P5_upsample = self.upsample1(P5)
# 26,26,512 -> 26,26,256
P4 = self.conv_for_P4(x1)
# 26,26,256 + 26,26,256 -> 26,26,512
P4 = torch.cat([P4,P5_upsample],axis=1)
# 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256
P4 = self.make_five_conv1(P4)
# 26,26,256 -> 26,26,128 -> 52,52,128
P4_upsample = self.upsample2(P4)
# 52,52,256 -> 52,52,128
P3 = self.conv_for_P3(x2)
# 52,52,128 + 52,52,128 -> 52,52,256
P3 = torch.cat([P3,P4_upsample],axis=1)
# 52,52,256 -> 52,52,128 -> 52,52,256 -> 52,52,128 -> 52,52,256 -> 52,52,128
P3 = self.make_five_conv2(P3)
# 52,52,128 -> 26,26,256
P3_downsample = self.down_sample1(P3)
# 26,26,256 + 26,26,256 -> 26,26,512
P4 = torch.cat([P3_downsample,P4],axis=1)
# 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256
P4 = self.make_five_conv3(P4)
# 26,26,256 -> 13,13,512
P4_downsample = self.down_sample2(P4)
# 13,13,512 + 13,13,512 -> 13,13,1024
P5 = torch.cat([P4_downsample,P5],axis=1)
# 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512
P5 = self.make_five_conv4(P5)
#---------------------------------------------------#
        # Third feature layer
# y3=(batch_size,75,52,52)
#---------------------------------------------------#
out2 = self.yolo_head3(P3)
#---------------------------------------------------#
        # Second feature layer
# y2=(batch_size,75,26,26)
#---------------------------------------------------#
out1 = self.yolo_head2(P4)
#---------------------------------------------------#
        # First feature layer
# y1=(batch_size,75,13,13)
#---------------------------------------------------#
out0 = self.yolo_head1(P5)
return out0, out1, out2
| [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d"
] | 1.2.0 | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 |
1.6 | import torch
from utils.distmat import compute_distmat
def init_feedback_indices(q, g, device=None):
return torch.zeros((q, g), dtype=torch.bool, device=device)
def init_feedback_indices_qg(q, g, positive=False, device=None):
indices = torch.zeros(q, q + g, dtype=torch.bool, device=device)
if positive:
indices[torch.arange(q), torch.arange(q)] = True
return indices
def greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=True):
"""
Update positive_indices, negative_indices with one round of feedback. Provide feedback for top-ranked gallery.
Note that distmat is corrupted if inplace=True.
:param distmat: q x g Tensor (adjusted query to gallery)
:param q_pids: q
:param g_pids: g
:param positive_indices: q x g
:param negative_indices: q x g
:return:
(positive_indices, negative_indices, matches)
"""
q, g = tuple(distmat.shape)
if not inplace:
distmat = distmat.clone().detach()
        positive_indices = positive_indices.clone()
        negative_indices = negative_indices.clone()
distmat[positive_indices] = float("inf")
distmat[negative_indices] = float("inf")
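    # Pairs that already received feedback are set to +inf so the argmin below
    # only proposes gallery items that have not been labeled yet.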
indices = distmat.argmin(dim=1)
pmap = g_pids[indices] == q_pids
positive_q = torch.arange(0, q)[pmap]
negative_q = torch.arange(0, q)[pmap == False]
positive_g = indices[pmap]
negative_g = indices[pmap == False]
existing = positive_indices[positive_q, positive_g]
assert (not existing.any())
positive_indices[positive_q, positive_g] = True
existing = negative_indices[negative_q, negative_g]
assert (not existing.any())
negative_indices[negative_q, negative_g] = True
return positive_indices, negative_indices, pmap
def naive_round(qf, gf, q_pids, g_pids, positive_indices=None, negative_indices=None,
inplace=True, previous_distmat=None, device=None):
"""
qf: q x m
gf: g x m
q_pids: q
g_pids: g
positive_indices: q x g
negative_indices: q x g
previous_distmat: adjusted distmat (== compute_distmat(qf, gf) only at init)
"""
q, g = qf.shape[0], gf.shape[0]
assert (qf.shape[1] == gf.shape[1])
if positive_indices is None: positive_indices = init_feedback_indices(q, g, device=device)
if negative_indices is None: negative_indices = init_feedback_indices(q, g, device=device)
if previous_distmat is None:
distmat = compute_distmat(qf, gf)
else:
distmat = previous_distmat
res = greedy_feedback(distmat, q_pids, g_pids, positive_indices, negative_indices, inplace=inplace)
positive_indices, negative_indices, matches = res
distmat = compute_distmat(qf, gf)
distmat[positive_indices] = 0
distmat[negative_indices] = float("inf")
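    # Confirmed positives are pulled to distance 0 (front of the ranking) and
    # confirmed negatives pushed to +inf (back of the ranking).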
return distmat, positive_indices, negative_indices, matches
| [
"torch.zeros",
"torch.arange"
] | 1.6.0 | itsnamgyu/reid-metric | 437e02ebad510b482f620a293fd8c7baa4f42ad6 |
1.4 | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Actor(nn.Module):
"""For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(self, preprocess_net, action_shape, hidden_layer_size=128):
super().__init__()
self.preprocess = preprocess_net
self.last = nn.Linear(hidden_layer_size, np.prod(action_shape))
def forward(self, s, state=None, info={}):
r"""s -> Q(s, \*)"""
logits, h = self.preprocess(s, state)
logits = F.softmax(self.last(logits), dim=-1)
return logits, h
class Critic(nn.Module):
"""For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(self, preprocess_net, hidden_layer_size=128):
super().__init__()
self.preprocess = preprocess_net
self.last = nn.Linear(hidden_layer_size, 1)
def forward(self, s, **kwargs):
"""s -> V(s)"""
logits, h = self.preprocess(s, state=kwargs.get('state', None))
logits = self.last(logits)
return logits
class DQN(nn.Module):
"""For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(self, h, w, action_shape, device='cpu'):
super(DQN, self).__init__()
self.device = device
self.conv1 = nn.Conv2d(4, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
def conv2d_size_out(size, kernel_size=5, stride=2):
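            # Standard output-size formula for a convolution with no padding:
            # floor((size - kernel_size) / stride) + 1.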
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.fc = nn.Linear(linear_input_size, 512)
self.head = nn.Linear(512, action_shape)
def forward(self, x, state=None, info={}):
r"""x -> Q(x, \*)"""
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, device=self.device, dtype=torch.float32)
x = x.permute(0, 3, 1, 2)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.fc(x.reshape(x.size(0), -1))
return self.head(x), state
| [
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.tensor"
] | 1.4.0 | FightingSrain/tianshou | bd9c3c7f8d144448c44a350828b2c5222298bd8e |
1.10 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append('./')
from update import BasicUpdateBlock, SmallUpdateBlock
from extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from util import bilinear_sampler, coords_grid, upflow8
try:
autocast = torch.cuda.amp.autocast
except:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class RAFT(nn.Module):
def __init__(self, args):
super(RAFT, self).__init__()
self.args = args
if args.small:
self.hidden_dim = hdim = 96
self.context_dim = cdim = 64
args.corr_levels = 4
args.corr_radius = 3
else:
self.hidden_dim = hdim = 128
self.context_dim = cdim = 128
args.corr_levels = 4
args.corr_radius = 4
if 'dropout' not in self.args:
self.args.dropout = 0
if 'alternate_corr' not in self.args:
self.args.alternate_corr = False
# feature network, context network, and update block
if args.small:
self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
else:
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def initialize_flow(self, img):
""" Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
N, C, H, W = img.shape
coords0 = coords_grid(N, H//8, W//8).to(img.device)
coords1 = coords_grid(N, H//8, W//8).to(img.device)
# optical flow computed as difference: flow = coords1 - coords0
return coords0, coords1
def upsample_flow(self, flow, mask):
""" Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
N, _, H, W = flow.shape
mask = mask.view(N, 1, 9, 8, 8, H, W)
mask = torch.softmax(mask, dim=2)
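        # The mask holds 9 weights for every pixel of each 8x8 upsampling block; the softmax
        # turns them into a convex combination over the coarse 3x3 neighborhood.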
up_flow = F.unfold(8 * flow, [3,3], padding=1)
up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2)
up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
return up_flow.reshape(N, 2, 8*H, 8*W)
def forward(self, image1):
""" get featmap for one frame """
image1 = 2 * (image1 / 255.0) - 1.0
image1 = image1.contiguous()
hdim = self.hidden_dim
cdim = self.context_dim
# run the feature network
with autocast(enabled=self.args.mixed_precision):
fmap1 = self.fnet(image1)
fmap1 = fmap1.float()
return fmap1
def old_forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
""" Estimate optical flow between pair of frames """
image1 = 2 * (image1 / 255.0) - 1.0
image2 = 2 * (image2 / 255.0) - 1.0
image1 = image1.contiguous()
image2 = image2.contiguous()
hdim = self.hidden_dim
cdim = self.context_dim
# run the feature network
with autocast(enabled=self.args.mixed_precision):
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.float()
fmap2 = fmap2.float()
if self.args.alternate_corr:
corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
else:
corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
# run the context network
with autocast(enabled=self.args.mixed_precision):
cnet = self.cnet(image1)
net, inp = torch.split(cnet, [hdim, cdim], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
coords0, coords1 = self.initialize_flow(image1)
if flow_init is not None:
coords1 = coords1 + flow_init
flow_predictions = []
for itr in range(iters):
coords1 = coords1.detach()
corr = corr_fn(coords1) # index correlation volume
flow = coords1 - coords0
with autocast(enabled=self.args.mixed_precision):
net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
# F(t+1) = F(t) + \Delta(t)
coords1 = coords1 + delta_flow
# upsample predictions
if up_mask is None:
flow_up = upflow8(coords1 - coords0)
else:
flow_up = self.upsample_flow(coords1 - coords0, up_mask)
flow_predictions.append(flow_up)
if test_mode:
corr = corr_fn(coords1) # index correlation volume
# feat = torch.cat([inp, corr], dim=1)
feat = inp
return coords1 - coords0, flow_up, (feat, fmap1, fmap2)
return flow_predictions
| [
"torch.nn.functional.unfold",
"torch.relu",
"torch.split",
"torch.softmax",
"torch.tanh",
"torch.sum"
] | 1.10.0 | aharley/track_check_repeat | 564c3065a758deea11acdcaeea7a187ce376d564 |
1.3 | import torch
import os
import os.path
import shutil
import numpy as np
import soundfile as sf
from pathlib import PurePath
from torch import nn
from torch.utils.data import DataLoader, random_split
from asteroid.data import TimitDataset
from asteroid.data.utils import CachedWavSet, RandomMixtureSet, FixedMixtureSet
from tqdm import tqdm
from torch import optim
from pytorch_lightning import Trainer, seed_everything, loggers as pl_loggers
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from asteroid_filterbanks.transforms import mag
from asteroid.engine import System
from asteroid.losses import singlesrc_neg_sisdr
from egs.whamr.TasNet.model import TasNet
BATCH_SIZE = 8 # could be more on cluster, test if larger one work
SAMPLE_RATE = 8000 # as agreed upon
CROP_LEN = 24000 # average track len in TIMIT
SEED = 42 # magic number :)
def sisdr_loss_wrapper(est_target, target):
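    # singlesrc_neg_sisdr returns a per-example negative SI-SDR; averaging it gives the scalar training loss.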
return singlesrc_neg_sisdr(est_target.squeeze(1), target).mean()
def train_val_split(ds, val_fraction=0.1, random_seed=SEED):
assert val_fraction > 0 and val_fraction < 0.5
len_train = int(len(ds) * (1 - val_fraction))
len_val = len(ds) - len_train
return random_split(ds, [len_train, len_val], generator=torch.Generator().manual_seed(random_seed))
DRONE_NOISE_DIR = '/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/noises-train-drones'
# fixed SNRs for validation set
TRAIN_SNRS = [-25, -20, -15, -10, -5]
TIMIT_DIR = PurePath('/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/TIMIT')
TIMIT_DIR_8kHZ = PurePath('/jmain01/home/JAD007/txk02/aaa18-txk02/Datasets/TIMIT_8kHZ')
# Reproducibility - fix all random seeds
seed_everything(SEED)
# Load noises, resample and save into the memory
noises = CachedWavSet(DRONE_NOISE_DIR, sample_rate=SAMPLE_RATE, precache=True)
# Load clean data and split it into train and val
timit = TimitDataset(TIMIT_DIR_8kHZ, subset='train', sample_rate=SAMPLE_RATE, with_path=False)
timit_train, timit_val = train_val_split(timit, val_fraction=0.1, random_seed=SEED)
# Training data mixes crops randomly on the fly with random SNR in range (effectively infinite training data)
# `repeat_factor=20` means that the dataset contains 20 copies of itself - it is the easiest way to make the epoch longer
timit_train = RandomMixtureSet(timit_train, noises, random_seed=SEED, snr_range=(-25, -5),
crop_length=CROP_LEN, repeat_factor=30)
# Validation data is fixed (for stability): mix every clean clip with all the noises in the folder
# Argument `mixtures_per_clean` regulates with how many different noise files each clean file will be mixed
timit_val = FixedMixtureSet(timit_val, noises, snrs=TRAIN_SNRS, random_seed=SEED,
mixtures_per_clean=5, crop_length=CROP_LEN)
NUM_WORKERS = 5
train_loader = DataLoader(timit_train, shuffle=True, batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS, drop_last=True)
val_loader = DataLoader(timit_val, batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS, drop_last=True)
# some random parameters, does it look sensible?
LR = 1e-3
REDUCE_LR_PATIENCE = 5
EARLY_STOP_PATIENCE = 20
MAX_EPOCHS = 20
# the model here should be constructed in the script accordingly to the passed config (including the model type)
# most of the models accept `sample_rate` parameter for encoders, which is important (default is 16000, override)
model = TasNet(fb_conf={'n_filters': 512, 'kernel_size': 40, 'stride': 20},
mask_conf ={'n_layers': 4, 'n_units': 500, 'dropout': 0.3, "n_src": 1})
optimizer = optim.Adam(model.parameters(), lr=LR)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=REDUCE_LR_PATIENCE)
early_stopping = EarlyStopping(monitor='val_loss', patience=EARLY_STOP_PATIENCE)
checkpoint = ModelCheckpoint(
filename='{epoch:02d}-{val_loss:.2f}',
monitor="val_loss",
mode="min",
save_top_k=5,
verbose=True
)
# Probably we also need to subclass `System`, in order to log the target metrics on the validation set (PESQ/STOI)
system = System(model, optimizer, sisdr_loss_wrapper, train_loader, val_loader, scheduler)
# log dir and model name are also part of the config, of course
LOG_DIR = 'logs'
logger = pl_loggers.TensorBoardLogger(LOG_DIR, name='TIMIT-drones-TasNet-random_test', version=1)
# choose the proper accelerator for JADE, probably `ddp` (also, `auto_select_gpus=True` might be useful)
trainer = Trainer(max_epochs=MAX_EPOCHS, gpus=-1,
logger=logger, callbacks=[early_stopping, checkpoint], deterministic=True, gradient_clip_val=5.0,)
trainer.fit(system)
#torch.save(model.serialize(), 'tasnet_model.pt')
| [
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.Generator",
"torch.utils.data.DataLoader"
] | 1.3.0 | flyingleafe/asteroid | 1c3c68ffc83f4b0bf7b00893083e4eff1f577b88 |
1.4 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from collections import defaultdict
from pathlib import Path
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torchvision
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from common import evaluate
from common.utils import save_reconstructions
from data.mri_data import SliceData
class MRIModel(pl.LightningModule):
"""
Abstract super class for Deep Learning based reconstruction models.
This is a subclass of the LightningModule class from pytorch_lightning, with
some additional functionality specific to fastMRI:
- fastMRI data loaders
- Evaluating reconstructions
- Visualization
- Saving test reconstructions
To implement a new reconstruction model, inherit from this class and implement the
following methods:
- train_data_transform, val_data_transform, test_data_transform:
Create and return data transformer objects for each data split
- training_step, validation_step, test_step:
Define what happens in one step of training, validation and testing respectively
- configure_optimizers:
Create and return the optimizers
Other methods from LightningModule can be overridden as needed.
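    A minimal subclass sketch (hypothetical names, shown only for illustration)::

        class MySliceModel(MRIModel):
            def train_data_transform(self):
                return my_transform  # assumed to be defined elsewhere
            def training_step(self, batch, batch_idx):
                ...  # compute and return dict(loss=...)
            def configure_optimizers(self):
                return torch.optim.Adam(self.parameters(), lr=1e-3)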
"""
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
def _create_data_loader(self, data_transform, data_partition, sample_rate=None):
sample_rate = sample_rate or self.hparams.sample_rate
dataset = SliceData(
root=self.hparams.data_path / f'{self.hparams.challenge}_{data_partition}',
transform=data_transform,
sample_rate=sample_rate,
challenge=self.hparams.challenge
)
sampler = RandomSampler(dataset)
# sampler = DistributedSampler(dataset)
return DataLoader(
dataset=dataset,
batch_size=self.hparams.batch_size,
num_workers=4,
pin_memory=False,
sampler=sampler,
)
def train_data_transform(self):
raise NotImplementedError
@pl.data_loader
def train_dataloader(self):
return self._create_data_loader(self.train_data_transform(), data_partition='train')
def val_data_transform(self):
raise NotImplementedError
@pl.data_loader
def val_dataloader(self):
return self._create_data_loader(self.val_data_transform(), data_partition='val')
def test_data_transform(self):
raise NotImplementedError
@pl.data_loader
def test_dataloader(self):
return self._create_data_loader(self.test_data_transform(), data_partition='test', sample_rate=1.)
def _evaluate(self, val_logs):
losses = []
outputs = defaultdict(list)
targets = defaultdict(list)
for log in val_logs:
losses.append(log['val_loss'].cpu().numpy())
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
targets[fname].append((slice, log['target'][i]))
metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
for fname in outputs:
output = np.stack([out for _, out in sorted(outputs[fname])])
target = np.stack([tgt for _, tgt in sorted(targets[fname])])
metrics['nmse'].append(evaluate.nmse(target, output))
metrics['ssim'].append(evaluate.ssim(target, output))
metrics['psnr'].append(evaluate.psnr(target, output))
metrics = {metric: np.mean(values) for metric, values in metrics.items()}
print(metrics, '\n')
# save the metrics data
metric_file_path = Path(self.hparams.exp_dir) / self.hparams.exp / "validation_metrics"
metric_file_path.mkdir(parents=True, exist_ok=True)
metric_file_path = metric_file_path / "metrics.csv"
df = pd.DataFrame([metrics])
if metric_file_path.exists():
df.to_csv(metric_file_path, mode="a", header=False, index=False)
else:
df.to_csv(metric_file_path, mode="w", header=True, index=False)
return dict(log=metrics, **metrics)
def _visualize(self, val_logs):
def _normalize(image):
image = image[np.newaxis]
image -= image.min()
return image / image.max()
def _save_image(image, tag):
grid = torchvision.utils.make_grid(torch.Tensor(image), nrow=4, pad_value=1)
grid_path = Path(self.hparams.exp_dir) / self.hparams.exp / "image_validation_step"
grid_path.mkdir(parents=True, exist_ok=True)
grid_path = grid_path / tag
grid_np = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
grid_pil = Image.fromarray(grid_np)
try:
grid_pil.save(grid_path, format="PNG")
except ValueError as e:
print(e)
# Only process first size to simplify visualization.
visualize_size = val_logs[0]['output'].shape
val_logs = [x for x in val_logs if x['output'].shape == visualize_size]
num_logs = len(val_logs)
num_viz_images = 16
step = (num_logs + num_viz_images - 1) // num_viz_images
outputs, targets = [], []
for i in range(0, num_logs, step):
outputs.append(_normalize(val_logs[i]['output'][0]))
targets.append(_normalize(val_logs[i]['target'][0]))
outputs = np.stack(outputs)
targets = np.stack(targets)
_save_image(targets, 'Target')
_save_image(outputs, 'Reconstruction')
_save_image(np.abs(targets - outputs), 'Error')
def validation_epoch_end(self, val_logs):
self._visualize(val_logs)
return self._evaluate(val_logs)
def test_epoch_end(self, test_logs):
outputs = defaultdict(list)
for log in test_logs:
for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
outputs[fname].append((slice, log['output'][i]))
for fname in outputs:
outputs[fname] = np.stack([out for _, out in sorted(outputs[fname])])
save_reconstructions(outputs, self.hparams.exp_dir / self.hparams.exp / 'reconstructions')
return dict()
| [
"torch.utils.data.sampler.RandomSampler",
"torch.utils.data.DataLoader",
"torch.Tensor"
] | 1.4.0 | ygrepo/fastMRI | cb9a2019f1833bfffe4969023113189abcbad0f7 |
1.8 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 14:34:39 2021
@author: Eric
"""
#%%
from model import Unet
from utils import random_fliplr, random_crop
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torch.utils.data import random_split
from torchvision.utils import save_image
# from torchinfo import summary
import os
import glob
import numpy as np
from tqdm import tqdm
from time import sleep
from PIL import Image
import matplotlib.pyplot as plt
import json
from torch.utils.tensorboard import SummaryWriter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#%%
DIR_TRAIN = "dataset/train"
DIR_VALID = "valid"
DIR_TEST = "test"
CHK_OUT = "checkpoints/norm"
TEST_CROP = 512 # px
PARAMS = {
"Type": "Normal net",
# "pretrain": "norm_net_epoch_200.pth",
"pretrain": None,
"train": {
"epochs": 100,
"batch": 4,
"lr": 5e-4,
"split": 0.9,
"nWorkers": 2,
},
"valid": {
"num": 2, # should be smaller than batch size
"log_interv": 10,
},
"image": {
"img_resize": 512,
"img_crop": 512,
"rand_flip": True,
"rand_crop": None
},
"writer": False, # Tensorboard on/off
}
if not os.path.exists(DIR_VALID):
os.makedirs(DIR_VALID)
if not os.path.exists(CHK_OUT):
os.makedirs(CHK_OUT)
if PARAMS["train"]["batch"] <= PARAMS["valid"]["num"]:
PARAMS["valid"]["num"] = PARAMS["train"]["batch"]
def pretty_json(hp):
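    # Dump the hyperparameter dict as indented JSON; the leading tabs make
    # TensorBoard's text panel render it as a code block.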
json_hp = json.dumps(hp, indent=2)
return "".join("\t" + line for line in json_hp.splitlines(True))
#%%
transform = transforms.Compose([
transforms.Resize(PARAMS["image"]["img_resize"]),
transforms.CenterCrop(PARAMS["image"]["img_crop"]),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # (input - mean) / std
# outputs range from -1 to 1
])
test_transform = transforms.Compose([
transforms.Resize(TEST_CROP),
transforms.CenterCrop(TEST_CROP),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # (input - mean) / std
# outputs range from -1 to 1
])
class TrainDataset(Dataset):
def __init__(self, img_dir, target_dir, name_list):
self.img_dir = img_dir
self.target_dir = target_dir
self.names = name_list
def __len__(self):
return len(self.names)
def __str__(self):
return self.names
def __getitem__(self, i):
img_filename = os.path.join(self.img_dir, self.names[i]) + ".jpg"
target_filename = os.path.join(self.target_dir, self.names[i]) + ".jpg"
img = Image.open(img_filename).convert('RGB')
target = Image.open(target_filename).convert('RGB')
img = transform(img)
target = transform(target)
return (img, target, self.names[i])
class TestDataset(Dataset):
def __init__(self, img_dir):
self.file_list = glob.glob(img_dir+"/*.jpg")
self.names = [os.path.splitext(os.path.basename(fp))[0] for fp in self.file_list]
def __len__(self):
return len(self.names)
def __getitem__(self, i):
img = Image.open(self.file_list[i]).convert('RGB')
img = test_transform(img)
return img, self.names[i]
#%%
def train(img_folder, label_folder, name_list, valid_folder, pretrained=None):
data_train = TrainDataset(img_folder, label_folder, name_list)
num_train = int(len(data_train) * PARAMS["train"]["split"])
data_train, data_valid = random_split(data_train, [num_train, len(data_train) - num_train])
print("Train data: %d, Validation data: %d, Train batches: %.2f\n" % \
(len(data_train), len(data_valid), len(data_train)/PARAMS["train"]["batch"]))
trainloader = DataLoader(data_train, batch_size=PARAMS["train"]["batch"],
num_workers=PARAMS["train"]["nWorkers"], shuffle=True, drop_last=True)
validloader = DataLoader(data_valid, batch_size=PARAMS["train"]["batch"], shuffle=False, num_workers=2)
net = Unet()
net.weight_init(mean=0.0, std=0.02)
net.to(device)
# summary(net, (1, 3, 512, 512))
criterion = nn.MSELoss().to(device)
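    # Adam with beta1=0.5 instead of the default 0.9, a common choice for image-to-image networks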
optimizer = optim.Adam(net.parameters(), lr=PARAMS["train"]["lr"], betas=(0.5, 0.999))
# train
train_loss_hist = []
valid_loss_hist = []
if pretrained:
checkpoint = torch.load(os.path.join(CHK_OUT, pretrained))
net.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optim"])
train_loss_hist = checkpoint["train_loss_hist"]
valid_loss_hist = checkpoint["valid_loss_hist"]
start_epoch = checkpoint["epoch"]
else:
start_epoch = 0
# fixed valid output
v_num = PARAMS["valid"]["num"]
if len(data_valid) <= v_num:
v_num = len(data_valid) # on the off-chance valid dataset only has 1 image
valid_img_data = next(iter(validloader))
valid_img_data = [data[:v_num] for data in valid_img_data]
if PARAMS["image"]["rand_crop"]:
valid_img_data[0], valid_img_data[1] = random_crop(valid_img_data[0], valid_img_data[1], PARAMS["image"]["rand_crop"])
# tensorboard
if PARAMS["writer"]:
writer = SummaryWriter()
writer.add_text("Parameters", pretty_json(PARAMS), 0)
writer.add_text("Validation images", str(valid_img_data[2]), 0)
sleep(0.3)
for epoch in range(start_epoch, PARAMS["train"]["epochs"]):
# train
pbar = tqdm(trainloader, ascii=True, bar_format='{l_bar}{bar:10}{r_bar}')
p_desc = "Train %2d/%d" % (epoch + 1, PARAMS["train"]["epochs"])
pbar.set_description(p_desc)
net.train()
tmp_loss = []
for batch_id, (img_in, target, _) in enumerate(pbar):
if PARAMS["image"]["rand_flip"]:
img_in, target = random_fliplr(img_in, target)
if PARAMS["image"]["rand_crop"]:
img_in, target = random_crop(img_in, target, PARAMS["image"]["rand_crop"])
img_in = img_in.to(device)
target = target.to(device)
optimizer.zero_grad()
img_out = net(img_in)
loss = criterion(img_out, target)
loss.backward()
optimizer.step()
tmp_loss.append(loss.item())
p_post = f"T_Loss: {loss.item(): .4f}"
pbar.set_postfix_str(p_post)
pbar.update(0)
train_loss_hist.append(np.mean(tmp_loss))
if PARAMS["writer"]:
writer.add_scalar("Loss/Train", train_loss_hist[-1], epoch)
# validation
pbar = tqdm(validloader, ascii=True, bar_format='{l_bar}{bar:10}{r_bar}')
p_desc = "Valid %2d/%d" % (epoch + 1, PARAMS["train"]["epochs"])
pbar.set_description(p_desc)
net.eval()
tmp_loss = []
with torch.no_grad():
for batch_id, (img_in, target, _) in enumerate(pbar):
if PARAMS["image"]["rand_flip"]:
img_in, target = random_fliplr(img_in, target)
if PARAMS["image"]["rand_crop"]:
img_in, target = random_crop(img_in, target, PARAMS["image"]["rand_crop"])
img_in = img_in.to(device)
target = target.to(device)
img_out = net(img_in)
loss = criterion(img_out, target)
tmp_loss.append(loss.item())
p_post = f"V_Loss: {loss.item(): .4f}"
pbar.set_postfix_str(p_post)
pbar.update(0)
valid_loss_hist.append(np.mean(tmp_loss))
if PARAMS["writer"]:
writer.add_scalar("Loss/Valid", valid_loss_hist[-1], epoch)
if (epoch+1) % PARAMS["valid"]["log_interv"] == 0 or epoch == 0:
with torch.no_grad():
img_in = valid_img_data[0].to(device)
target = valid_img_data[1].to(device)
img_out = net(img_in)
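                # saved grid layout (nrow=v_num): row 1 inputs, row 2 targets, row 3 predictions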
imgs = torch.cat([img_in, target, img_out])
save_image(imgs, os.path.join(valid_folder, f"epoch_{epoch+1}.png"),
value_range=(-1,1), normalize=True, nrow=v_num)
# save pth
torch.save({
"epoch": epoch+1,
"model": net.state_dict(),
"optim": optimizer.state_dict(),
"train_loss_hist": train_loss_hist,
"valid_loss_hist": valid_loss_hist
}, os.path.join(CHK_OUT, f"norm_net_epoch_{epoch+1:03}.pth"))
plotLoss(train_loss_hist, valid_loss_hist, "Loss history")
# tensorboard
if PARAMS["writer"]:
writer.flush()
writer.close()
return net
#%% test
def test(net, in_folder, out_folder):
data_test = TestDataset(in_folder)
batch_size = len(data_test)
# print(batch_size)
testloader = DataLoader(data_test, batch_size=batch_size, shuffle=False)
print("\nOutput test files...")
net.eval()
with torch.no_grad():
for idx, data in enumerate(testloader):
img_in = data[0].to(device)
img_out = net(img_in)
# print(img_name)
out_filename = os.path.join(out_folder, "output.png")
save_image(torch.cat([img_in, img_out]), out_filename, value_range=(-1,1), normalize=True, nrow=batch_size)
print("Done!")
#%%
def plotLoss(t_hist, v_hist, title):
plt.figure()
plt.plot(t_hist, label="Train")
plt.plot(v_hist, label="Valid")
plt.title(title)
plt.legend()
plt.xlabel("Epochs")
plt.show()
#%%
def main():
# ==== train normal ====
print("Normal map")
color_folder = os.path.join(DIR_TRAIN, "color")
norm_folder = os.path.join(DIR_TRAIN, "normal")
name_txt = os.path.join(DIR_TRAIN, "name_list.txt")
with open(name_txt, "r") as f:
name_list = [line.rstrip('\n') for line in f.readlines()]
test_in_folder = os.path.join(DIR_TEST, "input")
test_norm_folder = os.path.join(DIR_TEST, "output_norm")
valid_folder = os.path.join(DIR_VALID, "norm")
if not os.path.exists(test_norm_folder):
os.makedirs(test_norm_folder)
if not os.path.exists(valid_folder):
os.makedirs(valid_folder)
norm_net = train(color_folder, norm_folder, name_list, valid_folder, pretrained=PARAMS["pretrain"])
test(norm_net, test_in_folder, test_norm_folder)
if str(device) == 'cuda':
torch.cuda.empty_cache()
if __name__ == "__main__":
main()
| [
"torch.cat",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.tensorboard.SummaryWriter"
] | 1.8.0 | yuchen071/Normal-map-generator | 40f92a38a75a35dcf4b8309517bf83b6a52b4fbb |
1.7 | """
---
title: Train Feedback Transformer
summary: This is training code with notes for a feedback transformer.
---
# Train Feedback Transformer
This trains a [feedback transformer](index.html) model for auto-regression.
You can pick the original feedback transformer or the new version
where the keys and values are precalculated.
Here's a Colab notebook for training a feedback transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/transformers/feedback/experiment.ipynb)
[](https://web.lab-ml.com/run?uuid=d8eb9416530a11eb8fb50242ac1c0002)
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_helpers.module import Module
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import Encoder, Generator, TransformerConfigs
from labml_nn.transformers.utils import subsequent_mask
class AutoregressiveModel(Module):
"""
## Auto regressive model
"""
def __init__(self, n_vocab: int, d_model: int, transformer: Module):
super().__init__()
# Token embedding module
self.src_embed = nn.Embedding(n_vocab, d_model)
self.transformer = transformer
self.generator = nn.Linear(d_model, n_vocab)
def __call__(self, x: torch.Tensor):
# Embed the tokens
x = self.src_embed(x)
        # Run it through the transformer
res = self.transformer(x)
# Generate logits of the next token
return self.generator(res), None
class Configs(NLPAutoRegressionConfigs):
"""
## Configurations
    The default configs can and will be overridden when we start the experiment
"""
model: AutoregressiveModel
d_model: int = 512
heads: int = 8
dropout: float = 0.0
d_ff: int = 2048
n_layers: int = 6
@option(Configs.model)
def feedback_transformer(c: Configs):
"""
Create [original feedback transformer](index.html).
"""
from labml_nn.transformers.feedback import FeedbackTransformer, FeedbackTransformerLayer, \
FeedbackAttention, FeedForward
return AutoregressiveModel(
c.n_tokens, c.d_model,
FeedbackTransformer(
FeedbackTransformerLayer(d_model=c.d_model,
attn=FeedbackAttention(c.heads, c.d_model, c.dropout),
feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
dropout_prob=c.dropout),
c.n_layers)).to(c.device)
@option(Configs.model)
def feedback_transformer_kv(c: Configs):
"""
Create [updated feedback transformer](index.html#kv_shared), with precalculated keys and values.
"""
from labml_nn.transformers.feedback import FeedbackTransformerKV, FeedbackTransformerLayer, \
FeedbackAttention, FeedForward
return AutoregressiveModel(
c.n_tokens, c.d_model,
FeedbackTransformerKV(
FeedbackTransformerLayer(d_model=c.d_model,
attn=FeedbackAttention(c.heads, c.d_model, c.dropout,
is_kv_precomputed=True),
feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
dropout_prob=c.dropout),
c.n_layers, c.d_model, c.heads)).to(c.device)
def main():
# Create experiment
experiment.create(name="feedback_transformer")
# Create configs
conf = Configs()
# Load configurations
experiment.configs(conf,
# A dictionary of configurations to override
{'tokenizer': 'character',
'text': 'tiny_shakespeare',
'optimizer.learning_rate': 1.0,
'optimizer.optimizer': 'Noam',
'prompt': 'It is',
'prompt_separator': '',
# Use `feedback_transformer` for original feedback transformer
'model': 'feedback_transformer_kv',
'train_loader': 'shuffled_train_loader',
'valid_loader': 'shuffled_valid_loader',
'seq_len': 128,
'epochs': 128,
'batch_size': 64,
'inner_iterations': 25})
# Set models for saving and loading
experiment.add_pytorch_models(get_modules(conf))
# Start the experiment
with experiment.start():
# Run the training loop
conf.run()
if __name__ == '__main__':
main()
| [
"torch.nn.Linear",
"torch.nn.Embedding"
] | 1.7 | lc0/nn | 0de7e343a11685de37a03ae4ee2510d18fc07369 |
1.0 | #!/usr/bin/env python3
import torch
import torch.cuda.profiler as profiler
from apex import pyprof
class Foo(torch.jit.ScriptModule):
def __init__(self, size):
super(Foo, self).__init__()
self.n = torch.nn.Parameter(torch.ones(size))
self.m = torch.nn.Parameter(torch.ones(size))
@torch.jit.script_method
def forward(self, input):
return self.n*input + self.m
#Initialize pyprof after the JIT step
pyprof.nvtx.init()
#Hook up the forward function to pyprof
pyprof.nvtx.wrap(Foo, 'forward')
foo = Foo(4)
foo.cuda()
x = torch.ones(4).cuda()
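# nvtx ranges are emitted only inside emit_nvtx(); profiler.start()/stop() delimit the capture window (assuming the external profiler is run with profiling-from-start disabled)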
with torch.autograd.profiler.emit_nvtx():
profiler.start()
z = foo(x)
profiler.stop()
print(z)
| [
"torch.cuda.profiler.start",
"torch.autograd.profiler.emit_nvtx",
"torch.cuda.profiler.stop",
"torch.ones"
] | 1.0 | oyj0594/apex | b66ffc1d952d0b20d6706ada783ae5b23e4ee734 |
1.0 | #!/usr/bin/env python3
"""
Example to run pyprof with imagenet models.
"""
import sys
import torch
import torch.nn as nn
import torchvision.models as models
import torch.cuda.profiler as profiler
import argparse
from apex import pyprof
from apex.optimizers import FusedAdam
def parseArgs():
parser = argparse.ArgumentParser(prog=sys.argv[0], description="Run popular imagenet models.")
parser.add_argument("-m",
type=str,
default="resnet50",
choices=["alexnet", "densenet121", "densenet161", "densenet169", "densenet201", "googlenet", "mnasnet0_5", "mnasnet0_75", "mnasnet1_0", "mnasnet1_3", "mobilenet_v2", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnext50_32x4d", "resnext101_32x8d", "wide_resnet50_2", "wide_resnet101_2", "shufflenet_v2_x0_5", "shufflenet_v2_x1_0", "shufflenet_v2_x1_5", "shufflenet_v2_x2_0", "squeezenet1_0", "squeezenet1_1", "vgg11", "vgg11_bn", "vgg13", "vgg13_bn", "vgg16", "vgg16_bn", "vgg19", "vgg19_bn", "inception_v3"],
help="Model.")
parser.add_argument("-b",
type=int,
default=32,
help="Batch size.")
parser.add_argument("-o",
type=str,
default="adam",
choices=["adam", "sgd"],
help="Optimizer.")
args = parser.parse_args()
return args
d = {
"alexnet": {'H': 224, 'W': 224, 'opts': {}},
"densenet121": {'H': 224, 'W': 224, 'opts': {}},
"densenet161": {'H': 224, 'W': 224, 'opts': {}},
"densenet169": {'H': 224, 'W': 224, 'opts': {}},
"densenet201": {'H': 224, 'W': 224, 'opts': {}},
"googlenet": {'H': 224, 'W': 224, 'opts': {'aux_logits': False}},
"mnasnet0_5": {'H': 224, 'W': 224, 'opts': {}},
"mnasnet0_75": {'H': 224, 'W': 224, 'opts': {}},
"mnasnet1_0": {'H': 224, 'W': 224, 'opts': {}},
"mnasnet1_3": {'H': 224, 'W': 224, 'opts': {}},
"mobilenet_v2": {'H': 224, 'W': 224, 'opts': {}},
"resnet18": {'H': 224, 'W': 224, 'opts': {}},
"resnet34": {'H': 224, 'W': 224, 'opts': {}},
"resnet50": {'H': 224, 'W': 224, 'opts': {}},
"resnet101": {'H': 224, 'W': 224, 'opts': {}},
"resnet152": {'H': 224, 'W': 224, 'opts': {}},
"resnext50_32x4d": {'H': 224, 'W': 224, 'opts': {}},
"resnext101_32x8d": {'H': 224, 'W': 224, 'opts': {}},
"wide_resnet50_2": {'H': 224, 'W': 224, 'opts': {}},
"wide_resnet101_2": {'H': 224, 'W': 224, 'opts': {}},
"shufflenet_v2_x0_5": {'H': 224, 'W': 224, 'opts': {}},
"shufflenet_v2_x1_0": {'H': 224, 'W': 224, 'opts': {}},
"shufflenet_v2_x1_5": {'H': 224, 'W': 224, 'opts': {}},
"shufflenet_v2_x2_0": {'H': 224, 'W': 224, 'opts': {}},
"squeezenet1_0": {'H': 224, 'W': 224, 'opts': {}},
"squeezenet1_1": {'H': 224, 'W': 224, 'opts': {}},
"vgg11": {'H': 224, 'W': 224, 'opts': {}},
"vgg11_bn": {'H': 224, 'W': 224, 'opts': {}},
"vgg13": {'H': 224, 'W': 224, 'opts': {}},
"vgg13_bn": {'H': 224, 'W': 224, 'opts': {}},
"vgg16": {'H': 224, 'W': 224, 'opts': {}},
"vgg16_bn": {'H': 224, 'W': 224, 'opts': {}},
"vgg19": {'H': 224, 'W': 224, 'opts': {}},
"vgg19_bn": {'H': 224, 'W': 224, 'opts': {}},
"inception_v3": {'H': 299, 'W': 299, 'opts': {'aux_logits': False}},
}
def main():
args = parseArgs()
pyprof.nvtx.init()
# pyprof.nvtx.wrap(fused_adam_cuda, 'adam')
N = args.b
C = 3
H = d[args.m]['H']
W = d[args.m]['W']
opts = d[args.m]['opts']
classes = 1000
net = getattr(models, args.m)
net = net(**opts).cuda().half()
net.train()
x = torch.rand(N, C, H, W).cuda().half()
target = torch.empty(N, dtype=torch.long).random_(classes).cuda()
criterion = nn.CrossEntropyLoss().cuda()
if (args.o == "sgd"):
optimizer = torch.optim.SGD(net.parameters(), lr = 0.01, momentum=0.9)
elif (args.o == "adam"):
optimizer = FusedAdam(net.parameters())
else:
assert False
#Warm up without profiler
for i in range(2):
output = net(x)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.autograd.profiler.emit_nvtx():
profiler.start()
output = net(x)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
profiler.stop()
if __name__ == "__main__":
main()
| [
"torch.rand",
"torch.autograd.profiler.emit_nvtx",
"torch.cuda.profiler.stop",
"torch.cuda.profiler.start",
"torch.empty",
"torch.nn.CrossEntropyLoss"
] | 1.0 | oyj0594/apex | b66ffc1d952d0b20d6706ada783ae5b23e4ee734 |
1.1 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from logger import setup_logger
from models.model_stages import BiSeNet
from cityscapes import CityScapes
from loss.loss import OhemCELoss
from loss.detail_loss import DetailAggregateLoss
from evaluation import MscEvalV0
from optimizer_loss import Optimizer
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.distributed as dist
import os
import os.path as osp
import logging
import time
import datetime
import argparse
logger = logging.getLogger()
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def parse_args():
parse = argparse.ArgumentParser()
parse.add_argument(
'--local_rank',
dest = 'local_rank',
type = int,
default = -1,
)
parse.add_argument(
'--n_workers_train',
dest = 'n_workers_train',
type = int,
default = 8,
)
parse.add_argument(
'--n_workers_val',
dest = 'n_workers_val',
type = int,
default = 0,
)
parse.add_argument(
'--n_img_per_gpu',
dest = 'n_img_per_gpu',
type = int,
default = 4,
)
parse.add_argument(
'--max_iter',
dest = 'max_iter',
type = int,
default = 40000,
)
parse.add_argument(
'--save_iter_sep',
dest = 'save_iter_sep',
type = int,
default = 1000,
)
parse.add_argument(
'--warmup_steps',
dest = 'warmup_steps',
type = int,
default = 1000,
)
parse.add_argument(
'--mode',
dest = 'mode',
type = str,
default = 'train',
)
parse.add_argument(
'--ckpt',
dest = 'ckpt',
type = str,
default = None,
)
parse.add_argument(
'--respath',
dest = 'respath',
type = str,
default = None,
)
parse.add_argument(
'--backbone',
dest = 'backbone',
type = str,
default = 'CatNetSmall',
)
parse.add_argument(
'--pretrain_path',
dest = 'pretrain_path',
type = str,
default = '',
)
parse.add_argument(
'--use_conv_last',
dest = 'use_conv_last',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_2',
dest = 'use_boundary_2',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_4',
dest = 'use_boundary_4',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_8',
dest = 'use_boundary_8',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_16',
dest = 'use_boundary_16',
type = str2bool,
default = False,
)
return parse.parse_args()
def train():
args = parse_args()
save_pth_path = os.path.join(args.respath, 'pths')
dspth = './data'
# print(save_pth_path)
# print(osp.exists(save_pth_path))
# if not osp.exists(save_pth_path) and dist.get_rank()==0:
if not osp.exists(save_pth_path):
os.makedirs(save_pth_path)
torch.cuda.set_device(args.local_rank)
dist.init_process_group(
backend = 'nccl',
init_method = 'env://',
world_size = torch.cuda.device_count(),
rank=args.local_rank
)
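    # single-node setup: one process per visible GPU; ranks are assumed to come from a torch.distributed launcher via --local_rank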
setup_logger(args.respath)
## dataset
n_classes = 19
n_img_per_gpu = args.n_img_per_gpu
n_workers_train = args.n_workers_train
n_workers_val = args.n_workers_val
use_boundary_16 = args.use_boundary_16
use_boundary_8 = args.use_boundary_8
use_boundary_4 = args.use_boundary_4
use_boundary_2 = args.use_boundary_2
mode = args.mode
cropsize = [1024, 512]
randomscale = (0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5)
if dist.get_rank()==0:
logger.info('n_workers_train: {}'.format(n_workers_train))
logger.info('n_workers_val: {}'.format(n_workers_val))
logger.info('use_boundary_2: {}'.format(use_boundary_2))
logger.info('use_boundary_4: {}'.format(use_boundary_4))
logger.info('use_boundary_8: {}'.format(use_boundary_8))
logger.info('use_boundary_16: {}'.format(use_boundary_16))
logger.info('mode: {}'.format(args.mode))
ds = CityScapes(dspth, cropsize=cropsize, mode=mode, randomscale=randomscale)
sampler = torch.utils.data.distributed.DistributedSampler(ds)
dl = DataLoader(ds,
batch_size = n_img_per_gpu,
shuffle = False,
sampler = sampler,
num_workers = n_workers_train,
pin_memory = False,
drop_last = True)
# exit(0)
dsval = CityScapes(dspth, mode='val', randomscale=randomscale)
sampler_val = torch.utils.data.distributed.DistributedSampler(dsval)
dlval = DataLoader(dsval,
batch_size = 2,
shuffle = False,
sampler = sampler_val,
num_workers = n_workers_val,
drop_last = False)
## model
ignore_idx = 255
net = BiSeNet(backbone=args.backbone, n_classes=n_classes, pretrain_model=args.pretrain_path,
use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4, use_boundary_8=use_boundary_8,
use_boundary_16=use_boundary_16, use_conv_last=args.use_conv_last)
if not args.ckpt is None:
net.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
net.cuda()
net.train()
net = nn.parallel.DistributedDataParallel(net,
device_ids = [args.local_rank, ],
output_device = args.local_rank,
find_unused_parameters=True
)
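    # OHEM cross-entropy: keep at least n_min hard pixels per batch (1/16 of the batch's pixels), filtering out easy pixels above score_thres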
score_thres = 0.7
n_min = n_img_per_gpu*cropsize[0]*cropsize[1]//16
criteria_p = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
criteria_16 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
criteria_32 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
boundary_loss_func = DetailAggregateLoss()
## optimizer
maxmIOU50 = 0.
maxmIOU75 = 0.
momentum = 0.9
weight_decay = 5e-4
lr_start = 1e-2
max_iter = args.max_iter
save_iter_sep = args.save_iter_sep
power = 0.9
warmup_steps = args.warmup_steps
warmup_start_lr = 1e-5
if dist.get_rank()==0:
print('max_iter: ', max_iter)
print('save_iter_sep: ', save_iter_sep)
print('warmup_steps: ', warmup_steps)
optim = Optimizer(
model = net.module,
loss = boundary_loss_func,
lr0 = lr_start,
momentum = momentum,
wd = weight_decay,
warmup_steps = warmup_steps,
warmup_start_lr = warmup_start_lr,
max_iter = max_iter,
power = power)
## train loop
msg_iter = 50
loss_avg = []
loss_boundery_bce = []
loss_boundery_dice = []
st = glob_st = time.time()
diter = iter(dl)
epoch = 0
for it in range(max_iter):
try:
im, lb = next(diter)
if not im.size()[0]==n_img_per_gpu: raise StopIteration
except StopIteration:
epoch += 1
sampler.set_epoch(epoch)
diter = iter(dl)
im, lb = next(diter)
im = im.cuda()
lb = lb.cuda()
H, W = im.size()[2:]
lb = torch.squeeze(lb, 1)
optim.zero_grad()
if use_boundary_2 and use_boundary_4 and use_boundary_8:
out, out16, out32, detail2, detail4, detail8 = net(im)
if (not use_boundary_2) and use_boundary_4 and use_boundary_8:
out, out16, out32, detail4, detail8 = net(im)
if (not use_boundary_2) and (not use_boundary_4) and use_boundary_8:
out, out16, out32, detail8 = net(im)
if (not use_boundary_2) and (not use_boundary_4) and (not use_boundary_8):
out, out16, out32 = net(im)
lossp = criteria_p(out, lb)
loss2 = criteria_16(out16, lb)
loss3 = criteria_32(out32, lb)
boundery_bce_loss = 0.
boundery_dice_loss = 0.
if use_boundary_2:
# if dist.get_rank()==0:
# print('use_boundary_2')
boundery_bce_loss2, boundery_dice_loss2 = boundary_loss_func(detail2, lb)
boundery_bce_loss += boundery_bce_loss2
boundery_dice_loss += boundery_dice_loss2
if use_boundary_4:
# if dist.get_rank()==0:
# print('use_boundary_4')
boundery_bce_loss4, boundery_dice_loss4 = boundary_loss_func(detail4, lb)
boundery_bce_loss += boundery_bce_loss4
boundery_dice_loss += boundery_dice_loss4
if use_boundary_8:
# if dist.get_rank()==0:
# print('use_boundary_8')
boundery_bce_loss8, boundery_dice_loss8 = boundary_loss_func(detail8, lb)
boundery_bce_loss += boundery_bce_loss8
boundery_dice_loss += boundery_dice_loss8
loss = lossp + loss2 + loss3 + boundery_bce_loss + boundery_dice_loss
loss.backward()
optim.step()
loss_avg.append(loss.item())
        # boundary losses stay plain floats (0.) when no boundary heads are enabled, so avoid .item()
        loss_boundery_bce.append(float(boundery_bce_loss))
        loss_boundery_dice.append(float(boundery_dice_loss))
## print training log message
if (it+1)%msg_iter==0:
loss_avg = sum(loss_avg) / len(loss_avg)
lr = optim.lr
ed = time.time()
t_intv, glob_t_intv = ed - st, ed - glob_st
eta = int((max_iter - it) * (glob_t_intv / it))
eta = str(datetime.timedelta(seconds=eta))
loss_boundery_bce_avg = sum(loss_boundery_bce) / len(loss_boundery_bce)
loss_boundery_dice_avg = sum(loss_boundery_dice) / len(loss_boundery_dice)
msg = ', '.join([
'it: {it}/{max_it}',
'lr: {lr:4f}',
'loss: {loss:.4f}',
'boundery_bce_loss: {boundery_bce_loss:.4f}',
'boundery_dice_loss: {boundery_dice_loss:.4f}',
'eta: {eta}',
'time: {time:.4f}',
]).format(
it = it+1,
max_it = max_iter,
lr = lr,
loss = loss_avg,
boundery_bce_loss = loss_boundery_bce_avg,
boundery_dice_loss = loss_boundery_dice_avg,
time = t_intv,
eta = eta
)
logger.info(msg)
loss_avg = []
loss_boundery_bce = []
loss_boundery_dice = []
st = ed
# print(boundary_loss_func.get_params())
if (it+1)%save_iter_sep==0:# and it != 0:
## model
logger.info('evaluating the model ...')
logger.info('setup and restore model')
net.eval()
# ## evaluator
logger.info('compute the mIOU')
with torch.no_grad():
single_scale1 = MscEvalV0()
mIOU50 = single_scale1(net, dlval, n_classes)
# single_scale2= MscEvalV0(scale=0.75)
# mIOU75 = single_scale2(net, dlval, n_classes)
save_pth = osp.join(save_pth_path, 'model_iter{}_mIOU50_{}.pth'
.format(it+1, str(round(mIOU50,4))))
state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
if dist.get_rank()==0:
torch.save(state, save_pth)
logger.info('training iteration {}, model saved to: {}'.format(it+1, save_pth))
if mIOU50 > maxmIOU50:
maxmIOU50 = mIOU50
save_pth = osp.join(save_pth_path, 'model_maxmIOU50.pth'.format(it+1))
state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
if dist.get_rank()==0:
torch.save(state, save_pth)
logger.info('max mIOU model saved to: {}'.format(save_pth))
# if mIOU75 > maxmIOU75:
# maxmIOU75 = mIOU75
# save_pth = osp.join(save_pth_path, 'model_maxmIOU75.pth'.format(it+1))
# state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
# if dist.get_rank()==0: torch.save(state, save_pth)
# logger.info('max mIOU model saved to: {}'.format(save_pth))
logger.info('mIOU50 is: {}'.format(mIOU50))
logger.info('maxmIOU50 is: {}'.format(maxmIOU50))
net.train()
## dump the final model
save_pth = osp.join(save_pth_path, 'model_final.pth')
net.cpu()
state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
if dist.get_rank()==0: torch.save(state, save_pth)
logger.info('training done, model saved to: {}'.format(save_pth))
print('epoch: ', epoch)
if __name__ == "__main__":
train()
| [
"torch.save",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.squeeze",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.distributed.get_rank"
] | 1.1.0 | Toby-SZZ/STDC-Seg | 9273e03b02241fda107962bfc7bd366310a8d23b |
1.3 | import string
import numpy as np
import torch as th
from ttools.training import ModelInterface
from . import utils
class VectorizerInterface(ModelInterface):
def __init__(self, model, lr, n_primitives, canvas_size, w_surface, w_alignment, csg, rounded, cuda=True):
self.model = model
self.cuda = cuda
self.n_primitives = n_primitives
self.canvas_size = canvas_size
self.w_surface = w_surface
self.w_alignment = w_alignment
self.csg = csg
self.rounded = rounded
self._step = 0
if self.cuda:
self.model.cuda()
self.optimizer = th.optim.Adam(self.model.parameters(), lr=lr)
def forward(self, batch):
df = batch['distance_fields']
if self.cuda:
df = df.cuda()
params = self.model(df[:,None]).view(df.size(0), self.n_primitives, -1)
params = th.cat([0.35*params[...,:3], params[...,3:]], dim=-1)
df = utils.distance_to_rounded_cuboids if self.rounded else utils.distance_to_cuboids
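        # CSG mode: union the first half of the primitives (min of distances) and subtract the second half (max with the negated distance)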
if self.csg:
plus_params = params[:,:self.n_primitives//2]
plus_distance_fields = utils.compute_distance_fields(plus_params, self.canvas_size, df=df)
plus_distance_fields = plus_distance_fields.min(1)[0]
minus_params = params[:,self.n_primitives//2:]
minus_distance_fields = utils.compute_distance_fields(minus_params, self.canvas_size, df=df)
minus_distance_fields = minus_distance_fields.min(1)[0]
distance_fields = th.max(plus_distance_fields, -minus_distance_fields) ** 2
else:
distance_fields = utils.compute_distance_fields(params, self.canvas_size, df=df)
distance_fields = distance_fields.min(1)[0] ** 2
alignment_fields = utils.compute_alignment_fields(distance_fields)
distance_fields = distance_fields[...,1:-1,1:-1,1:-1]
occupancy_fields = utils.compute_occupancy_fields(distance_fields)
return {
'distance_fields': distance_fields,
'alignment_fields': alignment_fields,
'occupancy_fields': occupancy_fields
}
    def _compute_losses(self, batch, fwd_data):
ret = {}
target_distance_fields = batch['distance_fields']
target_alignment_fields = batch['alignment_fields']
target_occupancy_fields = batch['occupancy_fields']
if self.cuda:
target_distance_fields = target_distance_fields.cuda()
target_alignment_fields = target_alignment_fields.cuda()
target_occupancy_fields = target_occupancy_fields.cuda()
distance_fields = fwd_data['distance_fields']
alignment_fields = fwd_data['alignment_fields']
occupancy_fields = fwd_data['occupancy_fields']
surfaceloss = th.mean(target_occupancy_fields*distance_fields + target_distance_fields*occupancy_fields)
alignmentloss = th.mean(1 - th.sum(target_alignment_fields*alignment_fields, dim=-1)**2)
ret['surfaceloss'] = surfaceloss
ret['alignmentloss'] = alignmentloss
loss = self.w_surface*surfaceloss + self.w_alignment*alignmentloss
ret['loss'] = loss
return ret
def training_step(self, batch):
self.model.train()
fwd_data = self.forward(batch)
self.optimizer.zero_grad()
        losses_dict = self._compute_losses(batch, fwd_data)
loss = losses_dict['loss']
loss.backward()
self.optimizer.step()
self._step += 1
return { k: v.item() for k, v in losses_dict.items() }
def init_validation(self):
losses = ['loss', 'surfaceloss', 'alignmentloss']
ret = { l: 0 for l in losses }
ret['count'] = 0
return ret
def validation_step(self, batch, running_data):
self.model.eval()
n = batch['distance_fields'].shape[0]
count = running_data['count']
fwd_data = self.forward(batch)
        losses_dict = self._compute_losses(batch, fwd_data)
loss = losses_dict['loss']
surfaceloss = losses_dict['surfaceloss']
alignmentloss = losses_dict['alignmentloss']
return {
'loss': (running_data['loss']*count + loss.item()*n) / (count+n),
'surfaceloss': (running_data['surfaceloss']*count + surfaceloss.item()*n) / (count+n),
'alignmentloss': (running_data['alignmentloss']*count + alignmentloss.item()*n) / (count+n),
'count': count+n
}
| [
"torch.sum",
"torch.cat",
"torch.mean",
"torch.max"
] | 1.3.1 | dmsm/DeepParametricShapes | 2e0de365191b29c61796f7cd6cbd2bdf631eae2c |
1.1 | ######################################################################################
#FSSNet: Fast Semantic Segmentation for Scene Perception
#Paper-Link: https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392426
######################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from utils.activations import NON_LINEARITY
__all__ = ["FSSNet"]
# NON_LINEARITY = {
# 'ReLU': nn.ReLU(inplace=True),
# 'PReLU': nn.PReLU(),
# 'ReLu6': nn.ReLU6(inplace=True)
# }
class InitialBlock(nn.Module):
def __init__(self, ninput, noutput, non_linear='ReLU'):
super().__init__()
self.conv = nn.Conv2d(ninput, noutput-ninput, (3, 3), stride=2, padding=1, bias=False)
self.pool = nn.MaxPool2d(2, stride=2)
self.bn = nn.BatchNorm2d(noutput-ninput, eps=1e-3)
self.relu = NON_LINEARITY[non_linear]
def forward(self, input):
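        # ENet-style stem: concatenate the conv branch (noutput-ninput channels) with the max-pooled input (ninput channels)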
output = self.relu(self.bn(self.conv(input)))
output = torch.cat([output, self.pool(input)], 1)
return output
class DownsamplingBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, internal_ratio=4, kernel_size=3,
padding=0, dropout_prob=0., bias=False, non_linear='ReLU'):
super().__init__()
# Store parameters that are needed later
internal_channels = in_channels // internal_ratio
# Main branch - max pooling followed by feature map (channels) padding
self.main_max1 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=bias),
)
# Extension branch - 2x2 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution. Number
# of channels is doubled.
# 2x2 projection convolution with stride 2, no padding
self.ext_conv1 = nn.Sequential(
nn.Conv2d(in_channels, internal_channels, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(internal_channels, internal_channels, kernel_size=kernel_size, stride=1, padding=padding,
bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(internal_channels, out_channels, kernel_size=1, stride=1, bias=bias),
nn.BatchNorm2d(out_channels),
NON_LINEARITY[non_linear]
)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_prelu = NON_LINEARITY[non_linear]
def forward(self, x):
# Main branch shortcut
main = self.main_max1(x)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = self.out_prelu(main + ext)
return out
class UpsamplingBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, internal_ratio=4, kernel_size=2,
padding=0, dropout_prob=0., bias=False, non_linear='ReLU'):
super().__init__()
internal_channels = in_channels // internal_ratio
# Main branch - max pooling followed by feature map (channels) padding
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
# self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution. Number
# of channels is doubled.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# Transposed convolution
self.ext_conv2 = nn.Sequential(
nn.ConvTranspose2d(internal_channels, internal_channels, kernel_size=kernel_size, stride=2, padding=padding,
output_padding=0, bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels),
NON_LINEARITY[non_linear]
)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_prelu = NON_LINEARITY[non_linear]
def forward(self, x, x_pre):
        # Main branch shortcut (note: this differs from the original paper; Fig. 4 contradicts Fig. 9)
main = x + x_pre
main = self.main_conv1(main) # 2. conv first, follow up
main = F.interpolate(main, scale_factor=2, mode="bilinear", align_corners=True) # 1. up first, follow conv
# main = self.main_conv1(main)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = self.out_prelu(main + ext)
return out
class DilatedBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1,
dropout_prob=0., bias=False, non_linear='ReLU'):
super(DilatedBlock, self).__init__()
self.relu = NON_LINEARITY[non_linear]
self.internal_channels = in_channels // 4
# compress conv
self.conv1 = nn.Conv2d(in_channels, self.internal_channels, 1, bias=bias)
self.conv1_bn = nn.BatchNorm2d(self.internal_channels)
# a relu
self.conv2 = nn.Conv2d(self.internal_channels, self.internal_channels, kernel_size,
stride, padding=int((kernel_size - 1) / 2 * dilation), dilation=dilation, groups=1,
bias=bias)
self.conv2_bn = nn.BatchNorm2d(self.internal_channels)
# a relu
self.conv4 = nn.Conv2d(self.internal_channels, out_channels, 1, bias=bias)
self.conv4_bn = nn.BatchNorm2d(out_channels)
self.regul = nn.Dropout2d(p=dropout_prob)
def forward(self, x):
residual = x
main = self.relu(self.conv1_bn(self.conv1(x)))
main = self.relu(self.conv2_bn(self.conv2(main)))
main = self.conv4_bn(self.conv4(main))
main = self.regul(main)
out = self.relu(torch.add(main, residual))
return out
class Factorized_Block(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1,
dropout_prob=0., bias=False, non_linear='ReLU'):
super(Factorized_Block, self).__init__()
self.relu = NON_LINEARITY[non_linear]
self.internal_channels = in_channels // 4
self.compress_conv1 = nn.Conv2d(in_channels, self.internal_channels, 1, padding=0, bias=bias)
self.conv1_bn = nn.BatchNorm2d(self.internal_channels)
# here is relu
self.conv2_1 = nn.Conv2d(self.internal_channels, self.internal_channels, (kernel_size, 1), stride=(stride, 1),
padding=(int((kernel_size - 1) / 2 * dilation), 0), dilation=(dilation, 1), bias=bias)
self.conv2_1_bn = nn.BatchNorm2d(self.internal_channels)
self.conv2_2 = nn.Conv2d(self.internal_channels, self.internal_channels, (1, kernel_size), stride=(1, stride),
padding=(0, int((kernel_size - 1) / 2 * dilation)), dilation=(1, dilation), bias=bias)
self.conv2_2_bn = nn.BatchNorm2d(self.internal_channels)
# here is relu
self.extend_conv3 = nn.Conv2d(self.internal_channels, out_channels, 1, padding=0, bias=bias)
self.conv3_bn = nn.BatchNorm2d(out_channels)
self.regul = nn.Dropout2d(p=dropout_prob)
def forward(self, x):
residual = x
main = self.relu((self.conv1_bn(self.compress_conv1(x))))
main = self.relu(self.conv2_1_bn(self.conv2_1(main)))
main = self.relu(self.conv2_2_bn(self.conv2_2(main)))
main = self.conv3_bn(self.extend_conv3(main))
main = self.regul(main)
out = self.relu((torch.add(residual, main)))
return out
class FSSNet(nn.Module):
def __init__(self, classes):
super().__init__()
self.initial_block = InitialBlock(3, 16)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(16, 64, padding=1, dropout_prob=0.03)
self.factorized1_1 = Factorized_Block(64, 64, dropout_prob=0.03)
self.factorized1_2 = Factorized_Block(64, 64, dropout_prob=0.03)
self.factorized1_3 = Factorized_Block(64, 64, dropout_prob=0.03)
self.factorized1_4 = Factorized_Block(64, 64, dropout_prob=0.03)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(64, 128, padding=1, dropout_prob=0.3)
self.dilated2_1 = DilatedBlock(128, 128, dilation=2, dropout_prob=0.3)
self.dilated2_2 = DilatedBlock(128, 128, dilation=5, dropout_prob=0.3)
self.dilated2_3 = DilatedBlock(128, 128, dilation=9, dropout_prob=0.3)
self.dilated2_4 = DilatedBlock(128, 128, dilation=2, dropout_prob=0.3)
self.dilated2_5 = DilatedBlock(128, 128, dilation=5, dropout_prob=0.3)
self.dilated2_6 = DilatedBlock(128, 128, dilation=9, dropout_prob=0.3)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(128, 64, dropout_prob=0.3)
self.bottleneck4_1 = DilatedBlock(64, 64, dropout_prob=0.3)
self.bottleneck4_2 = DilatedBlock(64, 64, dropout_prob=0.3)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(64, 16, dropout_prob=0.3)
self.bottleneck5_1 = DilatedBlock(16, 16, dropout_prob=0.3)
self.bottleneck5_2 = DilatedBlock(16, 16, dropout_prob=0.3)
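        # final stride-2 transposed conv restores the input resolution and outputs per-class logits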
self.transposed_conv = nn.ConvTranspose2d(16, classes, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
def forward(self, x):
        # Initial block
x = self.initial_block(x)
# Encoder - Block 1
x_1= self.downsample1_0(x)
x = self.factorized1_1(x_1)
x = self.factorized1_2(x)
x = self.factorized1_3(x)
x = self.factorized1_4(x)
# Encoder - Block 2
x_2 = self.downsample2_0(x)
# print(x_2.shape)
x = self.dilated2_1(x_2)
x = self.dilated2_2(x)
x = self.dilated2_3(x)
x = self.dilated2_4(x)
x = self.dilated2_5(x)
x = self.dilated2_6(x)
# print(x.shape)
# Decoder - Block 3
x = self.upsample4_0(x, x_2)
x = self.bottleneck4_1(x)
x = self.bottleneck4_2(x)
# Decoder - Block 4
x = self.upsample5_0(x, x_1)
x = self.bottleneck5_1(x)
x = self.bottleneck5_2(x)
# Fullconv - DeConv
x = self.transposed_conv(x)
return x
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = FSSNet(classes=19).to(device)
summary(model,(3,512,1024))
| [
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.ConvTranspose2d",
"torch.add",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.Dropout2d"
] | 1.1.0 | ZAKAUDD/Segmentation-Networks | 7e006809a7345819ebc50326175df156beeca618 |
1.4 | from abc import ABC, abstractmethod
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class mab_user(ABC):
def __init__(self, n_arms, lamb=1):
super(mab_user, self).__init__()
self.t = torch.tensor(1.0)
self.r = torch.zeros(n_arms)
self.n = torch.zeros(n_arms)
self.id = -1
self.returns = 0
self.lamb = lamb
@abstractmethod
def choose(self):
pass
@abstractmethod
def update(self, arm, reward):
pass
class perfect_user(mab_user):
# users that always make perfect decision -- can be paired with recEngines
# in CF simulations
def __init__(self, n_arms):
super().__init__(n_arms)
def setup_learners(self, learners):
#this setup routine must be called before perfect_user can run
self.learners = learners
def choose(self):
l_max = [0]*len(self.learners)
for i,learner in enumerate(self.learners):
l_max[i] = torch.max(learner.U[self.id] @ learner.V.t())
return torch.argmax(torch.tensor(l_max))
def update(self, arm, reward):
pass
class ucb_user(mab_user):
def __init__(self, n_arms):
super().__init__(n_arms)
def _ranking(self):
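        # UCB1 index: empirical mean reward plus a lamb * sqrt(2 * ln(t) / n) exploration bonus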
return self.r + self.lamb*torch.sqrt(2*torch.log(self.t)/self.n)
def choose(self):
return torch.argmax(self._ranking())
def update(self, arm, reward):
self.r[arm] = self.r[arm]*(self.n[arm]) + reward
self.n[arm] += 1
self.r[arm] /= self.n[arm]
self.t += 1
self.returns += reward
class e_greedy_user(ucb_user):
def __init__(self, n_arms, eps_scaling=0.333, r_tol=1e-20, eps0=1.0):
super().__init__(n_arms)
self.eps_scaling = eps_scaling
self.eps = eps0
self.eps0 = eps0
self.n_arms = n_arms
self.r_tol = r_tol
def choose(self):
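        # exploit with probability 1 - eps (tiny noise breaks ties between equal means); otherwise pick a uniformly random arm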
if random.random() > self.eps:
a = torch.argmax(self.r + self.r_tol*torch.randn(self.r.shape))
else:
a = random.randint(0,self.n_arms-1)
return a
def update(self, arm, reward):
super().update(arm, reward)
self.eps = self.eps0/(self.t**self.eps_scaling)
class sw_ucb_user(mab_user):
def __init__(self, n_arms):
        super(sw_ucb_user, self).__init__(n_arms)
self.n_arms = n_arms
self.t = torch.tensor(1.0)
        self.tau = torch.tensor(0.0)  # current sliding-window length (assumed to start at 0)
self.sw_r = []
self.sw_arms = []
self.n = torch.zeros(self.n_arms)
self.r = torch.zeros(self.n_arms)
self.alpha = 0.9
self.lamb = 1
self.id = -1
self.returns = 0
    def choose(self):
        # mab_user.choose is abstract, so sw_ucb_user needs a concrete version; argmax of the ranking mirrors ucb_user
        return torch.argmax(self._ranking())
    def _ranking(self):
        return self.r/self.n + self.lamb*torch.sqrt(
            (1+self.alpha)*torch.log(self.t)/self.n)
def update(self, arm, reward):
        self.sw_arms.append(arm)
self.sw_r.append(reward)
self.r[arm] += reward
self.returns += reward
        self.n[arm] += 1
        self.t += 1  # advance time so the window tau = ceil(lamb * t**alpha) can grow
tau_prime = torch.min(torch.ceil(self.lamb*(self.t**self.alpha)),self.t)
delta_tau = tau_prime - self.tau
if delta_tau < 1.0:
            arm = self.sw_arms.pop(0)
            self.r[arm] -= self.sw_r.pop(0)
self.n[arm] -= 1
self.tau = tau_prime
| [
"torch.zeros",
"torch.ceil",
"torch.tensor",
"torch.log",
"torch.randn"
] | 1.4.0 | tginart/competing-ai | 75c456854e4770adf8be7cd56e58177d50f74a24 |
1.0 | import numpy as np
import tempfile
import os
import pytest
import torch
from anndata import AnnData
from scvi.dataset import (
AnnDatasetFromAnnData,
CortexDataset,
SyntheticDataset,
GeneExpressionDataset,
Dataset10X,
)
from scvi.inference import (
JointSemiSupervisedTrainer,
AlternateSemiSupervisedTrainer,
ClassifierTrainer,
UnsupervisedTrainer,
AdapterTrainer,
TotalTrainer,
TotalPosterior,
)
from scvi.inference.posterior import unsupervised_clustering_accuracy
from scvi.inference.posterior_utils import load_posterior
from scvi.inference.annotation import compute_accuracy_rf, compute_accuracy_svc
from scvi.models import VAE, SCANVI, VAEC, LDVAE, TOTALVI, AutoZIVAE
from scvi.models.distributions import ZeroInflatedNegativeBinomial, NegativeBinomial
from scvi.models.classifier import Classifier
from scvi.models.log_likelihood import log_zinb_positive, log_nb_positive
from scvi import set_seed
set_seed(0)
use_cuda = True
def test_cortex(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
trainer_cortex_vae = UnsupervisedTrainer(
vae, cortex_dataset, train_size=0.5, use_cuda=use_cuda
)
trainer_cortex_vae.train(n_epochs=1)
trainer_cortex_vae.train_set.reconstruction_error()
trainer_cortex_vae.train_set.differential_expression_stats()
trainer_cortex_vae.train_set.generate_feature_correlation_matrix(
n_samples=2, correlation_type="pearson"
)
trainer_cortex_vae.train_set.generate_feature_correlation_matrix(
n_samples=2, correlation_type="spearman"
)
trainer_cortex_vae.train_set.imputation(n_samples=1)
trainer_cortex_vae.test_set.imputation(n_samples=5)
trainer_cortex_vae.corrupt_posteriors(corruption="binomial")
trainer_cortex_vae.corrupt_posteriors()
trainer_cortex_vae.train(n_epochs=1)
trainer_cortex_vae.uncorrupt_posteriors()
trainer_cortex_vae.train_set.imputation_benchmark(
n_samples=1, show_plot=False, title_plot="imputation", save_path=save_path
)
trainer_cortex_vae.train_set.generate_parameters()
n_cells, n_genes = (
len(trainer_cortex_vae.train_set.indices),
cortex_dataset.nb_genes,
)
n_samples = 3
(dropout, means, dispersions) = trainer_cortex_vae.train_set.generate_parameters()
assert dropout.shape == (n_cells, n_genes) and means.shape == (n_cells, n_genes)
assert dispersions.shape == (n_cells, n_genes)
(dropout, means, dispersions) = trainer_cortex_vae.train_set.generate_parameters(
n_samples=n_samples
)
assert dropout.shape == (n_samples, n_cells, n_genes)
assert means.shape == (n_samples, n_cells, n_genes)
(dropout, means, dispersions) = trainer_cortex_vae.train_set.generate_parameters(
n_samples=n_samples, give_mean=True
)
assert dropout.shape == (n_cells, n_genes) and means.shape == (n_cells, n_genes)
full = trainer_cortex_vae.create_posterior(
vae, cortex_dataset, indices=np.arange(len(cortex_dataset))
)
x_new, x_old = full.generate(n_samples=10)
assert x_new.shape == (cortex_dataset.nb_cells, cortex_dataset.nb_genes, 10)
assert x_old.shape == (cortex_dataset.nb_cells, cortex_dataset.nb_genes)
trainer_cortex_vae.train_set.imputation_benchmark(
n_samples=1, show_plot=False, title_plot="imputation", save_path=save_path
)
svaec = SCANVI(
cortex_dataset.nb_genes, cortex_dataset.n_batches, cortex_dataset.n_labels
)
trainer_cortex_svaec = JointSemiSupervisedTrainer(
svaec, cortex_dataset, n_labelled_samples_per_class=3, use_cuda=use_cuda
)
trainer_cortex_svaec.train(n_epochs=1)
trainer_cortex_svaec.labelled_set.accuracy()
trainer_cortex_svaec.full_dataset.reconstruction_error()
svaec = SCANVI(
cortex_dataset.nb_genes, cortex_dataset.n_batches, cortex_dataset.n_labels
)
trainer_cortex_svaec = AlternateSemiSupervisedTrainer(
svaec, cortex_dataset, n_labelled_samples_per_class=3, use_cuda=use_cuda
)
trainer_cortex_svaec.train(n_epochs=1, lr=1e-2)
trainer_cortex_svaec.unlabelled_set.accuracy()
data_train, labels_train = trainer_cortex_svaec.labelled_set.raw_data()
data_test, labels_test = trainer_cortex_svaec.unlabelled_set.raw_data()
compute_accuracy_svc(
data_train,
labels_train,
data_test,
labels_test,
param_grid=[{"C": [1], "kernel": ["linear"]}],
)
compute_accuracy_rf(
data_train,
labels_train,
data_test,
labels_test,
param_grid=[{"max_depth": [3], "n_estimators": [10]}],
)
cls = Classifier(cortex_dataset.nb_genes, n_labels=cortex_dataset.n_labels)
cls_trainer = ClassifierTrainer(cls, cortex_dataset)
cls_trainer.train(n_epochs=1)
cls_trainer.train_set.accuracy()
def test_synthetic_1():
synthetic_dataset = SyntheticDataset()
synthetic_dataset.cell_types = np.array(["A", "B", "C"])
svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
)
trainer_synthetic_svaec = JointSemiSupervisedTrainer(
svaec, synthetic_dataset, use_cuda=use_cuda
)
trainer_synthetic_svaec.train(n_epochs=1)
trainer_synthetic_svaec.labelled_set.entropy_batch_mixing()
with tempfile.TemporaryDirectory() as temp_dir:
posterior_save_path = os.path.join(temp_dir, "posterior_data")
original_post = trainer_synthetic_svaec.labelled_set.sequential()
original_post.save_posterior(posterior_save_path)
new_svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
)
new_post = load_posterior(posterior_save_path, model=new_svaec, use_cuda=False)
assert np.array_equal(new_post.indices, original_post.indices)
assert np.array_equal(new_post.gene_dataset.X, original_post.gene_dataset.X)
assert np.array_equal(
new_post.gene_dataset.labels, original_post.gene_dataset.labels
)
trainer_synthetic_svaec.full_dataset.knn_purity()
trainer_synthetic_svaec.labelled_set.show_t_sne(n_samples=5)
trainer_synthetic_svaec.unlabelled_set.show_t_sne(n_samples=5, color_by="labels")
trainer_synthetic_svaec.labelled_set.show_t_sne(
n_samples=5, color_by="batches and labels"
)
trainer_synthetic_svaec.labelled_set.clustering_scores()
trainer_synthetic_svaec.labelled_set.clustering_scores(prediction_algorithm="gmm")
trainer_synthetic_svaec.unlabelled_set.unsupervised_classification_accuracy()
trainer_synthetic_svaec.unlabelled_set.differential_expression_score(
synthetic_dataset.labels.ravel() == 1,
synthetic_dataset.labels.ravel() == 2,
n_samples=2,
M_permutation=10,
)
trainer_synthetic_svaec.unlabelled_set.one_vs_all_degenes(
n_samples=2, M_permutation=10
)
def test_synthetic_2():
synthetic_dataset = SyntheticDataset()
vaec = VAEC(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
)
trainer_synthetic_vaec = JointSemiSupervisedTrainer(
vaec,
synthetic_dataset,
use_cuda=use_cuda,
frequency=1,
early_stopping_kwargs={
"early_stopping_metric": "reconstruction_error",
"on": "labelled_set",
"save_best_state_metric": "reconstruction_error",
},
)
trainer_synthetic_vaec.train(n_epochs=2)
def base_benchmark(gene_dataset):
vae = VAE(gene_dataset.nb_genes, gene_dataset.n_batches, gene_dataset.n_labels)
trainer = UnsupervisedTrainer(vae, gene_dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=1)
return trainer
def ldvae_benchmark(dataset, n_epochs, use_cuda=True):
ldvae = LDVAE(
dataset.nb_genes, n_batch=dataset.n_batches, latent_distribution="normal"
)
trainer = UnsupervisedTrainer(ldvae, dataset, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs)
trainer.test_set.reconstruction_error()
trainer.test_set.marginal_ll()
ldvae = LDVAE(dataset.nb_genes, n_batch=dataset.n_batches, latent_distribution="ln")
trainer = UnsupervisedTrainer(ldvae, dataset, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs)
trainer.test_set.reconstruction_error()
ldvae.get_loadings()
return trainer
def totalvi_benchmark(dataset, n_epochs, use_cuda=True):
totalvae = TOTALVI(
dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
)
trainer = TotalTrainer(
totalvae, dataset, train_size=0.5, use_cuda=use_cuda, early_stopping_kwargs=None
)
trainer.train(n_epochs=n_epochs)
trainer.test_set.reconstruction_error()
trainer.test_set.marginal_ll()
trainer.test_set.get_protein_background_mean()
trainer.test_set.get_latent()
trainer.test_set.generate()
trainer.test_set.get_sample_dropout()
trainer.test_set.get_normalized_denoised_expression(transform_batch=0)
trainer.test_set.get_normalized_denoised_expression(transform_batch=0)
trainer.test_set.imputation()
trainer.test_set.get_protein_mean()
trainer.test_set.one_vs_all_degenes(n_samples=2, M_permutation=10)
trainer.test_set.generate_feature_correlation_matrix(n_samples=2)
trainer.test_set.generate_feature_correlation_matrix(n_samples=2, transform_batch=0)
return trainer
def test_synthetic_3():
gene_dataset = SyntheticDataset()
trainer = base_benchmark(gene_dataset)
adapter_trainer = AdapterTrainer(
trainer.model, gene_dataset, trainer.train_set, frequency=1
)
adapter_trainer.train(n_path=1, n_epochs=1)
def test_nb_not_zinb():
synthetic_dataset = SyntheticDataset()
svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
labels_groups=[0, 0, 1],
reconstruction_loss="nb",
)
trainer_synthetic_svaec = JointSemiSupervisedTrainer(
svaec, synthetic_dataset, use_cuda=use_cuda
)
trainer_synthetic_svaec.train(n_epochs=1)
def test_poisson_not_zinb():
synthetic_dataset = SyntheticDataset()
svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
labels_groups=[0, 0, 1],
reconstruction_loss="poisson",
)
trainer_synthetic_svaec = JointSemiSupervisedTrainer(
svaec, synthetic_dataset, use_cuda=use_cuda
)
trainer_synthetic_svaec.train(n_epochs=1)
def test_classifier_accuracy(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
cls = Classifier(cortex_dataset.nb_genes, n_labels=cortex_dataset.n_labels)
cls_trainer = ClassifierTrainer(
cls,
cortex_dataset,
metrics_to_monitor=["accuracy"],
frequency=1,
early_stopping_kwargs={
"early_stopping_metric": "accuracy",
"save_best_state_metric": "accuracy",
},
)
cls_trainer.train(n_epochs=2)
cls_trainer.train_set.accuracy()
def test_LDVAE(save_path):
synthetic_datset_one_batch = SyntheticDataset(n_batches=1)
ldvae_benchmark(synthetic_datset_one_batch, n_epochs=1, use_cuda=False)
synthetic_datset_two_batches = SyntheticDataset(n_batches=2)
ldvae_benchmark(synthetic_datset_two_batches, n_epochs=1, use_cuda=False)
def test_sampling_zl(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
cortex_vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae, cortex_dataset, train_size=0.5, use_cuda=use_cuda
)
trainer_cortex_vae.train(n_epochs=2)
cortex_cls = Classifier((cortex_vae.n_latent + 1), n_labels=cortex_dataset.n_labels)
trainer_cortex_cls = ClassifierTrainer(
cortex_cls, cortex_dataset, sampling_model=cortex_vae, sampling_zl=True
)
trainer_cortex_cls.train(n_epochs=2)
trainer_cortex_cls.test_set.accuracy()
def test_annealing_procedures(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
cortex_vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae,
cortex_dataset,
train_size=0.5,
use_cuda=use_cuda,
n_epochs_kl_warmup=1,
)
trainer_cortex_vae.train(n_epochs=2)
assert trainer_cortex_vae.kl_weight >= 0.99, "Annealing should be over"
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae,
cortex_dataset,
train_size=0.5,
use_cuda=use_cuda,
n_epochs_kl_warmup=5,
)
trainer_cortex_vae.train(n_epochs=2)
assert trainer_cortex_vae.kl_weight <= 0.99, "Annealing should be proceeding"
# iter
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae,
cortex_dataset,
train_size=0.5,
use_cuda=use_cuda,
n_iter_kl_warmup=1,
n_epochs_kl_warmup=None,
)
trainer_cortex_vae.train(n_epochs=2)
assert trainer_cortex_vae.kl_weight >= 0.99, "Annealing should be over"
def test_differential_expression(save_path):
dataset = CortexDataset(save_path=save_path)
n_cells = len(dataset)
all_indices = np.arange(n_cells)
vae = VAE(dataset.nb_genes, dataset.n_batches)
trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=2)
post = trainer.create_posterior(vae, dataset, shuffle=False, indices=all_indices)
with tempfile.TemporaryDirectory() as temp_dir:
posterior_save_path = os.path.join(temp_dir, "posterior_data")
post = post.sequential(batch_size=3)
post.save_posterior(posterior_save_path)
new_vae = VAE(dataset.nb_genes, dataset.n_batches)
new_post = load_posterior(posterior_save_path, model=new_vae, use_cuda=False)
assert new_post.data_loader.batch_size == 3
assert np.array_equal(new_post.indices, post.indices)
assert np.array_equal(new_post.gene_dataset.X, post.gene_dataset.X)
# Sample scale example
px_scales = post.scale_sampler(
n_samples_per_cell=4, n_samples=None, selection=all_indices
)["scale"]
assert (
px_scales.shape[1] == dataset.nb_genes
), "posterior scales should have shape (n_samples, n_genes)"
# Differential expression different models
idx_1 = [1, 2, 3]
idx_2 = [4, 5, 6, 7]
de_dataframe = post.differential_expression_score(
idx1=idx_1,
idx2=idx_2,
n_samples=10,
mode="vanilla",
use_permutation=True,
M_permutation=100,
)
de_dataframe = post.differential_expression_score(
idx1=idx_1,
idx2=idx_2,
n_samples=10,
mode="change",
use_permutation=True,
M_permutation=100,
cred_interval_lvls=[0.5, 0.95],
)
print(de_dataframe.keys())
assert (
de_dataframe["lfc_confidence_interval_0.5_min"]
<= de_dataframe["lfc_confidence_interval_0.5_max"]
).all()
assert (
de_dataframe["lfc_confidence_interval_0.95_min"]
<= de_dataframe["lfc_confidence_interval_0.95_max"]
).all()
# DE estimation example
de_probabilities = de_dataframe.loc[:, "proba_de"]
assert ((0.0 <= de_probabilities) & (de_probabilities <= 1.0)).all()
# Test totalVI DE
sp = os.path.join(save_path, "10X")
dataset = Dataset10X(dataset_name="pbmc_10k_protein_v3", save_path=sp)
n_cells = len(dataset)
all_indices = np.arange(n_cells)
vae = TOTALVI(
dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
)
trainer = TotalTrainer(
vae, dataset, train_size=0.5, use_cuda=use_cuda, early_stopping_kwargs=None
)
trainer.train(n_epochs=2)
post = trainer.create_posterior(
vae, dataset, shuffle=False, indices=all_indices, type_class=TotalPosterior
)
# Differential expression different models
idx_1 = [1, 2, 3]
idx_2 = [4, 5, 6, 7]
de_dataframe = post.differential_expression_score(
idx1=idx_1,
idx2=idx_2,
n_samples=10,
mode="vanilla",
use_permutation=True,
M_permutation=100,
)
de_dataframe = post.differential_expression_score(
idx1=idx_1,
idx2=idx_2,
n_samples=10,
mode="change",
use_permutation=True,
M_permutation=100,
)
def test_totalvi(save_path):
synthetic_dataset_one_batch = SyntheticDataset(n_batches=1)
totalvi_benchmark(synthetic_dataset_one_batch, n_epochs=1, use_cuda=use_cuda)
synthetic_dataset_two_batches = SyntheticDataset(n_batches=2)
totalvi_benchmark(synthetic_dataset_two_batches, n_epochs=1, use_cuda=use_cuda)
# adversarial testing
dataset = synthetic_dataset_two_batches
totalvae = TOTALVI(
dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
)
trainer = TotalTrainer(
totalvae,
dataset,
train_size=0.5,
use_cuda=use_cuda,
early_stopping_kwargs=None,
use_adversarial_loss=True,
)
trainer.train(n_epochs=1)
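    # round-trip the TotalPosterior through save_posterior / load_posterior and check the metadata survives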
with tempfile.TemporaryDirectory() as temp_dir:
posterior_save_path = os.path.join(temp_dir, "posterior_data")
original_post = trainer.create_posterior(
totalvae,
dataset,
indices=np.arange(len(dataset)),
type_class=TotalPosterior,
)
original_post.save_posterior(posterior_save_path)
new_totalvae = TOTALVI(
dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
)
new_post = load_posterior(
posterior_save_path, model=new_totalvae, use_cuda=False
)
assert new_post.posterior_type == "TotalPosterior"
assert np.array_equal(
new_post.gene_dataset.protein_expression, dataset.protein_expression
)
def test_autozi(save_path):
data = SyntheticDataset(n_batches=1)
for disp_zi in ["gene", "gene-label"]:
autozivae = AutoZIVAE(
n_input=data.nb_genes,
dispersion=disp_zi,
zero_inflation=disp_zi,
n_labels=data.n_labels,
)
trainer_autozivae = UnsupervisedTrainer(
model=autozivae, gene_dataset=data, train_size=0.5
)
trainer_autozivae.train(n_epochs=2, lr=1e-2)
trainer_autozivae.test_set.elbo()
trainer_autozivae.test_set.reconstruction_error()
trainer_autozivae.test_set.marginal_ll()
def test_multibatches_features():
data = [
np.random.randint(1, 5, size=(20, 10)),
np.random.randint(1, 10, size=(20, 10)),
np.random.randint(1, 10, size=(20, 10)),
np.random.randint(1, 10, size=(30, 10)),
]
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
vae = VAE(dataset.nb_genes, dataset.n_batches)
trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=2)
trainer.test_set.imputation(n_samples=2, transform_batch=0)
trainer.train_set.imputation(n_samples=2, transform_batch=[0, 1, 2])
def test_deprecated_munkres():
y = np.array([0, 1, 0, 1, 0, 1, 1, 1])
y_pred = np.array([0, 0, 0, 0, 1, 1, 1, 1])
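    # the best one-to-one matching between predicted clusters and labels recovers 5 of 8 samples -> accuracy 0.625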
reward, assignment = unsupervised_clustering_accuracy(y, y_pred)
assert reward == 0.625
assert (assignment == np.array([[0, 0], [1, 1]])).all()
y = np.array([1, 1, 2, 2, 0, 0, 3, 3])
y_pred = np.array([1, 1, 2, 2, 3, 3, 0, 0])
reward, assignment = unsupervised_clustering_accuracy(y, y_pred)
assert reward == 1.0
assert (assignment == np.array([[0, 3], [1, 1], [2, 2], [3, 0]])).all()
def test_zinb_distribution():
theta = 100.0 + torch.rand(size=(2,))
mu = 15.0 * torch.ones_like(theta)
pi = torch.randn_like(theta)
x = torch.randint_like(mu, high=20)
log_p_ref = log_zinb_positive(x, mu, theta, pi)
dist = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
log_p_zinb = dist.log_prob(x)
assert (log_p_ref - log_p_zinb).abs().max().item() <= 1e-8
torch.manual_seed(0)
s1 = dist.sample((100,))
assert s1.shape == (100, 2)
s2 = dist.sample(sample_shape=(4, 3))
assert s2.shape == (4, 3, 2)
log_p_ref = log_nb_positive(x, mu, theta)
dist = NegativeBinomial(mu=mu, theta=theta)
log_p_nb = dist.log_prob(x)
assert (log_p_ref - log_p_nb).abs().max().item() <= 1e-8
s1 = dist.sample((1000,))
assert s1.shape == (1000, 2)
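    # empirical moments should roughly match the analytic NB mean (mu) and standard deviation sqrt(mu + mu^2 / theta)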
assert (s1.mean(0) - mu).abs().mean() <= 1e0
assert (s1.std(0) - (mu + mu * mu / theta) ** 0.5).abs().mean() <= 1e0
size = (50, 3)
theta = 100.0 + torch.rand(size=size)
mu = 15.0 * torch.ones_like(theta)
pi = torch.randn_like(theta)
x = torch.randint_like(mu, high=20)
dist1 = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
dist2 = NegativeBinomial(mu=mu, theta=theta)
assert dist1.log_prob(x).shape == size
assert dist2.log_prob(x).shape == size
with pytest.raises(ValueError):
ZeroInflatedNegativeBinomial(mu=-mu, theta=theta, zi_logits=pi)
with pytest.warns(UserWarning):
dist1.log_prob(-x) # ensures neg values raise warning
with pytest.warns(UserWarning):
dist2.log_prob(0.5 * x) # ensures float values raise warning
def test_anndata_loader():
x = np.random.randint(low=0, high=100, size=(15, 4))
batch_ids = np.random.randint(low=0, high=2, size=(15,))
n_batches = 2
adata = AnnData(X=x, obs=dict(batch=batch_ids))
_ = AnnDatasetFromAnnData(adata, batch_label="batch")
dataset = AnnDatasetFromAnnData(adata, batch_label="batch")
assert (
dataset.n_batches == n_batches
), "AnnDatasetFromAnnData should not modify the anndata object"
| [
"torch.rand",
"torch.manual_seed",
"torch.randn_like",
"torch.ones_like",
"torch.randint_like"
] | 1.0.1 | shaoxin0801/scVI | f439eeb7b696b01a281af2f0e2f49592318614cb |
1.0 | import argparse
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn.parallel
from contextlib import suppress
import os
from effdet import create_model, create_loader
from effdet.data import resolve_input_config
from timm.utils import setup_default_logging
from timm.models.layers import set_layer_config
from dataset import SiimCovidDataset
from utils import seed_everything, refine_det
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
torch.backends.cudnn.benchmark = True
def add_bool_arg(parser, name, default=False, help=''): # FIXME move to utils
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
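# Example: add_bool_arg(parser, 'soft-nms', default=None) creates paired --soft-nms / --no-soft-nms
# flags that share the destination 'soft_nms' (see its use below).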
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument("--folds", default=[0,1,2,3,4], nargs="+", type=int)
parser.add_argument("--frac", default=1.0, type=float)
parser.add_argument('--image-size', type=int, default=None)
parser.add_argument('--model', '-m', metavar='MODEL', default='tf_efficientdet_d7',
help='model architecture (default: tf_efficientdet_d7)')
add_bool_arg(parser, 'redundant-bias', default=None,
help='override model config for redundant bias layers')
add_bool_arg(parser, 'soft-nms', default=None, help='override model config for soft-nms')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='Override num_classes in model config if set. For fine-tuning from pretrained.')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='bilinear', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--fill-color', default=None, type=str, metavar='NAME',
help='Image augmentation fill (background) color ("mean" or int)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--amp', action='store_true', default=False,
help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
args = parser.parse_args()
SEED = 123
seed_everything(SEED)
if __name__ == "__main__":
    os.makedirs('predictions', exist_ok=True)
setup_default_logging()
if args.amp:
if has_apex:
args.apex_amp = True
elif has_native_amp:
args.native_amp = True
assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
args.prefetcher = not args.no_prefetcher
test_df = pd.read_csv('../../dataset/siim-covid19-detection/test_meta.csv')
if args.frac != 1:
test_df = test_df.sample(frac=args.frac).reset_index(drop=True)
models = {}
for fold in args.folds:
print('*'*20, 'Fold {}'.format(fold), '*'*20)
CHECKPOINT = 'checkpoints/{}_{}_fold{}/model_best.pth.tar'.format(args.model, args.image_size, fold)
# create model
with set_layer_config(scriptable=args.torchscript):
extra_args = {}
bench = create_model(
args.model,
bench_task='predict',
image_size=args.image_size,
num_classes=args.num_classes,
pretrained=args.pretrained,
redundant_bias=args.redundant_bias,
soft_nms=args.soft_nms,
checkpoint_path=CHECKPOINT,
checkpoint_ema=args.use_ema,
**extra_args,
)
model_config = bench.config
param_count = sum([m.numel() for m in bench.parameters()])
print('Model %s created, param count: %d' % (args.model, param_count))
bench = bench.cuda()
amp_autocast = suppress
if args.apex_amp:
bench = amp.initialize(bench, opt_level='O1')
print('Using NVIDIA APEX AMP. Validating in mixed precision.')
elif args.native_amp:
amp_autocast = torch.cuda.amp.autocast
print('Using native Torch AMP. Validating in mixed precision.')
else:
print('AMP not enabled. Validating in float32.')
input_config = resolve_input_config(args, model_config)
bench.eval()
models[fold] = bench
dataset = SiimCovidDataset(df=test_df, images_dir='../../dataset/siim-covid19-detection/images/test', image_size=args.image_size)
loader = create_loader(
dataset,
input_size=input_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=input_config['interpolation'],
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
pin_mem=args.pin_mem)
predict_dict = {}
for input, target in tqdm(loader):
image_idxs = target['img_idx'].data.cpu().numpy().tolist()
image_sizes = target['img_size'].data.cpu().numpy().tolist()
with amp_autocast(), torch.no_grad():
for fold in args.folds:
dets = models[fold](input, img_info=target).data.cpu().numpy()
flip_dets = models[fold](torch.flip(input, dims=(3,)).contiguous(), img_info=target).data.cpu().numpy()
for idx, det_pred, flip_det_pred, img_size in zip(image_idxs, dets, flip_dets, image_sizes):
imageid = test_df.loc[idx, 'imageid']
if imageid not in list(predict_dict.keys()):
predict_dict[imageid] = [[],[],[], img_size[0], img_size[1]]
box_pred = det_pred[:,:4].astype(float)
box_pred[:,[0,2]] = box_pred[:,[0,2]]/float(img_size[0])
box_pred[:,[1,3]] = box_pred[:,[1,3]]/float(img_size[1])
score_pred = det_pred[:,4]
label_pred = det_pred[:,5].astype(int) - 1
box_pred, label_pred, score_pred = refine_det(box_pred, label_pred, score_pred)
flip_box_pred = flip_det_pred[:,:4].astype(float)
flip_box_pred[:,[0,2]] = flip_box_pred[:,[0,2]]/float(img_size[0])
flip_box_pred[:,[1,3]] = flip_box_pred[:,[1,3]]/float(img_size[1])
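                    # undo the horizontal flip (TTA): mirror the normalized x-coordinates back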
flip_box_pred[:,[0,2]] = 1 - flip_box_pred[:,[0,2]]
flip_score_pred = flip_det_pred[:,4]
flip_label_pred = flip_det_pred[:,5].astype(int) - 1
flip_box_pred, flip_label_pred, flip_score_pred = refine_det(flip_box_pred, flip_label_pred, flip_score_pred)
predict_dict[imageid][0] += [box_pred, flip_box_pred]
predict_dict[imageid][1] += [score_pred, flip_score_pred]
predict_dict[imageid][2] += [label_pred, flip_label_pred]
pred_dict_path = 'predictions/{}_{}_fold{}_test_pred.pth'.format(args.model, args.image_size, '_'.join(str(x) for x in args.folds))
torch.save(predict_dict, pred_dict_path)
| [
"torch.flip",
"torch.save",
"torch.no_grad"
] | 1.0.3 | yellowdolphin/SIIM-COVID19-Detection | 31e8653b467ac35a8b1d92330ad5f15a12622676 |
1.3 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torchvision import models
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('Linear') != -1:
init.xavier_normal_(m.weight.data, gain=0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
class FeatureExtraction(nn.Module):
def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(FeatureExtraction, self).__init__()
downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)
model = [downconv, nn.ReLU(True), norm_layer(ngf)]
for i in range(n_layers):
in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512
out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512
downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)
model += [downconv, nn.ReLU(True)]
model += [norm_layer(out_ngf)]
model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
model += [norm_layer(512)]
model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
self.model = nn.Sequential(*model)
init_weights(self.model, init_type='normal')
def forward(self, x):
return self.model(x)
class FeatureL2Norm(torch.nn.Module):
def __init__(self):
super(FeatureL2Norm, self).__init__()
def forward(self, feature):
epsilon = 1e-6
norm = torch.pow(torch.sum(torch.pow(feature,2),1)+epsilon,0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature,norm)
class FeatureCorrelation(nn.Module):
def __init__(self):
super(FeatureCorrelation, self).__init__()
def forward(self, feature_A, feature_B):
b,c,h,w = feature_A.size()
# reshape features for matrix multiplication
feature_A = feature_A.transpose(2,3).contiguous().view(b,c,h*w)
feature_B = feature_B.view(b,c,h*w).transpose(1,2)
# perform matrix mult.
feature_mul = torch.bmm(feature_B,feature_A)
correlation_tensor = feature_mul.view(b,h,w,h*w).transpose(2,3).transpose(1,2)
return correlation_tensor
class FeatureRegression(nn.Module):
def __init__(self, input_nc=512,output_dim=6):
super(FeatureRegression, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_nc, 512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.linear = nn.Linear(64 * 4 * 3, output_dim)
self.tanh = nn.Tanh()
'''self.conv.to(device)
self.linear.to(device)
self.tanh.to(device)'''
def forward(self, x):
x = self.conv(x)
x = x.reshape(x.size(0), -1)
x = self.linear(x)
x = self.tanh(x)
return x
class TpsGridGen(nn.Module):
def __init__(self, out_h=256, out_w=192, use_regular_grid=True, grid_size=3, reg_factor=0):
super(TpsGridGen, self).__init__()
self.out_h, self.out_w = out_h, out_w
self.reg_factor = reg_factor
# create grid in numpy
self.grid = np.zeros( [self.out_h, self.out_w, 3], dtype=np.float32)
# sampling grid with dim-0 coords (Y)
self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h))
# grid_X,grid_Y: size [1,H,W,1,1]
self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3)
self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3)
self.grid_X = self.grid_X.to(device)
self.grid_Y = self.grid_Y.to(device)
# initialize regular grid for control points P_i
if use_regular_grid:
axis_coords = np.linspace(-1,1,grid_size)
self.N = grid_size*grid_size
P_Y,P_X = np.meshgrid(axis_coords,axis_coords)
P_X = np.reshape(P_X,(-1,1)) # size (N,1)
P_Y = np.reshape(P_Y,(-1,1)) # size (N,1)
P_X = torch.FloatTensor(P_X)
P_X = P_X.to(device)
P_Y = torch.FloatTensor(P_Y)
P_Y = P_Y.to(device)
self.P_X_base = P_X.clone()
self.P_X_base = self.P_X_base.to(device)
self.P_Y_base = P_Y.clone()
self.P_Y_base = self.P_Y_base.to(device)
self.Li = self.compute_L_inverse(P_X,P_Y).unsqueeze(0)
self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)
self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)
def forward(self, theta):
warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3))
return warped_grid
def compute_L_inverse(self,X,Y):
N = X.size()[0] # num of points (along dim 0)
# construct matrix K
Xmat = X.expand(N,N)
Ymat = Y.expand(N,N)
P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2)
P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation
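        # thin-plate-spline radial basis evaluated on squared pairwise distances: U = d^2 * log(d^2)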
K = torch.mul(P_dist_squared,torch.log(P_dist_squared))
# construct matrix L
O = torch.FloatTensor(N,1).fill_(1)
O = O.to(device)
Z = torch.FloatTensor(3,3).fill_(0)
Z = Z.to(device)
P = torch.cat((O,X,Y),1)
L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0)
Li = torch.inverse(L)
Li = Li.to(device)
return Li
def apply_transformation(self,theta,points):
if theta.dim()==2:
theta = theta.unsqueeze(2).unsqueeze(3)
# points should be in the [B,H,W,2] format,
# where points[:,:,:,0] are the X coords
# and points[:,:,:,1] are the Y coords
# input are the corresponding control points P_i
batch_size = theta.size()[0]
# split theta into point coordinates
Q_X=theta[:,:self.N,:,:].squeeze(3)
Q_Y=theta[:,self.N:,:,:].squeeze(3)
Q_X = Q_X + self.P_X_base.expand_as(Q_X)
Q_Y = Q_Y + self.P_Y_base.expand_as(Q_Y)
# get spatial dimensions of points
points_b = points.size()[0]
points_h = points.size()[1]
points_w = points.size()[2]
# repeat pre-defined control points along spatial dimensions of points to be transformed
P_X = self.P_X.expand((1,points_h,points_w,1,self.N))
P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N))
# compute weigths for non-linear part
W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X)
W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y)
# reshape
# W_X,W,Y: size [B,H,W,1,N]
W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
# compute weights for affine part
A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X)
A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y)
# reshape
# A_X,A,Y: size [B,H,W,1,3]
A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)
# compute distance P_i - (grid_X,grid_Y)
# grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch
points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N))
points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N))
if points_b==1:
delta_X = points_X_for_summation-P_X
delta_Y = points_Y_for_summation-P_Y
else:
# use expanded P_X,P_Y in batch dimension
delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation)
delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation)
dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2)
# U: size [1,H,W,1,N]
dist_squared[dist_squared==0]=1 # avoid NaN in log computation
U = torch.mul(dist_squared,torch.log(dist_squared))
# expand grid in batch dimension if necessary
points_X_batch = points[:,:,:,0].unsqueeze(3)
points_Y_batch = points[:,:,:,1].unsqueeze(3)
if points_b==1:
points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:])
points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:])
points_X_prime = A_X[:,:,:,:,0]+ \
torch.mul(A_X[:,:,:,:,1],points_X_batch) + \
torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \
torch.sum(torch.mul(W_X,U.expand_as(W_X)),4)
points_Y_prime = A_Y[:,:,:,:,0]+ \
torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \
torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \
torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4)
return torch.cat((points_X_prime,points_Y_prime),3)
class GMM(nn.Module):
'''Geometric matching module
'''
def __init__(self, opt):
super(GMM, self).__init__()
self.extraction_agnostic = FeatureExtraction(22, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)#.to(device)
self.extraction_cloth = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)#.to(device)
self.l2norm = FeatureL2Norm()#.to(device)
self.correlation = FeatureCorrelation()#.to(device)
self.regression_zero = FeatureRegression(input_nc=192, output_dim=2*opt.grid_size**2)#.to(device)
self.gridGen = TpsGridGen(opt.fine_height, opt.fine_width, grid_size=opt.grid_size)#.to(device)
self.extraction_warped_cloth = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)#.to(device)
self.regression_one = FeatureRegression(input_nc=192, output_dim=2*opt.grid_size**2)#.to(device)
def forward(self, agn, clt):
feature_agn = self.extraction_agnostic(agn)
feature_clt = self.extraction_cloth(clt)
feature_agn = self.l2norm(feature_agn)
feature_clt = self.l2norm(feature_clt)
        correlation_0 = self.correlation(feature_agn, feature_clt)
        theta = self.regression_zero(correlation_0)
grid_zero = self.gridGen(theta)
warped_coarse_cloth = F.grid_sample(clt, grid_zero, padding_mode='border')
feature_wc = self.extraction_warped_cloth(warped_coarse_cloth)
feature_wc = self.l2norm(feature_wc)
        correlation_1 = self.correlation(feature_agn, feature_wc)
        delta_theta = self.regression_one(correlation_1)
        # The original paper gives little detail on how theta and delta_theta are combined,
        # so element-wise addition is used here.
grid_one = self.gridGen(theta.add(delta_theta))
return grid_zero, theta, grid_one, delta_theta
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.inverse",
"torch.bmm",
"torch.cuda.is_available",
"torch.mul",
"torch.nn.init.constant_",
"torch.FloatTensor",
"torch.nn.init.normal_",
"torch.div",
"torch.nn.init.xavier_normal_",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.init.normal",
"torch.log",
"torch.pow",
"torch.nn.functional.grid_sample"
] | 1.3.0 | levindabhi/SieveNet | a5e2263acf28b52a551d4e139328957cf454e7e8 |
1.7 | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ split_long_utter_to_short.py ]
# Synopsis [ preprocess long audio / speech to shorter versions ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import torch
import argparse
import torchaudio
from tqdm import tqdm
from pathlib import Path
from joblib import Parallel, delayed
torchaudio.set_audio_backend("sox_io")
#############################
# PREPROCESS CONFIGURATIONS #
#############################
def get_preprocess_args():
parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    parser.add_argument('-i', '--input_path', default='/livingrooms/public/LibriLight/', type=str, help='Path to your LibriLight / LibriSpeech / TIMIT directory', required=False)
parser.add_argument('-o', '--output_path', default='/livingrooms/public/LibriLight/', type=str, help='Path to store output', required=False)
parser.add_argument('-s', '--split_size', default=60, type=int, help='Split size in seconds', required=False)
parser.add_argument('-a', '--audio_extension', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
parser.add_argument('-n', '--name', default='-splitted', type=str, help='Name to append on the original directory', required=False)
parser.add_argument('--n_jobs', default=-1, type=int, help='Number of jobs used for computation', required=False)
args = parser.parse_args()
return args
##################
# SPLIT AND SAVE #
##################
def split_and_save(input_file, current_split, args):
wav, sr = torchaudio.load(input_file)
# compute the size of each chunk
chunk_size = args.split_size*sr
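    # quotient full chunks of chunk_size samples, plus (if any) a shorter trailing chunk with the remainder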
quotient, remainder = divmod(wav.size(1), chunk_size)
sections = [chunk_size for _ in range(quotient)]
    if remainder > 0:
        sections.append(remainder)  # the remainder is the last, shorter chunk
splitted_wav = torch.split(wav, split_size_or_sections=sections, dim=1)
check_sum = 0
for i, w in enumerate(splitted_wav):
check_sum += w.size(1)
file_name = os.path.basename(input_file).split('.')[0]
new_file_name = file_name.replace(file_name, file_name+'-'+str(i))
new_file_path = input_file.replace(current_split, current_split+args.name)
new_file_path = new_file_path.replace(file_name, new_file_name)
if args.input_path != args.output_path:
new_file_path = new_file_path.replace(args.input_path, args.output_path)
os.makedirs((os.path.dirname(new_file_path)), exist_ok=True)
torchaudio.save(new_file_path, w, sr)
assert check_sum == wav.size(1)
###################
# GENERATE SPLITS #
###################
def generate_splits(args, tr_set, audio_extension):
for i, s in enumerate(tr_set):
if os.path.isdir(os.path.join(args.input_path, s.lower())):
s = s.lower()
elif os.path.isdir(os.path.join(args.input_path, s.upper())):
s = s.upper()
else:
            raise NotImplementedError
print('')
todo = list(Path(os.path.join(args.input_path, s)).rglob('*' + audio_extension)) # '*.flac'
print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
print('Splitting audio to shorter length...', flush=True)
Parallel(n_jobs=args.n_jobs)(delayed(split_and_save)(str(file), s, args) for file in tqdm(todo))
print('All done, saved at', args.output_path, 'exit.')
########
# MAIN #
########
def main():
# get arguments
args = get_preprocess_args()
if 'librilight' in args.input_path.lower():
SETS = ['small', 'medium', 'large']
elif 'librispeech' in args.input_path.lower():
SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
elif 'timit' in args.input_path.lower():
SETS = ['TRAIN', 'TEST']
else:
raise NotImplementedError
# change the SETS list to match your dataset, for example:
# SETS = ['train', 'dev', 'test']
# SETS = ['TRAIN', 'TEST']
# SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
# Select data sets
for idx, s in enumerate(SETS):
print('\t', idx, ':', s)
    tr_set = input('Please enter the indices of the splits you wish to preprocess (separate with space): ')
tr_set = [SETS[int(t)] for t in tr_set.split(' ')]
# Run split
generate_splits(args, tr_set, args.audio_extension)
if __name__ == '__main__':
main()
| [
"torch.split"
] | 1.7.0 | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 |
1.10 | import os
from easydict import EasyDict
import torch
# architecture
from basicts.archs.DCRNN_arch import DCRNN
# runner
from basicts.runners.DCRNN_runner import DCRNNRunner
from basicts.data.base_dataset import BaseDataset
from basicts.metrics.mae import masked_mae
from basicts.metrics.mape import masked_mape
from basicts.metrics.rmse import masked_rmse
from basicts.losses.losses import masked_l1_loss
from basicts.utils.serialization import load_adj
CFG = EasyDict()
resume = False  # DCRNN does not allow loading parameters, since it creates its parameters in the first iteration
if not resume:
import random
    _ = random.randint(-10**6, 10**6)
# ================= general ================= #
CFG.DESCRIPTION = 'DCRNN model configuration'
CFG.RUNNER = DCRNNRunner
CFG.DATASET_CLS = BaseDataset
CFG.DATASET_NAME = "PEMS07"
CFG.DATASET_TYPE = 'Traffic speed'
CFG._ = _
CFG.GPU_NUM = 1
CFG.METRICS = {
"MAE": masked_mae,
"RMSE": masked_rmse,
"MAPE": masked_mape
}
# ================= environment ================= #
CFG.ENV = EasyDict()
CFG.ENV.SEED = 1
CFG.ENV.CUDNN = EasyDict()
CFG.ENV.CUDNN.ENABLED = True
# ================= model ================= #
CFG.MODEL = EasyDict()
CFG.MODEL.NAME = 'DCRNN'
CFG.MODEL.ARCH = DCRNN
adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
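# "doubletransition" presumably loads the forward and backward random-walk transition matrices used by DCRNN's diffusion convolution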
CFG.MODEL.PARAM = {
"cl_decay_steps" : 2000,
"horizon" : 12,
"input_dim" : 2,
"max_diffusion_step": 2,
"num_nodes" : 883,
"num_rnn_layers" : 2,
"output_dim" : 1,
"rnn_units" : 64,
"seq_len" : 12,
"adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
"use_curriculum_learning": True
}
CFG.MODEL.FROWARD_FEATURES = [0, 1] # traffic speed, time in day
CFG.MODEL.TARGET_FEATURES = [0] # traffic speed
# ================= optim ================= #
CFG.TRAIN = EasyDict()
CFG.TRAIN.LOSS = masked_l1_loss
CFG.TRAIN.OPTIM = EasyDict()
CFG.TRAIN.OPTIM.TYPE = "Adam"
CFG.TRAIN.OPTIM.PARAM= {
"lr":0.003,
"eps":1e-3
}
CFG.TRAIN.LR_SCHEDULER = EasyDict()
CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
CFG.TRAIN.LR_SCHEDULER.PARAM= {
"milestones":[80],
"gamma":0.3
}
# ================= train ================= #
# CFG.TRAIN.CLIP = 5
CFG.TRAIN.NUM_EPOCHS = 200
CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
'checkpoints',
'_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
)
CFG.TRAIN.SETUP_GRAPH = True
# train data
CFG.TRAIN.DATA = EasyDict()
CFG.TRAIN.NULL_VAL = 0.0
## read data
CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TRAIN.DATA.BATCH_SIZE = 64
CFG.TRAIN.DATA.PREFETCH = False
CFG.TRAIN.DATA.SHUFFLE = True
CFG.TRAIN.DATA.NUM_WORKERS = 2
CFG.TRAIN.DATA.PIN_MEMORY = False
# ================= validate ================= #
CFG.VAL = EasyDict()
CFG.VAL.INTERVAL = 1
# validating data
CFG.VAL.DATA = EasyDict()
## read data
CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.VAL.DATA.BATCH_SIZE = 64
CFG.VAL.DATA.PREFETCH = False
CFG.VAL.DATA.SHUFFLE = False
CFG.VAL.DATA.NUM_WORKERS = 2
CFG.VAL.DATA.PIN_MEMORY = False
# ================= test ================= #
CFG.TEST = EasyDict()
CFG.TEST.INTERVAL = 1
# validating data
CFG.TEST.DATA = EasyDict()
## read data
CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TEST.DATA.BATCH_SIZE = 64
CFG.TEST.DATA.PREFETCH = False
CFG.TEST.DATA.SHUFFLE = False
CFG.TEST.DATA.NUM_WORKERS = 2
CFG.TEST.DATA.PIN_MEMORY = False
| [
"torch.tensor"
] | 1.10.0 | zezhishao/BasicTS | 584ca6f8215a6fc9976789b600996934ba2d499e |
1.2 | #!/usr/bin/env python
# coding: utf-8
import os
import yaml
import torch
import argparse
import numpy as np
from torch.distributed import get_rank, get_world_size
# For reproducibility; commenting these two lines out may speed up training
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Arguments
parser = argparse.ArgumentParser(description='Training E2E asr.')
parser.add_argument('--config', type=str, help='Path to experiment config.')
parser.add_argument('--name', default=None, type=str, help='Name for logging.')
parser.add_argument('--logdir', default='log/', type=str,
help='Logging path.', required=False)
parser.add_argument('--ckpdir', default='ckpt/', type=str,
help='Checkpoint path.', required=False)
parser.add_argument('--outdir', default='result/', type=str,
help='Decode output path.', required=False)
parser.add_argument('--load', default=None, type=str,
help='Load pre-trained model (for training only)', required=False)
parser.add_argument('--seed', default=0, type=int,
                    help='Random seed for reproducible results.', required=False)
parser.add_argument('--cudnn-ctc', action='store_true',
help='Switches CTC backend from torch to cudnn')
parser.add_argument('--njobs', default=6, type=int,
help='Number of threads for dataloader/decoding.', required=False)
parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
parser.add_argument('--no-pin', action='store_true',
help='Disable pin-memory for dataloader')
parser.add_argument('--test', action='store_true', help='Test the model.')
parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')
parser.add_argument('--lm', action='store_true',
help='Option for training RNNLM.')
# Following features in development.
parser.add_argument('--amp', action='store_true', help='Option to enable AMP.')
parser.add_argument('--reserve-gpu', default=0, type=float,
help='Option to reserve GPU ram for training.')
parser.add_argument('--jit', action='store_true',
help='Option for enabling jit in pytorch. (feature in development)')
parser.add_argument('--upstream',
help='Specify the upstream variant according to torch.hub.list')
parser.add_argument('--upstream_feature_selection',
help=f'Specify the layer to be extracted as the representation according to torch.hub.help')
parser.add_argument('--upstream_refresh', action='store_true',
help='Re-download cached ckpts for on-the-fly upstream variants')
parser.add_argument('--upstream_ckpt', metavar='{PATH,URL,GOOGLE_DRIVE_ID}',
help='Only set when the specified upstream has \'ckpt\' as an argument in torch.hub.help')
parser.add_argument('--upstream_trainable', '-f', action='store_true',
help='To fine-tune the whole upstream model')
parser.add_argument('--upstream_same_stride', action='store_true',
help='Make sure all upstream features are projected to the same stride in waveform seconds.')
parser.add_argument('--cache_dir', help='Explicitly set the dir for torch.hub')
parser.add_argument('--local_rank', type=int,
help=f'The GPU id this process should use while distributed training. \
None when not launched by torch.distributed.launch')
parser.add_argument('--backend', default='nccl', help='The backend for distributed training')
parser.add_argument('--load_ddp_to_nonddp', action='store_true',
help='The checkpoint is trained with ddp but loaded to a non-ddp model')
parser.add_argument('--load_nonddp_to_ddp', action='store_true',
help='The checkpoint is trained without ddp but loaded to a ddp model')
parser.add_argument('--dryrun', action='store_true',
help='Iterate the dataset decendingly by sequence length to make sure the training will not OOM')
parser.add_argument('--reinit_optimizer', action='store_true',
help='Load model without loading optimizer')
###
paras = parser.parse_args()
setattr(paras, 'gpu', not paras.cpu)
setattr(paras, 'pin_memory', not paras.no_pin)
setattr(paras, 'verbose', not paras.no_msg)
config = yaml.load(open(paras.config, 'r'), Loader=yaml.FullLoader)
if paras.cache_dir is not None:
os.makedirs(paras.cache_dir, exist_ok=True)
torch.hub.set_dir(paras.cache_dir)
# When torch.distributed.launch is used
if paras.local_rank is not None:
torch.cuda.set_device(paras.local_rank)
torch.distributed.init_process_group(paras.backend)
np.random.seed(paras.seed)
torch.manual_seed(paras.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(paras.seed)
# Hack to preserve GPU ram just incase OOM later on server
if paras.gpu and paras.reserve_gpu > 0:
buff = torch.randn(int(paras.reserve_gpu*1e9//4)).cuda()
del buff
if paras.lm:
# Train RNNLM
from bin.train_lm import Solver
mode = 'train'
else:
if paras.test:
# Test ASR
assert paras.load is None, 'Load option is mutually exclusive to --test'
from bin.test_asr import Solver
mode = 'test'
else:
# Train ASR
from bin.train_asr import Solver
mode = 'train'
solver = Solver(config, paras, mode)
solver.load_data()
solver.set_model()
solver.exec()
| [
"torch.cuda.manual_seed_all",
"torch.distributed.init_process_group",
"torch.hub.set_dir",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available"
] | 1.2.0 | s3prl/End-to-end-ASR-Pytorch | 64e3d844cebca1eb442b9327f43145c95c9a6088 |
1.7 | import time
from pathlib import Path
from tqdm import tqdm
import hydra
from omegaconf import DictConfig
# language processing
# import fasttext
# import fasttext.util
from transformers import BertTokenizer, BertModel
# データ処理
import numpy as np
import torch
def extract_feats(config):
start = time.time()
# FPs
with open(config.fp_list_path, "r") as f:
fp_list = [l.strip() for l in f]
# Prepare bert
bert_model_dir = Path(config.bert_model_dir)
vocab_file_path = bert_model_dir / "vocab.txt"
bert_tokenizer = BertTokenizer(vocab_file_path, do_lower_case=False, do_basic_tokenize=False)
bert_model = BertModel.from_pretrained(bert_model_dir)
bert_model.eval()
def preprocess_ipu(speaker_id, koen_id, ipu_id, ipu_tagtext, in_dir, out_dir):
# get tokens and fp labels
fp_labels = [0] # fps sometimes appear at the head of the breath group
tokens = ["[CLS]"]
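        # morphemes tagged as (F...) are filled pauses; the FP class id (index in fp_list + 1) is recorded on the previously added token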
for m in ipu_tagtext.split(" "):
if m.startswith("(F"):
fp = m.split("(F")[1].split(")")[0]
if fp in fp_list:
fp_labels[-1] = fp_list.index(fp) + 1
elif m != "":
tokens.append(m)
fp_labels.append(0)
tokens += ["[SEP]"]
fp_labels.append(0)
# get embedding
token_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
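        # add a batch dimension and cast to LongTensor, as expected by BertModel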
token_tensor = torch.Tensor(token_ids).unsqueeze(0).to(torch.long)
outputs = bert_model(token_tensor)
outputs_numpy = outputs[0].numpy().squeeze(axis=0).copy()
assert outputs_numpy.shape[0] == np.array(fp_labels).shape[0], \
"1st array length {} should be equal to 2nd array length {}".format(
outputs_numpy.shape[0], np.array(fp_labels).shape[0])
np.save(in_dir / f"{speaker_id}-{koen_id}-{ipu_id}-feats.npy", outputs_numpy)
np.save(out_dir / f"{speaker_id}-{koen_id}-{ipu_id}-feats.npy", np.array(fp_labels))
    # extract features
infeats_dir = Path(config.out_dir) / "infeats"
outfeats_dir = Path(config.out_dir) / "outfeats"
infeats_dir.mkdir(parents=True, exist_ok=True)
outfeats_dir.mkdir(parents=True, exist_ok=True)
with open(Path(config.out_dir) / f"ipu.list", "r") as f:
ipus = [tuple(l.split(":")) for l in f.readlines()]
with torch.no_grad():
for speaker_id, koen_id, ipu_id, ipu in tqdm(ipus):
preprocess_ipu(speaker_id, koen_id, ipu_id, ipu, infeats_dir, outfeats_dir)
# count time
n_ipu = len(ipus)
elapsed_time = time.time() - start
time_log = "elapsed_time of feature extraction: {} [sec]".format(elapsed_time)
time_log_ipu = "elapsed_time of feature extraction (per IPU): \
{} [sec]".format(elapsed_time / n_ipu)
print(time_log + "\n" + time_log_ipu)
with open(Path(config.out_dir) / "time.log", "w") as f:
f.write(time_log + "\n" + time_log_ipu)
def extract_feats_test(data_dir, fp_list_path, bert_model_dir, utt_list_name):
start = time.time()
# FPs
with open(fp_list_path, "r") as f:
fp_list = [l.strip() for l in f]
# Prepare bert
bert_model_dir = Path(bert_model_dir)
vocab_file_path = bert_model_dir / "vocab.txt"
bert_tokenizer = BertTokenizer(
vocab_file_path, do_lower_case=False, do_basic_tokenize=False)
bert_model = BertModel.from_pretrained(bert_model_dir)
bert_model.eval()
def preprocess_utt(utt_id, utt, in_dir, out_dir):
# get tokens and fp labels
fp_labels = [0] # fps sometimes appear at the head of the breath group
tokens = ["[CLS]"]
for m in utt.split(" "):
if m.startswith("(F"):
fp = m.split("(F")[1].split(")")[0]
if fp in fp_list:
fp_labels[-1] = fp_list.index(fp) + 1
elif m != "":
tokens.append(m)
fp_labels.append(0)
tokens += ["[SEP]"]
fp_labels.append(0)
# get embedding
token_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
token_tensor = torch.Tensor(token_ids).unsqueeze(0).to(torch.long)
outputs = bert_model(token_tensor)
outputs_numpy = outputs[0].numpy().squeeze(axis=0).copy()
assert outputs_numpy.shape[0] == np.array(fp_labels).shape[0], \
"1st array length {} should be equal to 2nd array length {}".format(
outputs_numpy.shape[0], np.array(fp_labels).shape[0])
np.save(in_dir / f"{utt_id}-feats.npy", outputs_numpy)
np.save(out_dir / f"{utt_id}-feats.npy", np.array(fp_labels))
    # extract features
infeats_dir = Path(data_dir) / "infeats"
outfeats_dir = Path(data_dir) / "outfeats"
infeats_dir.mkdir(parents=True, exist_ok=True)
outfeats_dir.mkdir(parents=True, exist_ok=True)
with open(Path(data_dir) / "{}.list".format(utt_list_name), "r") as f:
utts = [tuple(l.split(":")) for l in f.readlines()]
with torch.no_grad():
for utt_id, utt in tqdm(utts):
preprocess_utt(utt_id, utt, infeats_dir, outfeats_dir)
# count time
n_utt = len(utts)
elapsed_time = time.time() - start
time_log ="elapsed_time of feature extraction: {} [sec]".format(elapsed_time)
time_log_utt ="elapsed_time of feature extraction (per utt): \
{} [sec]".format(elapsed_time / n_utt)
print(time_log + "\n" + time_log_utt)
with open(Path(data_dir) / "time.log", "w") as f:
f.write(time_log + "\n" + time_log_utt)
@hydra.main(config_path="conf/preprocess", config_name="config")
def main(config: DictConfig):
extract_feats(config)
if __name__=="__main__":
main() | [
"torch.no_grad",
"torch.Tensor"
] | 1.7.0 | ndkgit339/filledpause_prediction_group | db511c081f155ec2c23afe82bc44c03c38618590 |
1.7 | import pytest
from typing import Any
import torch
from torchtyping import TensorType
import typeguard
a = b = c = None
def test_fixed_int_dim():
@typeguard.typechecked
def _3_dim_checker(x: TensorType[3]):
pass
@typeguard.typechecked
def _3m1_dim_checker(x: TensorType[3, -1]):
pass
@typeguard.typechecked
def _4_dim_checker(x: TensorType[4]):
pass
@typeguard.typechecked
def _4m1_dim_checker(x: TensorType[4, -1]):
pass
@typeguard.typechecked
def _m14_dim_checker(x: TensorType[-1, 4]):
pass
@typeguard.typechecked
def _m1m1_dim_checker(x: TensorType[-1, -1]):
pass
@typeguard.typechecked
def _34_dim_checker(x: TensorType[3, 4]):
pass
@typeguard.typechecked
def _34m1_dim_checker(x: TensorType[3, 4, -1]):
pass
@typeguard.typechecked
def _m14m1_dim_checker(x: TensorType[-1, 4, -1]):
pass
x = torch.rand(3)
_3_dim_checker(x)
with pytest.raises(TypeError):
_3m1_dim_checker(x)
with pytest.raises(TypeError):
_4_dim_checker(x)
with pytest.raises(TypeError):
_4m1_dim_checker(x)
with pytest.raises(TypeError):
_m14_dim_checker(x)
with pytest.raises(TypeError):
_m1m1_dim_checker(x)
with pytest.raises(TypeError):
_34_dim_checker(x)
with pytest.raises(TypeError):
_34m1_dim_checker(x)
with pytest.raises(TypeError):
_m14m1_dim_checker(x)
x = torch.rand(3, 4)
_3m1_dim_checker(x)
_m14_dim_checker(x)
_m1m1_dim_checker(x)
_34_dim_checker(x)
with pytest.raises(TypeError):
_3_dim_checker(x)
with pytest.raises(TypeError):
_4_dim_checker(x)
with pytest.raises(TypeError):
_4m1_dim_checker(x)
with pytest.raises(TypeError):
_34m1_dim_checker(x)
with pytest.raises(TypeError):
_m14m1_dim_checker(x)
x = torch.rand(4, 3)
_4m1_dim_checker(x)
_m1m1_dim_checker(x)
with pytest.raises(TypeError):
_3_dim_checker(x)
with pytest.raises(TypeError):
_3m1_dim_checker(x)
with pytest.raises(TypeError):
_4_dim_checker(x)
with pytest.raises(TypeError):
_m14_dim_checker(x)
with pytest.raises(TypeError):
_34_dim_checker(x)
with pytest.raises(TypeError):
_34m1_dim_checker(x)
with pytest.raises(TypeError):
_m14m1_dim_checker(x)
def test_str_dim():
@typeguard.typechecked
def _a_dim_checker(x: TensorType["a"]):
pass
@typeguard.typechecked
def _ab_dim_checker(x: TensorType["a", "b"]):
pass
@typeguard.typechecked
def _abc_dim_checker(x: TensorType["a", "b", "c"]):
pass
@typeguard.typechecked
def _cb_dim_checker(x: TensorType["c", "b"]):
pass
@typeguard.typechecked
def _am1_dim_checker(x: TensorType["a", -1]):
pass
@typeguard.typechecked
def _m1b_dim_checker(x: TensorType[-1, "b"]):
pass
@typeguard.typechecked
def _abm1_dim_checker(x: TensorType["a", "b", -1]):
pass
@typeguard.typechecked
def _m1bm1_dim_checker(x: TensorType[-1, "b", -1]):
pass
x = torch.rand(3, 4)
_ab_dim_checker(x)
_cb_dim_checker(x)
_am1_dim_checker(x)
_m1b_dim_checker(x)
with pytest.raises(TypeError):
_a_dim_checker(x)
with pytest.raises(TypeError):
_abc_dim_checker(x)
with pytest.raises(TypeError):
_abm1_dim_checker(x)
with pytest.raises(TypeError):
_m1bm1_dim_checker(x)
def test_int_str_dim():
@typeguard.typechecked
def _a_dim_checker1(x: TensorType["a":3]):
pass
@typeguard.typechecked
def _a_dim_checker2(x: TensorType["a":-1]):
pass
@typeguard.typechecked
def _ab_dim_checker1(x: TensorType["a":3, "b":4]):
pass
@typeguard.typechecked
def _ab_dim_checker2(x: TensorType["a":3, "b":-1]):
pass
@typeguard.typechecked
def _ab_dim_checker3(x: TensorType["a":-1, "b":4]):
pass
@typeguard.typechecked
def _ab_dim_checker4(x: TensorType["a":3, "b"]):
pass
@typeguard.typechecked
def _ab_dim_checker5(x: TensorType["a", "b":4]):
pass
@typeguard.typechecked
def _ab_dim_checker6(x: TensorType["a":5, "b":4]):
pass
@typeguard.typechecked
def _ab_dim_checker7(x: TensorType["a":5, "b":-1]):
pass
@typeguard.typechecked
def _m1b_dim_checker(x: TensorType[-1, "b":4]):
pass
@typeguard.typechecked
def _abm1_dim_checker(x: TensorType["a":3, "b":4, -1]):
pass
@typeguard.typechecked
def _m1bm1_dim_checker(x: TensorType[-1, "b":4, -1]):
pass
x = torch.rand(3, 4)
_ab_dim_checker1(x)
_ab_dim_checker2(x)
_ab_dim_checker3(x)
_ab_dim_checker4(x)
_ab_dim_checker5(x)
_m1b_dim_checker(x)
with pytest.raises(TypeError):
_a_dim_checker1(x)
with pytest.raises(TypeError):
_a_dim_checker2(x)
with pytest.raises(TypeError):
_ab_dim_checker6(x)
with pytest.raises(TypeError):
_ab_dim_checker7(x)
with pytest.raises(TypeError):
_abm1_dim_checker(x)
with pytest.raises(TypeError):
_m1bm1_dim_checker(x)
def test_any_dim():
@typeguard.typechecked
def _3any_dim_checker(x: TensorType[3, Any]):
pass
@typeguard.typechecked
def _any4_dim_checker(x: TensorType[Any, 4]):
pass
@typeguard.typechecked
def _anyany_dim_checker(x: TensorType[Any, Any]):
pass
@typeguard.typechecked
def _34any_dim_checker(x: TensorType[3, 4, Any]):
pass
@typeguard.typechecked
def _any4any_dim_checker(x: TensorType[Any, 4, Any]):
pass
x = torch.rand(3)
with pytest.raises(TypeError):
_3any_dim_checker(x)
with pytest.raises(TypeError):
_any4_dim_checker(x)
with pytest.raises(TypeError):
_anyany_dim_checker(x)
with pytest.raises(TypeError):
_34any_dim_checker(x)
with pytest.raises(TypeError):
_any4any_dim_checker(x)
x = torch.rand((3, 4))
_3any_dim_checker(x)
_any4_dim_checker(x)
_anyany_dim_checker(x)
x = torch.rand((4, 5))
with pytest.raises(TypeError):
_any4_dim_checker(x)
x = torch.rand(4, 5)
with pytest.raises(TypeError):
_3any_dim_checker(x)
x = torch.rand((3, 4, 5))
_34any_dim_checker(x)
_any4any_dim_checker(x)
x = torch.rand((3, 5, 5))
with pytest.raises(TypeError):
x = _any4any_dim_checker(x)
with pytest.raises(TypeError):
_34any_dim_checker(x) | [
"torch.rand"
] | 1.7.0 | olliethomas/torchtyping | 81e1cffa841307d700b11e9a2c970a5face65020 |
1.7 | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ model.py ]
# Synopsis [ the 1-hidden model ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvBank(nn.Module):
def __init__(self, input_dim, output_class_num, kernels, cnn_size, hidden_size, dropout, **kwargs):
super(ConvBank, self).__init__()
self.drop_p = dropout
self.in_linear = nn.Linear(input_dim, hidden_size)
latest_size = hidden_size
# conv bank
self.cnns = nn.ModuleList()
assert len(kernels) > 0
for kernel in kernels:
self.cnns.append(nn.Conv1d(latest_size, cnn_size, kernel, padding=kernel//2))
latest_size = cnn_size * len(kernels)
self.out_linear = nn.Linear(latest_size, output_class_num)
def forward(self, features):
hidden = F.dropout(F.relu(self.in_linear(features)), p=self.drop_p)
conv_feats = []
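        # Conv1d expects (batch, channels, time), so move the feature dimension before time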
hidden = hidden.transpose(1, 2).contiguous()
for cnn in self.cnns:
conv_feats.append(cnn(hidden))
hidden = torch.cat(conv_feats, dim=1).transpose(1, 2).contiguous()
hidden = F.dropout(F.relu(hidden), p=self.drop_p)
predicted = self.out_linear(hidden)
return predicted
class Framelevel1Hidden(nn.Module):
def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):
super(Framelevel1Hidden, self).__init__()
# init attributes
self.in_linear = nn.Linear(input_dim, hidden_size)
self.out_linear = nn.Linear(hidden_size, output_class_num)
self.drop = nn.Dropout(dropout)
self.act_fn = nn.functional.relu
def forward(self, features):
hidden = self.in_linear(features)
hidden = self.drop(hidden)
hidden = self.act_fn(hidden)
predicted = self.out_linear(hidden)
return predicted
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.functional.relu"
] | 1.7.1 | OlegJakushkin/s3prl | c0e41f07fa56f0f79b5bf3839b4d0a4cf7c421bf |
0.4 | #!/usr/bin/env python
import argparse
import logging
import os
from shutil import copyfile, rmtree
import numpy as np
import torch
import torch.nn as nn
from ase.data import atomic_numbers
from torch.optim import Adam
from torch.utils.data.sampler import RandomSampler
import schnetpack as spk
from schnetpack.datasets import MD17
from schnetpack.utils import compute_params, to_json, read_from_json
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
def get_parser():
""" Setup parser for command line arguments """
main_parser = argparse.ArgumentParser()
## command-specific
cmd_parser = argparse.ArgumentParser(add_help=False)
cmd_parser.add_argument('--cuda', help='Set flag to use GPU(s)', action='store_true')
cmd_parser.add_argument('--parallel',
help='Run data-parallel on all available GPUs (specify with environment variable'
+ ' CUDA_VISIBLE_DEVICES)', action='store_true')
cmd_parser.add_argument('--batch_size', type=int,
help='Mini-batch size for training and prediction (default: %(default)s)',
default=100)
## training
train_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])
train_parser.add_argument('datapath', help='Path / destination of MD17 dataset directory')
train_parser.add_argument('molecule', help='Selected molecule trajectory of MD17 collection',
choices=MD17.existing_datasets)
train_parser.add_argument('modelpath', help='Destination for models and logs')
train_parser.add_argument('--seed', type=int, default=None, help='Set random seed for torch and numpy.')
train_parser.add_argument('--overwrite', help='Remove previous model directory.', action='store_true')
# data split
train_parser.add_argument('--split_path', help='Path / destination of npz with data splits',
default=None)
train_parser.add_argument('--split', help='Give sizes of train and validation splits and use remaining for testing',
type=int, nargs=2, default=[None, None])
train_parser.add_argument('--max_epochs', type=int, help='Maximum number of training epochs (default: %(default)s)',
default=5000)
train_parser.add_argument('--lr', type=float, help='Initial learning rate (default: %(default)s)',
default=1e-4)
train_parser.add_argument('--lr_patience', type=int,
help='Epochs without improvement before reducing the learning rate (default: %(default)s)',
default=25)
train_parser.add_argument('--lr_decay', type=float, help='Learning rate decay (default: %(default)s)',
default=0.5)
train_parser.add_argument('--lr_min', type=float, help='Minimal learning rate (default: %(default)s)',
default=1e-6)
train_parser.add_argument('--rho', type=float,
help='Energy-force trade-off. For rho=0, use forces only. (default: %(default)s)',
default=0.1)
train_parser.add_argument('--logger', help='Choose logger for training process (default: %(default)s)',
choices=['csv', 'tensorboard'], default='csv')
train_parser.add_argument('--log_every_n_epochs', type=int,
help='Log metrics every given number of epochs (default: %(default)s)',
default=1)
## evaluation
eval_parser = argparse.ArgumentParser(add_help=False, parents=[cmd_parser])
eval_parser.add_argument('datapath', help='Path / destination of MD17 dataset directory')
eval_parser.add_argument('molecule', help='Molecule trajectory',
choices=MD17.existing_datasets)
eval_parser.add_argument('modelpath', help='Path of stored model')
eval_parser.add_argument('--split', help='Evaluate on trained model on given split',
choices=['train', 'validation', 'test'], default=['test'], nargs='+')
# model-specific parsers
model_parser = argparse.ArgumentParser(add_help=False)
####### SchNet #######
schnet_parser = argparse.ArgumentParser(add_help=False, parents=[model_parser])
schnet_parser.add_argument('--features', type=int, help='Size of atom-wise representation (default: %(default)s)',
default=256)
schnet_parser.add_argument('--interactions', type=int, help='Number of interaction blocks (default: %(default)s)',
default=6)
schnet_parser.add_argument('--cutoff', type=float, default=5.,
help='Cutoff radius of local environment (default: %(default)s)')
schnet_parser.add_argument('--num_gaussians', type=int, default=25,
help='Number of Gaussians to expand distances (default: %(default)s)')
####### wACSF ########
wacsf_parser = argparse.ArgumentParser(add_help=False, parents=[model_parser])
# wACSF parameters
wacsf_parser.add_argument('--radial', type=int, default=22,
help='Number of radial symmetry functions (default: %(default)s)')
wacsf_parser.add_argument('--angular', type=int, default=5,
help='Number of angular symmetry functions (default: %(default)s)')
wacsf_parser.add_argument('--zetas', type=int, nargs='+', default=[1],
help='List of zeta exponents used for angle resolution (default: %(default)s)')
wacsf_parser.add_argument('--standardize', action='store_true',
help='Standardize wACSF before atomistic network.')
wacsf_parser.add_argument('--cutoff', type=float, default=5.0,
help='Cutoff radius of local environment (default: %(default)s)')
# Atomistic network parameters
wacsf_parser.add_argument('--n_nodes', type=int, default=100,
help='Number of nodes in atomic networks (default: %(default)s)')
wacsf_parser.add_argument('--n_layers', type=int, default=2,
help='Number of layers in atomic networks (default: %(default)s)')
# Advances wACSF settings
wacsf_parser.add_argument('--centered', action='store_true', help='Use centered Gaussians for radial functions')
wacsf_parser.add_argument('--crossterms', action='store_true', help='Use crossterms in angular functions')
wacsf_parser.add_argument('--behler', action='store_true', help='Switch to conventional ACSF')
wacsf_parser.add_argument('--elements', default=['H', 'C', 'O'], nargs='+',
help='List of elements to be used for symmetry functions (default: %(default)s).')
## setup subparser structure
cmd_subparsers = main_parser.add_subparsers(dest='mode', help='Command-specific arguments')
cmd_subparsers.required = True
subparser_train = cmd_subparsers.add_parser('train', help='Training help')
subparser_eval = cmd_subparsers.add_parser('eval', help='Eval help')
subparser_export = cmd_subparsers.add_parser('export', help='Export help')
subparser_export.add_argument('modelpath', help='Path of stored model')
subparser_export.add_argument('destpath', help='Destination path for exported model')
train_subparsers = subparser_train.add_subparsers(dest='model', help='Model-specific arguments')
train_subparsers.required = True
train_subparsers.add_parser('schnet', help='SchNet help', parents=[train_parser, schnet_parser])
train_subparsers.add_parser('wacsf', help='wACSF help', parents=[train_parser, wacsf_parser])
eval_subparsers = subparser_eval.add_subparsers(dest='model', help='Model-specific arguments')
eval_subparsers.required = True
eval_subparsers.add_parser('schnet', help='SchNet help', parents=[eval_parser, schnet_parser])
eval_subparsers.add_parser('wacsf', help='wACSF help', parents=[eval_parser, wacsf_parser])
return main_parser
def train(args, model, train_loader, val_loader, device):
# setup hook and logging
hooks = [
spk.train.MaxEpochHook(args.max_epochs)
]
# setup optimizer for training
# to_opt = model.parameters()
# Bugfix: only pass parameters that require gradients to the optimizer
to_opt = filter(lambda p: p.requires_grad, model.parameters())
optimizer = Adam(to_opt, lr=args.lr)
schedule = spk.train.ReduceLROnPlateauHook(optimizer, patience=args.lr_patience, factor=args.lr_decay,
min_lr=args.lr_min,
window_length=1, stop_after_min=True)
hooks.append(schedule)
# index into model output: [energy, forces]
metrics = [spk.metrics.MeanAbsoluteError(MD17.energies, "y"),
spk.metrics.RootMeanSquaredError(MD17.energies, "y"),
spk.metrics.MeanAbsoluteError(MD17.forces, "dydx"),
spk.metrics.RootMeanSquaredError(MD17.forces, "dydx")]
if args.logger == 'csv':
logger = spk.train.CSVHook(os.path.join(args.modelpath, 'log'),
metrics, every_n_epochs=args.log_every_n_epochs)
hooks.append(logger)
elif args.logger == 'tensorboard':
logger = spk.train.TensorboardHook(os.path.join(args.modelpath, 'log'),
metrics, every_n_epochs=args.log_every_n_epochs)
hooks.append(logger)
# setup loss function
def loss(batch, result):
ediff = batch[MD17.energies] - result["y"]
ediff = ediff ** 2
fdiff = batch[MD17.forces] - result["dydx"]
fdiff = fdiff ** 2
err_sq = args.rho * torch.mean(ediff.view(-1)) + (1 - args.rho) * torch.mean(fdiff.view(-1))
return err_sq
trainer = spk.train.Trainer(args.modelpath, model, loss, optimizer,
train_loader, val_loader, hooks=hooks)
trainer.train(device)
def evaluate(args, model, train_loader, val_loader, test_loader, device):
header = ['Subset', 'Energy MAE', 'Energy RMSE',
'Force MAE', 'Force RMSE', 'Force Length MAE', 'Force Length RMSE', 'Force Angle MAE', 'Force Angle RMSE']
metrics = [
spk.metrics.MeanAbsoluteError(MD17.energies, "y"),
spk.metrics.RootMeanSquaredError(MD17.energies, "y"),
spk.metrics.MeanAbsoluteError(MD17.forces, "dydx"),
spk.metrics.RootMeanSquaredError(MD17.forces, "dydx"),
spk.metrics.LengthMAE(MD17.forces, "dydx"),
spk.metrics.LengthRMSE(MD17.forces, "dydx"),
spk.metrics.AngleMAE(MD17.forces, "dydx"),
spk.metrics.AngleRMSE(MD17.forces, "dydx")
]
results = []
if 'train' in args.split:
results.append(['training'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, train_loader, device)])
if 'validation' in args.split:
results.append(['validation'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, val_loader, device)])
if 'test' in args.split:
results.append(['test'] + ['%.5f' % i for i in evaluate_dataset(metrics, model, test_loader, device)])
header = ','.join(header)
results = np.array(results)
np.savetxt(os.path.join(args.modelpath, 'evaluation.csv'), results, header=header, fmt='%s', delimiter=',')
def evaluate_dataset(metrics, model, loader, device):
for metric in metrics:
metric.reset()
for batch in loader:
batch = {
k: v.to(device)
for k, v in batch.items()
}
result = model(batch)
for metric in metrics:
metric.add_batch(batch, result)
results = [
metric.aggregate() for metric in metrics
]
return results
def get_model(args, atomref=None, mean=None, stddev=None, train_loader=None, parallelize=False, mode='train'):
if args.model == 'schnet':
representation = spk.representation.SchNet(args.features, args.features, args.interactions,
args.cutoff, args.num_gaussians)
atomwise_output = spk.atomistic.Energy(args.features, mean=mean, stddev=stddev, atomref=atomref,
return_force=True, create_graph=True)
model = spk.atomistic.AtomisticModel(representation, atomwise_output)
elif args.model == 'wacsf':
sfmode = ('weighted', 'Behler')[args.behler]
# Convert element strings to atomic charges
elements = frozenset((atomic_numbers[i] for i in sorted(args.elements)))
representation = spk.representation.BehlerSFBlock(args.radial, args.angular, zetas=set(args.zetas),
cutoff_radius=args.cutoff,
centered=args.centered, crossterms=args.crossterms,
elements=elements,
mode=sfmode)
logging.info("Using {:d} {:s}-type SF".format(representation.n_symfuncs, sfmode))
# Standardize representation if requested
if args.standardize and mode == 'train':
if train_loader is None:
raise ValueError("Specification of a trainig_loader is required to standardize wACSF")
else:
logging.info("Computing and standardizing symmetry function statistics")
else:
train_loader = None
representation = spk.representation.StandardizeSF(representation, train_loader, cuda=args.cuda)
# Build HDNN model
atomwise_output = spk.atomistic.ElementalEnergy(representation.n_symfuncs, n_hidden=args.n_nodes,
n_layers=args.n_layers, mean=mean, stddev=stddev,
atomref=atomref, return_force=True, create_graph=True,
elements=elements)
model = spk.atomistic.AtomisticModel(representation, atomwise_output)
else:
raise ValueError('Unknown model class:', args.model)
if parallelize:
model = nn.DataParallel(model)
logging.info("The model you built has: %d parameters" % compute_params(model))
return model
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
argparse_dict = vars(args)
jsonpath = os.path.join(args.modelpath, 'args.json')
if args.mode == 'train':
if args.overwrite and os.path.exists(args.modelpath):
logging.info('existing model will be overwritten...')
rmtree(args.modelpath)
if not os.path.exists(args.modelpath):
os.makedirs(args.modelpath)
to_json(jsonpath, argparse_dict)
spk.utils.set_random_seed(args.seed)
train_args = args
else:
train_args = read_from_json(jsonpath)
# will download MD17 if necessary; collect_triples is required for the wACSF angular functions
logging.info('MD17 will be loaded...')
md17 = MD17(args.datapath, args.molecule, download=True, parse_all=False, collect_triples=args.model == 'wacsf')
# split the dataset into train, validation and test sets
split_path = os.path.join(args.modelpath, 'split.npz')
if args.mode == 'train':
if args.split_path is not None:
copyfile(args.split_path, split_path)
logging.info('create splits...')
data_train, data_val, data_test = md17.create_splits(*train_args.split, split_file=split_path)
logging.info('load data...')
train_loader = spk.data.AtomsLoader(data_train, batch_size=args.batch_size, sampler=RandomSampler(data_train),
num_workers=4, pin_memory=True)
val_loader = spk.data.AtomsLoader(data_val, batch_size=args.batch_size, num_workers=2, pin_memory=True)
if args.mode == 'train':
logging.info('calculate statistics...')
mean, stddev = train_loader.get_statistics(MD17.energies, True)
else:
mean, stddev = None, None
# Construct the model.
model = get_model(train_args, mean=mean, stddev=stddev, train_loader=train_loader, parallelize=args.parallel,
mode=args.mode).to(device)
if args.mode == 'eval':
if args.parallel:
model.module.load_state_dict(
torch.load(os.path.join(args.modelpath, 'best_model')))
else:
model.load_state_dict(
torch.load(os.path.join(args.modelpath, 'best_model')))
if args.mode == 'train':
logging.info("training...")
train(args, model, train_loader, val_loader, device)
logging.info("...training done!")
elif args.mode == 'eval':
logging.info("evaluating...")
test_loader = spk.data.AtomsLoader(data_test, batch_size=args.batch_size,
num_workers=2, pin_memory=True)
evaluate(args, model, train_loader, val_loader, test_loader, device)
logging.info("... done!")
else:
print('Unknown mode:', args.mode)
| [
"torch.device",
"torch.utils.data.sampler.RandomSampler",
"torch.optim.Adam",
"torch.nn.DataParallel"
] | 0.4 | heytitle/schnetpack | 6facf724e6e220053f4ba8d5b81744744d1abef3 |
1.6 | """
2020.06.09-Changed for building GhostNet
Huawei Technologies Co., Ltd. <[email protected]>
Creates a GhostNet Model as defined in:
GhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang,
Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
https://arxiv.org/abs/1911.11907
Modified from https://github.com/d-li14/mobilenetv3.pytorch
and https://github.com/rwightman/pytorch-image-models
"""
import logging
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..module.activation import act_layers
def get_url(width_mult=1.0):
if width_mult == 1.0:
return "https://raw.githubusercontent.com/huawei-noah/CV-Backbones/master/ghostnet_pytorch/models/state_dict_73.98.pth" # noqa E501
else:
logging.info("GhostNet only has 1.0 pretrain model. ")
return None
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
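# Editorial addition, not part of the upstream GhostNet code: a few worked examples of
# the rounding rule above, using the divisor of 4 that this module passes in practice.
def _make_divisible_examples():
    assert _make_divisible(16 * 1.0, 4) == 16  # already a multiple of 4, kept as-is
    assert _make_divisible(22 * 1.3, 4) == 28  # int(28.6 + 2) // 4 * 4 == 28, within 10% of 28.6
    assert _make_divisible(10 * 0.5, 4) == 8   # 4 would lose more than 10% of 5.0, so bump by the divisor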
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
else:
return F.relu6(x + 3.0) / 6.0
class SqueezeExcite(nn.Module):
def __init__(
self,
in_chs,
se_ratio=0.25,
reduced_base_chs=None,
activation="ReLU",
gate_fn=hard_sigmoid,
divisor=4,
**_
):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layers(activation)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
# EA layer
class EcaLayer(nn.Module):
"""Constructs a ECA module.
Args:
channel: Number of channels of the input feature map
k_size: Adaptive selection of kernel size
"""
def __init__(self, channel, k_size=3):
super(EcaLayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
# x: input features with shape [b, c, h, w]
b, c, h, w = x.size()
# feature descriptor on the global spatial information
y = self.avg_pool(x)
# Two different branches of ECA module
y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
# Multi-scale information fusion
y = self.sigmoid(y)
return x * y.expand_as(x)
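# Editorial addition (usage sketch only): the ECA layer is channel-count agnostic because
# the attention weights come from a 1D convolution sliding across the pooled channels.
def _eca_layer_example():
    x = torch.randn(2, 64, 16, 16)        # [batch, channels, height, width]
    eca = EcaLayer(channel=64, k_size=3)
    y = eca(x)
    assert y.shape == x.shape             # same shape, each channel rescaled by its weight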
class ConvBnAct(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size, stride=1, activation="ReLU"):
super(ConvBnAct, self).__init__()
self.conv = nn.Conv2d(
in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False
)
self.bn1 = nn.BatchNorm2d(out_chs)
self.act1 = act_layers(activation)
def forward(self, x):
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
return x
class GhostModule(nn.Module):
def __init__(
self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, activation="ReLU"
):
super(GhostModule, self).__init__()
self.oup = oup
init_channels = math.ceil(oup / ratio)
new_channels = init_channels * (ratio - 1)
self.primary_conv = nn.Sequential(
nn.Conv2d(
inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False
),
nn.BatchNorm2d(init_channels),
act_layers(activation) if activation else nn.Sequential(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(
init_channels,
new_channels,
dw_size,
1,
dw_size // 2,
groups=init_channels,
bias=False,
),
nn.BatchNorm2d(new_channels),
act_layers(activation) if activation else nn.Sequential(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out
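# Editorial addition (channel bookkeeping sketch): with ratio=2 the primary convolution
# produces ceil(oup / 2) channels and the cheap depthwise operation the rest, so for the
# even channel counts used in this backbone the concatenation already has `oup` channels.
def _ghost_module_example():
    m = GhostModule(inp=16, oup=24, kernel_size=1, ratio=2, dw_size=3, stride=1)
    y = m(torch.randn(1, 16, 8, 8))
    assert y.shape == (1, 24, 8, 8)       # 12 primary + 12 cheap channels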
class GhostBottleneck(nn.Module):
"""Ghost bottleneck w/ optional SE"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
activation="ReLU",
se_ratio=0.0,
):
super(GhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.0
self.stride = stride
# Point-wise expansion
self.ghost1 = GhostModule(in_chs, mid_chs, activation=activation)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs,
mid_chs,
dw_kernel_size,
stride=stride,
padding=(dw_kernel_size - 1) // 2,
groups=mid_chs,
bias=False,
)
self.bn_dw = nn.BatchNorm2d(mid_chs)
# Squeeze-and-excitation
if has_se:
self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)
else:
self.se = None
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs, activation=None)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_chs,
in_chs,
dw_kernel_size,
stride=stride,
padding=(dw_kernel_size - 1) // 2,
groups=in_chs,
bias=False,
),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
residual = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.stride > 1:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
    x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(residual)
return x
class GhostBottleneckSandGlass(nn.Module):
""" Ghost bottleneck w/ optional SE"""
def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,
stride=1, activation="ReLU", se_ratio=0.0):
super(GhostBottleneckSandGlass, self).__init__()
self.stride = stride
# Depth-wise for more space detail
self.dw1 = nn.Sequential(
nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.ReLU6()
)
# Point-wise expansion
self.ghost1 = GhostModule(in_chs, mid_chs)
# Eca-Layer
self.eca = EcaLayer(out_chs)
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs)
# Depth-wise for more space detail
self.dw2 = nn.Sequential(
nn.Conv2d(out_chs, out_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size - 1) // 2, groups=out_chs, bias=False),
nn.BatchNorm2d(out_chs)
)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
residual = x
x = self.dw1(x)
# 1st ghost bottleneck
x = self.ghost1(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x = self.dw2(x)
# eca
x = self.eca(x)
x += self.shortcut(residual)
return x
class GhostNet(nn.Module):
def __init__(
self,
width_mult=1.0,
out_stages=(4, 6, 9),
activation="ReLU",
pretrain=True,
act=None,
):
super(GhostNet, self).__init__()
assert set(out_stages).issubset(i for i in range(10))
self.width_mult = width_mult
self.out_stages = out_stages
# setting of inverted residual blocks
self.cfgs = [
# k, t, c, SE, s
# stage1
[[3, 16, 16, 0, 1]], # 0
# stage2
[[3, 48, 24, 0, 2]], # 1
[[3, 72, 24, 0, 1]], # 2 1/4
# stage3
[[5, 72, 40, 0.25, 2]], # 3
[[5, 120, 40, 0.25, 1]], # 4 1/8
# stage4
[[3, 240, 80, 0, 2]], # 5
[
[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1],
], # 6 1/16
# stage5
[[5, 672, 160, 0.25, 2]], # 7
[
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
], # 8
]
# ------conv+bn+act----------# 9 1/32
self.activation = activation
if act is not None:
warnings.warn(
"Warning! act argument has been deprecated, " "use activation instead!"
)
self.activation = act
# building first layer
output_channel = _make_divisible(16 * width_mult, 4)
self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(output_channel)
self.act1 = act_layers(self.activation)
input_channel = output_channel
# building inverted residual blocks
stages = []
block = GhostBottleneck
# block = GhostBottleneckSandGlass
for cfg in self.cfgs:
layers = []
for k, exp_size, c, se_ratio, s in cfg:
output_channel = _make_divisible(c * width_mult, 4)
hidden_channel = _make_divisible(exp_size * width_mult, 4)
layers.append(
block(
input_channel,
hidden_channel,
output_channel,
k,
s,
activation=self.activation,
se_ratio=se_ratio,
)
)
input_channel = output_channel
stages.append(nn.Sequential(*layers))
output_channel = _make_divisible(exp_size * width_mult, 4)
stages.append(
nn.Sequential(
ConvBnAct(input_channel, output_channel, 1, activation=self.activation)
)
) # 9
self.blocks = nn.Sequential(*stages)
self._initialize_weights(pretrain)
def forward(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
output = []
for i in range(10):
x = self.blocks[i](x)
if i in self.out_stages:
output.append(x)
return tuple(output)
def _initialize_weights(self, pretrain=True):
print("init weights...")
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if "conv_stem" in name:
nn.init.normal_(m.weight, 0, 0.01)
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if pretrain:
url = get_url(self.width_mult)
if url is not None:
state_dict = torch.hub.load_state_dict_from_url(url, progress=True)
self.load_state_dict(state_dict, strict=False)
| [
"torch.cat",
"torch.nn.functional.relu6",
"torch.nn.Sigmoid",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.init.constant_",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.hub.load_state_dict_from_url",
"torch.nn.AdaptiveAvgPool2d"
] | 1.6 | samcw/nanodet | dc7c4f6021199d6988221b516d49af392a52d748 |
1.7 | import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import logging
import pandas as pd
import traceback
from ...core import models
from ..misc import utils
class BaseManager:
"""
Manages all modules and computation devices. Supports three kinds of computation:
1. DataParallel (single machine)
2. DistributedDataParallel (single machine)
3. DistributedDataParallel (multi machines)
"""
def __init__(
self,
trainer,
tester,
recorder,
objects_dict,
device=None,
schedulers=None,
gradclipper=None,
samplers=None,
collatefns=None,
is_resume=False,
is_distributed=False,
device_wrapper_type="DP",
dist_port=23456,
world_size=None,
phase="train",
primary_metric=["test", "recall_at_1"],
to_device_list=["models", "collectors"],
to_wrap_list=["models"],
patience=10,
):
self.trainer = trainer
self.tester = tester
self.recorder = recorder
self.objects_dict = objects_dict
self.device = device
self.schedulers = schedulers
self.gradclipper = gradclipper
self.samplers = samplers
self.collatefns = collatefns
self.epochs = 0
self.is_resume = is_resume
self.is_distributed = is_distributed
self.device_wrapper_type = device_wrapper_type
self.dist_port = dist_port
self.world_size = world_size
self.phase = phase
self.primary_metric = primary_metric
self.to_device_list = to_device_list
self.to_wrap_list = to_wrap_list
self.patience = patience
self.best_metric = -1
self.patience_counts = 0
self.is_best = False
self.assert_phase()
self.assert_device()
self.assert_required_member()
self.assert_resume_folder_exist()
self.initiate_objects_dict()
self.initiate_members()
@property
def _required_member(self):
return [
"metrics",
"collectors",
"selectors",
"models",
"losses",
"evaluators",
"optimizers",
"transforms",
"datasets",
]
def assert_phase(self):
assert self.phase in ["train", "evaluate"]
def assert_device(self):
assert self.device_wrapper_type in ["DP", "DDP"]
if self.is_distributed:
assert self.device_wrapper_type == "DDP"
def assert_required_member(self):
object_dict_keys = list(self.objects_dict.keys())
assert all(
[item in object_dict_keys
for item in self._required_member]
)
def assert_resume_folder_exist(self):
if self.is_resume:
assert not self.recorder.delete_old_folder
def initiate_objects_dict(self):
for k, v in self.objects_dict.items():
setattr(self, k, v)
del self.objects_dict
def initiate_members(self):
self.initiate_device()
self.initiate_models()
self.initiate_collectors()
self.initiate_selectors()
self.initiate_losses()
self.initiate_schedulers()
# for distributed training
if self.is_distributed:
self.initiate_distributed_trainers()
self.initiate_distributed_testers()
self.initiate_addition_items()
def initiate_addition_items(self):
pass
def initiate_device(self):
if self.device_wrapper_type == "DDP" and not self.is_distributed:
torch.distributed.init_process_group(
backend='nccl',
init_method='tcp://localhost:{}'.format(self.dist_port),
rank=0,
world_size=1
)
if self.is_distributed:
self.world_size = (
dist.get_world_size()
if self.world_size is None
else self.world_size
)
self.main_device_id, self.device_ids = None, None
self.multi_gpu = False
if self.device is None:
self.main_device_id = 0
self.device_ids = [0]
elif isinstance(self.device, int):
self.main_device_id = self.device
self.device_ids = [self.device]
elif isinstance(self.device, list):
self.main_device_id = self.device[0]
self.device_ids = self.device
self.multi_gpu = (
True if len(self.device_ids) > 1
else False
)
else:
raise TypeError(
"Device type error!"
)
# initiate self.device
self.device = torch.device(
"cuda:{}".format(self.main_device_id)
if torch.cuda.is_available()
else "cpu"
)
def initiate_models(self):
# to device
is_to_device = "models" in self.to_device_list
is_to_wrap = "models" in self.to_wrap_list
if is_to_device:
self._members_to_device("models", to_warp=is_to_wrap)
def initiate_collectors(self):
# to device
is_to_device = "collectors" in self.to_device_list
is_to_wrap = "collectors" in self.to_wrap_list
if is_to_device:
self._members_to_device("collectors", to_warp=is_to_wrap)
def initiate_selectors(self):
# to device
is_to_device = "selectors" in self.to_device_list
is_to_wrap = "selectors" in self.to_wrap_list
if is_to_device:
self._members_to_device("selectors", to_warp=is_to_wrap)
def initiate_losses(self):
# to device
is_to_device = "losses" in self.to_device_list
is_to_wrap = "losses" in self.to_wrap_list
if is_to_device:
self._members_to_device("losses", to_warp=is_to_wrap)
def initiate_distributed_trainers(self):
total_batch_size = self.trainer.batch_size
assert (total_batch_size % self.world_size) == 0
sub_batch_size = int(total_batch_size // self.world_size)
self.trainer.set_distributed(True)
self.trainer.set_batch_size(sub_batch_size)
def initiate_distributed_testers(self):
self.tester.set_distributed(True)
def initiate_schedulers(self):
if self.schedulers is None:
self.schedulers = {}
def _members_to_device(self, module_name: str, to_warp=True):
members = getattr(self, module_name)
# to device
if not self.is_distributed:
# single-device
if self.multi_gpu:
for k, v in members.items():
members[k] = members[k].to(self.device)
if to_warp:
if self.device_wrapper_type == "DP":
members[k] = torch.nn.DataParallel(
v,
device_ids=self.device_ids
)
else:
try:
members[k] = DDP(
v,
device_ids=self.device_ids,
find_unused_parameters=True
)
except Exception:
trace = traceback.format_exc()
logging.warning("{}".format(trace))
else:
for k, v in members.items():
members[k] = v.to(self.device)
else:
# multi-device
for k, v in members.items():
members[k] = members[k].to(self.device)
try:
members[k] = DDP(
members[k],
device_ids=self.device_ids,
find_unused_parameters=True
)
except Exception:
trace = traceback.format_exc()
logging.warning("{}".format(trace))
"""
Run
"""
def run(self, phase="train", start_epoch=0, total_epochs=61, is_test=True, is_save=True, interval=1, warm_up=2, warm_up_list=None):
self.phase = phase
self.assert_phase()
self.prepare()
self.maybe_resume()
if self.phase == "train":
for i in range(start_epoch, total_epochs):
self.epochs = i
if i < warm_up:
logging.info("Warm up with {}".format(warm_up_list))
self.trainer.set_activated_optims(warm_up_list)
else:
self.trainer.set_activated_optims()
self.train(epochs=self.epochs)
self.release_memory()
if is_test:
if (i % interval) == 0:
self.test()
self.display_metrics()
self.save_metrics()
self.release_memory()
if is_save:
self.save_models()
# early stop
if self.patience_counts >= self.patience:
logging.info("Training terminated!")
break
elif self.phase == "evaluate":
self.test()
self.display_metrics()
def prepare(self):
# prepare trainer
utils.func_params_mediator(
[self],
self.trainer.prepare
)
# prepare tester
utils.func_params_mediator(
[
{"recorders": self.recorder},
self,
],
self.tester.prepare
)
def maybe_resume(self):
if self.is_resume:
logging.info("Resume objects...")
self.recorder.load_models(
obj=self.trainer,
device=self.device
)
def meta_test(self):
self.epochs = -1
self.test()
self.save_metrics()
self.display_metrics()
def save_metrics(self):
for k, v in self.metrics.items():
data, _ = self.recorder.get_data({k:v})
self.recorder.update(data, self.epochs)
def display_metrics(self):
# best metric check
cur_metric = self.metrics[self.primary_metric[0]][self.primary_metric[1]]
if cur_metric > self.best_metric:
self.best_metric = cur_metric
self.is_best = True
logging.info("NEW BEST METRIC!!!")
self.patience_counts = 0
else:
self.is_best = False
self.patience_counts += 1
self.metrics[self.primary_metric[0]]["BEST_" + self.primary_metric[1]] = self.best_metric
# display
for k, v in self.metrics.items():
logging.info("{} Metrics ---".format(k.upper()))
print(pd.DataFrame([v]))
def save_models(self):
self.recorder.save_models(self.trainer, step=self.epochs, best=self.is_best)
def train(self, epochs=None):
self.trainer.train(epochs=epochs)
def test(self):
self.metrics = self.tester.test()
def release_memory(self):
torch.cuda.empty_cache()
| [
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.nn.DataParallel"
] | 1.7.0 | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca |
1.7 | import torch
import torchvision
import collections
import numpy as np
'''
_sparsity_ratios_per_layer is a dictionary that stores the sparsity ratios of each layer:
Key - the layer name
Value - list of sparsity ratios for the executed forward passes
'''
_sparsity_ratios_per_layer = collections.defaultdict(list)
_sparsity_ratios_per_layer_type = collections.defaultdict(list)
_total_memory_per_layer_type = collections.defaultdict(list)
_bitmap_memory_footprint = collections.defaultdict(list)
_dense_memory_footprint = collections.defaultdict(list)
_activations_stats_for_hist = []
_layers_types = [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.Dropout,
torch.nn.Linear, torch.nn.MaxPool2d, torch.nn.AdaptiveAvgPool2d]
_layers_names = {torch.nn.Conv2d:"Conv",
torch.nn.BatchNorm2d: "BatchNorm",
torch.nn.Dropout: "Dropout",
torch.nn.Linear: "Linear",
torch.nn.MaxPool2d: "MaxPool",
torch.nn.AdaptiveAvgPool2d: "AvgPool"}
class Hook():
def __init__(self, module, name, pre=False):
self.name = name
self.type = _layers_names[type(module)]
if pre==False:
self.hook = module.register_forward_hook(self.hook_fn)
else:
self.hook = module.register_forward_pre_hook(self.hook_pre_fn)
def hook_fn(self, module, input, output):
assert len(input) == 1
self.input = input[0].detach()
self.output = output.detach()
def hook_pre_fn(self, module, input):
assert len(input) == 1
self.input = input[0].detach()
def close(self):
self.hook.remove()
'''
_place_hooks: places hooks at the given layer types
'''
def _place_hooks(model, layers_types):
hooks = []
for name, module in model.named_modules():
if type(module) in layers_types:
hooks.append(Hook(module, name, pre=True))
else:
print("Skipped", name, type(module))
return hooks
'''
_update_sparsity_ratios_dict: updates the sparsity ratio dictionaries
with the new values captured by the hooks
'''
def _update_sparsity_ratios_dict(hooks):
for hook in hooks:
activation = hook.input
_activations_stats_for_hist.extend(list(activation.view(-1)))
total_elements = torch.numel(activation)
non_zero_elements = torch.count_nonzero(torch.abs(activation) > 0.001)
sparsity_ratio = 1 - (non_zero_elements/total_elements)
_sparsity_ratios_per_layer[hook.name].append(sparsity_ratio.item())
_sparsity_ratios_per_layer_type[hook.type].append(sparsity_ratio.item())
_total_memory_per_layer_type[hook.type].append(total_elements)
_bitmap_memory_footprint[hook.name].append(total_elements*1 + non_zero_elements*4)
_dense_memory_footprint[hook.name].append(np.prod(list(activation.shape))*4)
'''
_compute_sparsity_ratios_at_hooks: loops over the dataset and
calculates the sparsity at each hooked layer
'''
def _compute_sparsity_ratios_at_hooks(model, hooks, dataloader, device):
for inputs, _ in dataloader:
# Perform the forward path to save activations
print(inputs.shape)
inputs = inputs.to(device)
model(inputs)
# Update the sparsity statistics from the activations captured by the hooks
_update_sparsity_ratios_dict(hooks)
break
'''
_replace_relu_inplace_to_relu: workaround because hooks behave incorrectly
with inplace operations; replaces each inplace ReLU
with an equivalent one with inplace=False
'''
def _replace_relu_inplace_to_relu(model, relu_type):
for child_name, child in model.named_children():
if isinstance(child, relu_type):
setattr(model, child_name, relu_type(inplace=False))
else:
_replace_relu_inplace_to_relu(child, relu_type)
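# Editorial addition (usage sketch, not part of the original script): the replacement is
# recursive over named_children, so nested containers such as torchvision models are covered.
def _relu_replacement_example():
    model = torchvision.models.resnet18()
    _replace_relu_inplace_to_relu(model, torch.nn.ReLU)
    assert all(not m.inplace for m in model.modules() if isinstance(m, torch.nn.ReLU))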
def calc_zero_activations_percentages(model, dataloader, \
name, device, verbose=False):
# Workaround:: Replace the RELU inplace to normal because
# the hooks work wrong with ReLU inplace
relu_types = [torch.nn.ReLU6, torch.nn.ReLU]
for layer in relu_types:
_replace_relu_inplace_to_relu(model, layer)
print(model)
# Place the hooks at the required layer type
hooks = _place_hooks(model, _layers_types)
# Compute sparsity ratios
_compute_sparsity_ratios_at_hooks(model, hooks, dataloader, device)
# Reemove hooks
for hook in hooks:
hook.close()
# Print average sparsity ratios
avg_sparsity_per_layer = []
avg_saving_to_dense_per_layer = []
for layer_name in _sparsity_ratios_per_layer:
avg_sparsity = np.mean(_sparsity_ratios_per_layer[layer_name])
avg_saving_to_dense = 1 - np.mean(_bitmap_memory_footprint[layer_name])/ \
np.mean(_dense_memory_footprint[layer_name])
if avg_sparsity > 0.15:
avg_saving_to_dense_per_layer.append(avg_saving_to_dense)
avg_sparsity_per_layer.append(100*avg_sparsity)
else:
avg_saving_to_dense_per_layer.append(0)
avg_sparsity_per_layer.append(0)
if verbose:
print('Layer {} - input sparsity is {:.2f}%, saving {:.2f}% compared to dense'.format(
    layer_name, 100 * avg_sparsity, 100 * avg_saving_to_dense))
total_avg = np.mean(avg_sparsity_per_layer)
if verbose:
print('All - average zero activations percentage is {:.2f} %'.format(total_avg))
print("Average Saving compared to dense is {:.2f}".format(100*np.mean(avg_saving_to_dense_per_layer)))
avg_sparsity_per_layer_type = []
total_memory = []
layer_types = []
for layer_type in _sparsity_ratios_per_layer_type:
avg_sparsity = np.mean(_sparsity_ratios_per_layer_type[layer_type])
if verbose:
print('Layer {} - input sparsity is {:.4f} %'.format(layer_type, 100*avg_sparsity))
avg_sparsity_per_layer_type.append(100*avg_sparsity)
layer_types.append(layer_type)
total_memory.append(np.sum(_total_memory_per_layer_type[layer_type]))
total_memory_percentage = []
for idx, value in enumerate(total_memory):
total_memory_percentage.append(value/np.sum(total_memory))
return avg_sparsity_per_layer, avg_sparsity_per_layer_type, total_memory_percentage, \
layer_types, _activations_stats_for_hist | [
"torch.abs",
"torch.numel"
] | 1.7.1 | scale-lab/BitTrain | 3a15f96cc32222e3d6fceb00a622521e31745d4c |
1.8 | import typing
from typing import Dict, Union, Tuple, Iterator, Any
from typing import Optional
import numpy as np
import torch
from gym.utils import seeding
from advisor_losses import AlphaScheduler, AdvisorWeightedStage
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (
AbstractOffPolicyLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.base_abstractions.misc import Memory
_DATASET_CACHE: Dict[str, Any] = {}
class PoisonedDoorsOffPolicyExpertCELoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(self, total_episodes_in_epoch: Optional[int] = None):
super().__init__()
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
*args,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
rollout_len, nrollouts, _ = batch["poisoned_door_state"].shape
observations = {}
for k in ["poisoned_door_state"]:
if k in batch:
observations[k] = batch[k].view(
rollout_len, nrollouts, *batch[k].shape[2:]
)
ac_out, memory = model.forward(
observations=observations,
memory=memory,
prev_actions=None,
masks=batch["masks"],
)
expert_ce_loss = -ac_out.distributions.log_prob(
batch["expert_action"].view(rollout_len, nrollouts, 1)
).mean()
info = {"expert_ce": expert_ce_loss.item()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return expert_ce_loss, info, memory, rollout_len * nrollouts
class PoisonedDoorsOffPolicyAdvisorLoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(
self,
total_episodes_in_epoch: Optional[int] = None,
fixed_alpha: Optional[float] = 1,
fixed_bound: Optional[float] = 0.0,
alpha_scheduler: AlphaScheduler = None,
smooth_expert_weight_decay: Optional[float] = None,
*args,
**kwargs
):
super().__init__()
self.advisor_loss = AdvisorWeightedStage(
rl_loss=None,
fixed_alpha=fixed_alpha,
fixed_bound=fixed_bound,
alpha_scheduler=alpha_scheduler,
smooth_expert_weight_decay=smooth_expert_weight_decay,
*args,
**kwargs
)
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
step_count: int,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
rollout_len, nrollouts, _ = batch["poisoned_door_state"].shape
observations = {"poisoned_door_state": batch["poisoned_door_state"]}
ac_out, memory = model.forward(
observations=observations,
memory=memory,
prev_actions=None,
masks=batch["masks"].view(rollout_len, nrollouts, -1),
)
total_loss, losses_dict = self.advisor_loss.loss(
step_count=step_count,
batch={
"observations": {
"expert_action": torch.cat(
(
batch["expert_action"].view(rollout_len, nrollouts, 1),
torch.ones(rollout_len, nrollouts, 1, dtype=torch.int64).to(
batch["expert_action"].device
),
),
dim=-1,
)
}
},
actor_critic_output=ac_out,
)
info = {"offpolicy_" + key: val for key, val in losses_dict.items()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return total_loss, info, memory, rollout_len * nrollouts
class PoisonedDoorsExpertTrajectoryIterator(Iterator):
def __init__(
self, num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,
):
super(PoisonedDoorsExpertTrajectoryIterator, self).__init__()
self.np_seeded_random_gen, _ = typing.cast(
Tuple[np.random.RandomState, Any], seeding.np_random(0)
)
self.ndoors = num_doors
self.nrollouts = nrollouts
self.rollout_len = rollout_len
self.dataset_size = dataset_size
self.initial_observations = np.zeros(
(rollout_len, nrollouts, 1), dtype=np.int64
)
self.mask = np.zeros((rollout_len, nrollouts, 1), dtype=np.float32)
self.expert_actions = np.random.randint(
4, 3 + num_doors, size=(self.dataset_size, 1)
)
self.current_ind = 0
def __next__(self) -> Dict[str, torch.Tensor]:
start = self.current_ind
end = self.current_ind + self.nrollouts * self.rollout_len
if end > self.dataset_size:
raise StopIteration()
self.current_ind = end
return {
"masks": torch.from_numpy(self.mask),
"poisoned_door_state": torch.from_numpy(self.initial_observations),
"expert_action": torch.from_numpy(
self.expert_actions[start:end].reshape(
(self.rollout_len, self.nrollouts)
)
),
}
def create_poisoneddoors_offpolicy_data_iterator(
num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,
) -> PoisonedDoorsExpertTrajectoryIterator:
return PoisonedDoorsExpertTrajectoryIterator(
num_doors=num_doors,
nrollouts=nrollouts,
rollout_len=rollout_len,
dataset_size=dataset_size,
)
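# Editorial addition (usage sketch only): each call to __next__ consumes
# nrollouts * rollout_len expert actions and raises StopIteration once dataset_size is exhausted.
def _expert_iterator_example():
    it = create_poisoneddoors_offpolicy_data_iterator(
        num_doors=4, nrollouts=2, rollout_len=3, dataset_size=12
    )
    batch = next(it)
    assert batch["expert_action"].shape == (3, 2)        # (rollout_len, nrollouts)
    assert batch["masks"].shape == (3, 2, 1)
    assert batch["poisoned_door_state"].shape == (3, 2, 1)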
| [
"torch.ones",
"torch.from_numpy"
] | 1.8.0 | allenai/advisor | 6849755042c6dab1488f64cf21bde2322add3cc1 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Callable, Dict, List, Optional, Tuple
from unittest import mock
import numpy as np
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch import Tensor, tensor
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from flash.core.data.auto_dataset import AutoDataset, IterableAutoDataset
from flash.core.data.batch import _PostProcessor, _PreProcessor
from flash.core.data.data_module import DataModule
from flash.core.data.data_pipeline import _StageOrchestrator, DataPipeline
from flash.core.data.data_source import DataSource
from flash.core.data.process import DefaultPreprocess, Postprocess, Preprocess
from flash.core.model import Task
from flash.core.utilities.imports import _IMAGE_AVAILABLE
if _IMAGE_AVAILABLE:
import torchvision.transforms as T
from PIL import Image
class DummyDataset(torch.utils.data.Dataset):
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
return torch.rand(1), torch.rand(1)
def __len__(self) -> int:
return 5
@pytest.mark.parametrize("use_preprocess", [False, True])
@pytest.mark.parametrize("use_postprocess", [False, True])
def test_data_pipeline_init_and_assignement(use_preprocess, use_postprocess, tmpdir):
class CustomModel(Task):
def __init__(self, postprocess: Optional[Postprocess] = None):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
self._postprocess = postprocess
def train_dataloader(self) -> Any:
return DataLoader(DummyDataset())
class SubPreprocess(DefaultPreprocess):
pass
class SubPostprocess(Postprocess):
pass
data_pipeline = DataPipeline(
preprocess=SubPreprocess() if use_preprocess else None,
postprocess=SubPostprocess() if use_postprocess else None,
)
assert isinstance(data_pipeline._preprocess_pipeline, SubPreprocess if use_preprocess else DefaultPreprocess)
assert isinstance(data_pipeline._postprocess_pipeline, SubPostprocess if use_postprocess else Postprocess)
model = CustomModel(postprocess=Postprocess())
model.data_pipeline = data_pipeline
# TODO: the line below should make the same effect but it's not
# data_pipeline._attach_to_model(model)
if use_preprocess:
assert isinstance(model._preprocess, SubPreprocess)
else:
assert model._preprocess is None or isinstance(model._preprocess, Preprocess)
if use_postprocess:
assert isinstance(model._postprocess, SubPostprocess)
else:
assert model._postprocess is None or isinstance(model._postprocess, Postprocess)
def test_data_pipeline_is_overriden_and_resolve_function_hierarchy(tmpdir):
class CustomPreprocess(DefaultPreprocess):
def val_pre_tensor_transform(self, *_, **__):
pass
def predict_to_tensor_transform(self, *_, **__):
pass
def train_post_tensor_transform(self, *_, **__):
pass
def test_collate(self, *_, **__):
pass
def val_per_sample_transform_on_device(self, *_, **__):
pass
def train_per_batch_transform_on_device(self, *_, **__):
pass
def test_per_batch_transform_on_device(self, *_, **__):
pass
preprocess = CustomPreprocess()
data_pipeline = DataPipeline(preprocess=preprocess)
train_func_names: Dict[str, str] = {
k: data_pipeline._resolve_function_hierarchy(
k, data_pipeline._preprocess_pipeline, RunningStage.TRAINING, Preprocess
)
for k in data_pipeline.PREPROCESS_FUNCS
}
val_func_names: Dict[str, str] = {
k: data_pipeline._resolve_function_hierarchy(
k, data_pipeline._preprocess_pipeline, RunningStage.VALIDATING, Preprocess
)
for k in data_pipeline.PREPROCESS_FUNCS
}
test_func_names: Dict[str, str] = {
k: data_pipeline._resolve_function_hierarchy(
k, data_pipeline._preprocess_pipeline, RunningStage.TESTING, Preprocess
)
for k in data_pipeline.PREPROCESS_FUNCS
}
predict_func_names: Dict[str, str] = {
k: data_pipeline._resolve_function_hierarchy(
k, data_pipeline._preprocess_pipeline, RunningStage.PREDICTING, Preprocess
)
for k in data_pipeline.PREPROCESS_FUNCS
}
# pre_tensor_transform
assert train_func_names["pre_tensor_transform"] == "pre_tensor_transform"
assert val_func_names["pre_tensor_transform"] == "val_pre_tensor_transform"
assert test_func_names["pre_tensor_transform"] == "pre_tensor_transform"
assert predict_func_names["pre_tensor_transform"] == "pre_tensor_transform"
# to_tensor_transform
assert train_func_names["to_tensor_transform"] == "to_tensor_transform"
assert val_func_names["to_tensor_transform"] == "to_tensor_transform"
assert test_func_names["to_tensor_transform"] == "to_tensor_transform"
assert predict_func_names["to_tensor_transform"] == "predict_to_tensor_transform"
# post_tensor_transform
assert train_func_names["post_tensor_transform"] == "train_post_tensor_transform"
assert val_func_names["post_tensor_transform"] == "post_tensor_transform"
assert test_func_names["post_tensor_transform"] == "post_tensor_transform"
assert predict_func_names["post_tensor_transform"] == "post_tensor_transform"
# collate
assert train_func_names["collate"] == "collate"
assert val_func_names["collate"] == "collate"
assert test_func_names["collate"] == "test_collate"
assert predict_func_names["collate"] == "collate"
# per_sample_transform_on_device
assert train_func_names["per_sample_transform_on_device"] == "per_sample_transform_on_device"
assert val_func_names["per_sample_transform_on_device"] == "val_per_sample_transform_on_device"
assert test_func_names["per_sample_transform_on_device"] == "per_sample_transform_on_device"
assert predict_func_names["per_sample_transform_on_device"] == "per_sample_transform_on_device"
# per_batch_transform_on_device
assert train_func_names["per_batch_transform_on_device"] == "train_per_batch_transform_on_device"
assert val_func_names["per_batch_transform_on_device"] == "per_batch_transform_on_device"
assert test_func_names["per_batch_transform_on_device"] == "test_per_batch_transform_on_device"
assert predict_func_names["per_batch_transform_on_device"] == "per_batch_transform_on_device"
train_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TRAINING)
val_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.VALIDATING)
test_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TESTING)
predict_worker_preprocessor = data_pipeline.worker_preprocessor(RunningStage.PREDICTING)
_seq = train_worker_preprocessor.per_sample_transform
assert _seq.pre_tensor_transform.func == preprocess.pre_tensor_transform
assert _seq.to_tensor_transform.func == preprocess.to_tensor_transform
assert _seq.post_tensor_transform.func == preprocess.train_post_tensor_transform
assert train_worker_preprocessor.collate_fn.func == preprocess.collate
assert train_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform
_seq = val_worker_preprocessor.per_sample_transform
assert _seq.pre_tensor_transform.func == preprocess.val_pre_tensor_transform
assert _seq.to_tensor_transform.func == preprocess.to_tensor_transform
assert _seq.post_tensor_transform.func == preprocess.post_tensor_transform
assert val_worker_preprocessor.collate_fn.func == DataPipeline._identity
assert val_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform
_seq = test_worker_preprocessor.per_sample_transform
assert _seq.pre_tensor_transform.func == preprocess.pre_tensor_transform
assert _seq.to_tensor_transform.func == preprocess.to_tensor_transform
assert _seq.post_tensor_transform.func == preprocess.post_tensor_transform
assert test_worker_preprocessor.collate_fn.func == preprocess.test_collate
assert test_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform
_seq = predict_worker_preprocessor.per_sample_transform
assert _seq.pre_tensor_transform.func == preprocess.pre_tensor_transform
assert _seq.to_tensor_transform.func == preprocess.predict_to_tensor_transform
assert _seq.post_tensor_transform.func == preprocess.post_tensor_transform
assert predict_worker_preprocessor.collate_fn.func == preprocess.collate
assert predict_worker_preprocessor.per_batch_transform.func == preprocess.per_batch_transform
class CustomPreprocess(DefaultPreprocess):
def train_per_sample_transform(self, *_, **__):
pass
def train_per_batch_transform_on_device(self, *_, **__):
pass
def test_per_sample_transform(self, *_, **__):
pass
def test_per_batch_transform(self, *_, **__):
pass
def test_per_sample_transform_on_device(self, *_, **__):
pass
def test_per_batch_transform_on_device(self, *_, **__):
pass
def val_per_batch_transform(self, *_, **__):
pass
def val_per_sample_transform_on_device(self, *_, **__):
pass
def predict_per_sample_transform(self, *_, **__):
pass
def predict_per_sample_transform_on_device(self, *_, **__):
pass
def predict_per_batch_transform_on_device(self, *_, **__):
pass
def test_data_pipeline_predict_worker_preprocessor_and_device_preprocessor():
preprocess = CustomPreprocess()
data_pipeline = DataPipeline(preprocess=preprocess)
data_pipeline.worker_preprocessor(RunningStage.TRAINING)
with pytest.raises(MisconfigurationException, match="are mutually exclusive"):
data_pipeline.worker_preprocessor(RunningStage.VALIDATING)
with pytest.raises(MisconfigurationException, match="are mutually exclusive"):
data_pipeline.worker_preprocessor(RunningStage.TESTING)
data_pipeline.worker_preprocessor(RunningStage.PREDICTING)
def test_detach_preprocessing_from_model(tmpdir):
class CustomModel(Task):
def __init__(self, postprocess: Optional[Postprocess] = None):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
self._postprocess = postprocess
def train_dataloader(self) -> Any:
return DataLoader(DummyDataset())
preprocess = CustomPreprocess()
data_pipeline = DataPipeline(preprocess=preprocess)
model = CustomModel()
model.data_pipeline = data_pipeline
assert model.train_dataloader().collate_fn == default_collate
assert model.transfer_batch_to_device.__self__ == model
model.on_train_dataloader()
assert isinstance(model.train_dataloader().collate_fn, _PreProcessor)
assert isinstance(model.transfer_batch_to_device, _StageOrchestrator)
model.on_fit_end()
assert model.transfer_batch_to_device.__self__ == model
assert model.train_dataloader().collate_fn == default_collate
class TestPreprocess(DefaultPreprocess):
def train_per_sample_transform(self, *_, **__):
pass
def train_per_batch_transform_on_device(self, *_, **__):
pass
def test_per_sample_transform(self, *_, **__):
pass
def test_per_sample_transform_on_device(self, *_, **__):
pass
def test_per_batch_transform_on_device(self, *_, **__):
pass
def val_per_sample_transform_on_device(self, *_, **__):
pass
def predict_per_sample_transform(self, *_, **__):
pass
def predict_per_sample_transform_on_device(self, *_, **__):
pass
def predict_per_batch_transform_on_device(self, *_, **__):
pass
def test_attaching_datapipeline_to_model(tmpdir):
class SubPreprocess(DefaultPreprocess):
pass
preprocess = SubPreprocess()
data_pipeline = DataPipeline(preprocess=preprocess)
class CustomModel(Task):
def __init__(self):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
self._postprocess = Postprocess()
def training_step(self, batch: Any, batch_idx: int) -> Any:
pass
def validation_step(self, batch: Any, batch_idx: int) -> Any:
pass
def test_step(self, batch: Any, batch_idx: int) -> Any:
pass
def train_dataloader(self) -> Any:
return DataLoader(DummyDataset())
def val_dataloader(self) -> Any:
return DataLoader(DummyDataset())
def test_dataloader(self) -> Any:
return DataLoader(DummyDataset())
def predict_dataloader(self) -> Any:
return DataLoader(DummyDataset())
class TestModel(CustomModel):
stages = [RunningStage.TRAINING, RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING]
on_train_start_called = False
on_val_start_called = False
on_test_start_called = False
on_predict_start_called = False
def on_fit_start(self):
assert self.predict_step.__self__ == self
self._saved_predict_step = self.predict_step
def _compare_pre_processor(self, p1, p2):
p1_seq = p1.per_sample_transform
p2_seq = p2.per_sample_transform
assert p1_seq.pre_tensor_transform.func == p2_seq.pre_tensor_transform.func
assert p1_seq.to_tensor_transform.func == p2_seq.to_tensor_transform.func
assert p1_seq.post_tensor_transform.func == p2_seq.post_tensor_transform.func
assert p1.collate_fn.func == p2.collate_fn.func
assert p1.per_batch_transform.func == p2.per_batch_transform.func
def _assert_stage_orchestrator_state(
self, stage_mapping: Dict, current_running_stage: RunningStage, cls=_PreProcessor
):
assert isinstance(stage_mapping[current_running_stage], cls)
assert stage_mapping[current_running_stage]
def on_train_dataloader(self) -> None:
current_running_stage = RunningStage.TRAINING
self.on_train_dataloader_called = True
collate_fn = self.train_dataloader().collate_fn # noqa F811
assert collate_fn == default_collate
assert not isinstance(self.transfer_batch_to_device, _StageOrchestrator)
super().on_train_dataloader()
collate_fn = self.train_dataloader().collate_fn # noqa F811
assert collate_fn.stage == current_running_stage
self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))
assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)
self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)
def on_val_dataloader(self) -> None:
current_running_stage = RunningStage.VALIDATING
self.on_val_dataloader_called = True
collate_fn = self.val_dataloader().collate_fn # noqa F811
assert collate_fn == default_collate
assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)
super().on_val_dataloader()
collate_fn = self.val_dataloader().collate_fn # noqa F811
assert collate_fn.stage == current_running_stage
self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))
assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)
self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)
def on_test_dataloader(self) -> None:
current_running_stage = RunningStage.TESTING
self.on_test_dataloader_called = True
collate_fn = self.test_dataloader().collate_fn # noqa F811
assert collate_fn == default_collate
assert not isinstance(self.transfer_batch_to_device, _StageOrchestrator)
super().on_test_dataloader()
collate_fn = self.test_dataloader().collate_fn # noqa F811
assert collate_fn.stage == current_running_stage
self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))
assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)
self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)
def on_predict_dataloader(self) -> None:
current_running_stage = RunningStage.PREDICTING
self.on_predict_dataloader_called = True
collate_fn = self.predict_dataloader().collate_fn # noqa F811
assert collate_fn == default_collate
assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)
assert self.predict_step == self._saved_predict_step
super().on_predict_dataloader()
collate_fn = self.predict_dataloader().collate_fn # noqa F811
assert collate_fn.stage == current_running_stage
self._compare_pre_processor(collate_fn, self.data_pipeline.worker_preprocessor(current_running_stage))
assert isinstance(self.transfer_batch_to_device, _StageOrchestrator)
assert isinstance(self.predict_step, _StageOrchestrator)
self._assert_stage_orchestrator_state(self.transfer_batch_to_device._stage_mapping, current_running_stage)
self._assert_stage_orchestrator_state(
self.predict_step._stage_mapping, current_running_stage, cls=_PostProcessor
)
def on_fit_end(self) -> None:
super().on_fit_end()
assert self.train_dataloader().collate_fn == default_collate
assert self.val_dataloader().collate_fn == default_collate
assert self.test_dataloader().collate_fn == default_collate
assert self.predict_dataloader().collate_fn == default_collate
assert not isinstance(self.transfer_batch_to_device, _StageOrchestrator)
assert self.predict_step == self._saved_predict_step
model = TestModel()
model.data_pipeline = data_pipeline
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
trainer.test(model)
trainer.predict(model)
assert model.on_train_dataloader_called
assert model.on_val_dataloader_called
assert model.on_test_dataloader_called
assert model.on_predict_dataloader_called
def test_stage_orchestrator_state_attach_detach(tmpdir):
model = CustomModel()
preprocess = TestPreprocess()
_original_predict_step = model.predict_step
class CustomDataPipeline(DataPipeline):
def _attach_postprocess_to_model(self, model: 'Task', _postprocesssor: _PostProcessor) -> 'Task':
model.predict_step = self._model_predict_step_wrapper(model.predict_step, _postprocesssor, model)
return model
data_pipeline = CustomDataPipeline(preprocess=preprocess)
_postprocesssor = data_pipeline._create_uncollate_postprocessors(RunningStage.PREDICTING)
data_pipeline._attach_postprocess_to_model(model, _postprocesssor)
assert model.predict_step._original == _original_predict_step
assert model.predict_step._stage_mapping[RunningStage.PREDICTING] == _postprocesssor
data_pipeline._detach_postprocess_from_model(model)
assert model.predict_step == _original_predict_step
class LamdaDummyDataset(torch.utils.data.Dataset):
def __init__(self, fx: Callable):
self.fx = fx
def __getitem__(self, index: int) -> Any:
return self.fx()
def __len__(self) -> int:
return 5
class TestPreprocessTransformationsDataSource(DataSource):
def __init__(self):
super().__init__()
self.train_load_data_called = False
self.val_load_data_called = False
self.val_load_sample_called = False
self.test_load_data_called = False
self.predict_load_data_called = False
@staticmethod
def fn_train_load_data() -> Tuple:
return (
0,
1,
2,
3,
)
def train_load_data(self, sample) -> LamdaDummyDataset:
assert self.training
assert self.current_fn == "load_data"
self.train_load_data_called = True
return LamdaDummyDataset(self.fn_train_load_data)
def val_load_data(self, sample, dataset) -> List[int]:
assert self.validating
assert self.current_fn == "load_data"
self.val_load_data_called = True
return list(range(5))
def val_load_sample(self, sample) -> Dict[str, Tensor]:
assert self.validating
assert self.current_fn == "load_sample"
self.val_load_sample_called = True
return {"a": sample, "b": sample + 1}
@staticmethod
def fn_test_load_data() -> List[torch.Tensor]:
return [torch.rand(1), torch.rand(1)]
def test_load_data(self, sample) -> LamdaDummyDataset:
assert self.testing
assert self.current_fn == "load_data"
self.test_load_data_called = True
return LamdaDummyDataset(self.fn_test_load_data)
@staticmethod
def fn_predict_load_data() -> List[str]:
return (["a", "b"])
def predict_load_data(self, sample) -> LamdaDummyDataset:
assert self.predicting
assert self.current_fn == "load_data"
self.predict_load_data_called = True
return LamdaDummyDataset(self.fn_predict_load_data)
class TestPreprocessTransformations(DefaultPreprocess):
def __init__(self):
super().__init__(data_sources={"default": TestPreprocessTransformationsDataSource()})
self.train_pre_tensor_transform_called = False
self.train_collate_called = False
self.train_per_batch_transform_on_device_called = False
self.val_to_tensor_transform_called = False
self.val_collate_called = False
self.val_per_batch_transform_on_device_called = False
self.test_to_tensor_transform_called = False
self.test_post_tensor_transform_called = False
def train_pre_tensor_transform(self, sample: Any) -> Any:
assert self.training
assert self.current_fn == "pre_tensor_transform"
self.train_pre_tensor_transform_called = True
return sample + (5, )
def train_collate(self, samples) -> Tensor:
assert self.training
assert self.current_fn == "collate"
self.train_collate_called = True
return tensor([list(s) for s in samples])
def train_per_batch_transform_on_device(self, batch: Any) -> Any:
assert self.training
assert self.current_fn == "per_batch_transform_on_device"
self.train_per_batch_transform_on_device_called = True
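        # Why the expected batch below looks like this (explanatory note, not in the original test):
        # each dataset sample is (0, 1, 2, 3) from ``fn_train_load_data``, ``train_pre_tensor_transform``
        # appends a 5, and the DataLoader with ``batch_size=2`` stacks two such samples.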
assert torch.equal(batch, tensor([[0, 1, 2, 3, 5], [0, 1, 2, 3, 5]]))
def val_to_tensor_transform(self, sample: Any) -> Tensor:
assert self.validating
assert self.current_fn == "to_tensor_transform"
self.val_to_tensor_transform_called = True
return sample
def val_collate(self, samples) -> Dict[str, Tensor]:
assert self.validating
assert self.current_fn == "collate"
self.val_collate_called = True
_count = samples[0]['a']
assert samples == [{'a': _count, 'b': _count + 1}, {'a': _count + 1, 'b': _count + 2}]
return {'a': tensor([0, 1]), 'b': tensor([1, 2])}
def val_per_batch_transform_on_device(self, batch: Any) -> Any:
assert self.validating
assert self.current_fn == "per_batch_transform_on_device"
self.val_per_batch_transform_on_device_called = True
if isinstance(batch, list):
batch = batch[0]
assert torch.equal(batch["a"], tensor([0, 1]))
assert torch.equal(batch["b"], tensor([1, 2]))
return [False]
def test_to_tensor_transform(self, sample: Any) -> Tensor:
assert self.testing
assert self.current_fn == "to_tensor_transform"
self.test_to_tensor_transform_called = True
return sample
def test_post_tensor_transform(self, sample: Tensor) -> Tensor:
assert self.testing
assert self.current_fn == "post_tensor_transform"
self.test_post_tensor_transform_called = True
return sample
class TestPreprocessTransformations2(TestPreprocessTransformations):
def val_to_tensor_transform(self, sample: Any) -> Tensor:
self.val_to_tensor_transform_called = True
return {"a": tensor(sample["a"]), "b": tensor(sample["b"])}
class CustomModel(Task):
def __init__(self):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
def training_step(self, batch, batch_idx):
assert batch is None
def validation_step(self, batch, batch_idx):
if isinstance(batch, list):
batch = batch[0]
assert batch is False
def test_step(self, batch, batch_idx):
assert len(batch) == 2
assert batch[0].shape == torch.Size([2, 1])
def predict_step(self, batch, batch_idx, dataloader_idx):
assert batch[0][0] == 'a'
assert batch[0][1] == 'a'
assert batch[1][0] == 'b'
assert batch[1][1] == 'b'
return tensor([0, 0, 0])
def test_datapipeline_transformations(tmpdir):
datamodule = DataModule.from_data_source(
"default", 1, 1, 1, 1, batch_size=2, num_workers=0, preprocess=TestPreprocessTransformations()
)
assert datamodule.train_dataloader().dataset[0] == (0, 1, 2, 3)
batch = next(iter(datamodule.train_dataloader()))
assert torch.equal(batch, tensor([[0, 1, 2, 3, 5], [0, 1, 2, 3, 5]]))
assert datamodule.val_dataloader().dataset[0] == {'a': 0, 'b': 1}
assert datamodule.val_dataloader().dataset[1] == {'a': 1, 'b': 2}
with pytest.raises(MisconfigurationException, match="When ``to_tensor_transform``"):
batch = next(iter(datamodule.val_dataloader()))
datamodule = DataModule.from_data_source(
"default", 1, 1, 1, 1, batch_size=2, num_workers=0, preprocess=TestPreprocessTransformations2()
)
batch = next(iter(datamodule.val_dataloader()))
assert torch.equal(batch["a"], tensor([0, 1]))
assert torch.equal(batch["b"], tensor([1, 2]))
model = CustomModel()
trainer = Trainer(
max_epochs=1,
limit_train_batches=2,
limit_val_batches=1,
limit_test_batches=2,
limit_predict_batches=2,
num_sanity_val_steps=1
)
trainer.fit(model, datamodule=datamodule)
trainer.test(model)
trainer.predict(model)
preprocess = model._preprocess
data_source = preprocess.data_source_of_name("default")
assert data_source.train_load_data_called
assert preprocess.train_pre_tensor_transform_called
assert preprocess.train_collate_called
assert preprocess.train_per_batch_transform_on_device_called
assert data_source.val_load_data_called
assert data_source.val_load_sample_called
assert preprocess.val_to_tensor_transform_called
assert preprocess.val_collate_called
assert preprocess.val_per_batch_transform_on_device_called
assert data_source.test_load_data_called
assert preprocess.test_to_tensor_transform_called
assert preprocess.test_post_tensor_transform_called
assert data_source.predict_load_data_called
def test_is_overriden_recursive(tmpdir):
class TestPreprocess(DefaultPreprocess):
def collate(self, *_):
pass
def val_collate(self, *_):
pass
preprocess = TestPreprocess()
assert DataPipeline._is_overriden_recursive("collate", preprocess, Preprocess, prefix="val")
assert DataPipeline._is_overriden_recursive("collate", preprocess, Preprocess, prefix="train")
assert not DataPipeline._is_overriden_recursive(
"per_batch_transform_on_device", preprocess, Preprocess, prefix="train"
)
assert not DataPipeline._is_overriden_recursive("per_batch_transform_on_device", preprocess, Preprocess)
with pytest.raises(MisconfigurationException, match="This function doesn't belong to the parent class"):
assert not DataPipeline._is_overriden_recursive("chocolate", preprocess, Preprocess)
@pytest.mark.skipif(not _IMAGE_AVAILABLE, reason="image libraries aren't installed.")
@mock.patch("torch.save") # need to mock torch.save or we get pickle error
def test_dummy_example(tmpdir):
class ImageDataSource(DataSource):
def load_data(self, folder: str):
# from folder -> return files paths
return ["a.jpg", "b.jpg"]
def load_sample(self, path: str) -> Image.Image:
# from a file path, load the associated image
img8Bit = np.uint8(np.random.uniform(0, 1, (64, 64, 3)) * 255.0)
return Image.fromarray(img8Bit)
class ImageClassificationPreprocess(DefaultPreprocess):
def __init__(
self,
train_transform=None,
val_transform=None,
test_transform=None,
predict_transform=None,
to_tensor_transform=None,
train_per_sample_transform_on_device=None,
):
super().__init__(
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_sources={"default": ImageDataSource()},
)
self._to_tensor = to_tensor_transform
self._train_per_sample_transform_on_device = train_per_sample_transform_on_device
def to_tensor_transform(self, pil_image: Image.Image) -> Tensor:
# convert pil image into a tensor
return self._to_tensor(pil_image)
def train_per_sample_transform_on_device(self, sample: Any) -> Any:
# apply an augmentation per sample on gpu for train only
return self._train_per_sample_transform_on_device(sample)
class CustomModel(Task):
def __init__(self):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
def training_step(self, batch, batch_idx):
assert batch.shape == torch.Size([2, 3, 64, 64])
def validation_step(self, batch, batch_idx):
assert batch.shape == torch.Size([2, 3, 64, 64])
def test_step(self, batch, batch_idx):
assert batch.shape == torch.Size([2, 3, 64, 64])
class CustomDataModule(DataModule):
preprocess_cls = ImageClassificationPreprocess
datamodule = CustomDataModule.from_data_source(
"default",
"train_folder",
"val_folder",
"test_folder",
None,
batch_size=2,
to_tensor_transform=T.ToTensor(),
train_per_sample_transform_on_device=T.RandomHorizontalFlip(),
)
assert isinstance(datamodule.train_dataloader().dataset[0], Image.Image)
batch = next(iter(datamodule.train_dataloader()))
assert batch[0].shape == torch.Size([3, 64, 64])
model = CustomModel()
trainer = Trainer(
max_epochs=1,
limit_train_batches=2,
limit_val_batches=1,
limit_test_batches=2,
limit_predict_batches=2,
num_sanity_val_steps=1
)
trainer.fit(model, datamodule=datamodule)
trainer.test(model)
def test_preprocess_transforms(tmpdir):
"""
    This test makes sure that when a preprocess is provided with transforms as dictionaries,
    the checking is done properly and collate_in_worker_from_transform is properly extracted.
"""
with pytest.raises(MisconfigurationException, match="Transform should be a dict."):
DefaultPreprocess(train_transform="choco")
with pytest.raises(MisconfigurationException, match="train_transform contains {'choco'}. Only"):
DefaultPreprocess(train_transform={"choco": None})
preprocess = DefaultPreprocess(train_transform={"to_tensor_transform": torch.nn.Linear(1, 1)})
# keep is None
assert preprocess._train_collate_in_worker_from_transform is True
assert preprocess._val_collate_in_worker_from_transform is None
assert preprocess._test_collate_in_worker_from_transform is None
assert preprocess._predict_collate_in_worker_from_transform is None
with pytest.raises(MisconfigurationException, match="`per_batch_transform` and `per_sample_transform_on_device`"):
preprocess = DefaultPreprocess(
train_transform={
"per_batch_transform": torch.nn.Linear(1, 1),
"per_sample_transform_on_device": torch.nn.Linear(1, 1)
}
)
preprocess = DefaultPreprocess(
train_transform={"per_batch_transform": torch.nn.Linear(1, 1)},
predict_transform={"per_sample_transform_on_device": torch.nn.Linear(1, 1)}
)
# keep is None
assert preprocess._train_collate_in_worker_from_transform is True
assert preprocess._val_collate_in_worker_from_transform is None
assert preprocess._test_collate_in_worker_from_transform is None
assert preprocess._predict_collate_in_worker_from_transform is False
train_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.TRAINING)
val_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.VALIDATING)
test_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.TESTING)
predict_preprocessor = DataPipeline(preprocess=preprocess).worker_preprocessor(RunningStage.PREDICTING)
assert train_preprocessor.collate_fn.func == preprocess.collate
assert val_preprocessor.collate_fn.func == preprocess.collate
assert test_preprocessor.collate_fn.func == preprocess.collate
assert predict_preprocessor.collate_fn.func == DataPipeline._identity
class CustomPreprocess(DefaultPreprocess):
def per_sample_transform_on_device(self, sample: Any) -> Any:
return super().per_sample_transform_on_device(sample)
def per_batch_transform(self, batch: Any) -> Any:
return super().per_batch_transform(batch)
preprocess = CustomPreprocess(
train_transform={"per_batch_transform": torch.nn.Linear(1, 1)},
predict_transform={"per_sample_transform_on_device": torch.nn.Linear(1, 1)}
)
# keep is None
assert preprocess._train_collate_in_worker_from_transform is True
assert preprocess._val_collate_in_worker_from_transform is None
assert preprocess._test_collate_in_worker_from_transform is None
assert preprocess._predict_collate_in_worker_from_transform is False
data_pipeline = DataPipeline(preprocess=preprocess)
train_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TRAINING)
with pytest.raises(MisconfigurationException, match="`per_batch_transform` and `per_sample_transform_on_device`"):
val_preprocessor = data_pipeline.worker_preprocessor(RunningStage.VALIDATING)
with pytest.raises(MisconfigurationException, match="`per_batch_transform` and `per_sample_transform_on_device`"):
test_preprocessor = data_pipeline.worker_preprocessor(RunningStage.TESTING)
predict_preprocessor = data_pipeline.worker_preprocessor(RunningStage.PREDICTING)
assert train_preprocessor.collate_fn.func == preprocess.collate
assert predict_preprocessor.collate_fn.func == DataPipeline._identity
def test_iterable_auto_dataset(tmpdir):
class CustomDataSource(DataSource):
def load_sample(self, index: int) -> Dict[str, int]:
return {"index": index}
ds = IterableAutoDataset(range(10), data_source=CustomDataSource(), running_stage=RunningStage.TRAINING)
for index, v in enumerate(ds):
assert v == {"index": index}
class CustomPreprocessHyperparameters(DefaultPreprocess):
def __init__(self, token: str, *args, **kwargs):
self.token = token
super().__init__(*args, **kwargs)
@classmethod
def load_from_state_dict(cls, state_dict: Dict[str, Any]):
return cls(state_dict["token"])
def state_dict(self) -> Dict[str, Any]:
return {"token": self.token}
def local_fn(x):
return x
def test_save_hyperparameters(tmpdir):
kwargs = {"train_transform": {"pre_tensor_transform": local_fn}}
preprocess = CustomPreprocessHyperparameters("token", **kwargs)
state_dict = preprocess.state_dict()
torch.save(state_dict, os.path.join(tmpdir, "state_dict.pt"))
state_dict = torch.load(os.path.join(tmpdir, "state_dict.pt"))
preprocess = CustomPreprocessHyperparameters.load_from_state_dict(state_dict)
assert isinstance(preprocess, CustomPreprocessHyperparameters)
| [
"torch.Size",
"torch.rand",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.tensor"
] | 1.7 | charlesjhill/lightning-flash | 2b19acbb5d627c609f2f7e13b48006e157781718 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import Mock
import pytest
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import DataLoader
from flash import Task, Trainer
from flash.core.classification import Labels, LabelsState
from flash.core.data.data_module import DataModule
from flash.core.data.data_pipeline import DataPipeline, DataPipelineState, DefaultPreprocess
from flash.core.data.data_source import DefaultDataSources
from flash.core.data.process import Serializer, SerializerMapping
from flash.core.data.properties import ProcessState, Properties
def test_properties_data_pipeline_state():
"""Tests that ``get_state`` and ``set_state`` work for properties and that ``DataPipelineState`` is attached
correctly."""
class MyProcessState1(ProcessState):
pass
class MyProcessState2(ProcessState):
pass
class OtherProcessState(ProcessState):
pass
my_properties = Properties()
my_properties.set_state(MyProcessState1())
assert my_properties._state == {MyProcessState1: MyProcessState1()}
assert my_properties.get_state(OtherProcessState) is None
data_pipeline_state = DataPipelineState()
data_pipeline_state.set_state(OtherProcessState())
my_properties.attach_data_pipeline_state(data_pipeline_state)
assert my_properties.get_state(OtherProcessState) == OtherProcessState()
my_properties.set_state(MyProcessState2())
assert data_pipeline_state.get_state(MyProcessState2) == MyProcessState2()
def test_serializer():
"""Tests that ``Serializer`` can be enabled and disabled correctly."""
my_serializer = Serializer()
assert my_serializer.serialize('test') == 'test'
my_serializer.serialize = Mock()
my_serializer.disable()
assert my_serializer('test') == 'test'
my_serializer.serialize.assert_not_called()
my_serializer.enable()
my_serializer('test')
my_serializer.serialize.assert_called_once()
def test_serializer_mapping():
"""Tests that ``SerializerMapping`` correctly passes its inputs to the underlying serializers. Also checks that
state is retrieved / loaded correctly."""
serializer1 = Serializer()
serializer1.serialize = Mock(return_value='test1')
class Serializer1State(ProcessState):
pass
serializer2 = Serializer()
serializer2.serialize = Mock(return_value='test2')
class Serializer2State(ProcessState):
pass
serializer_mapping = SerializerMapping({'key1': serializer1, 'key2': serializer2})
assert serializer_mapping({'key1': 'serializer1', 'key2': 'serializer2'}) == {'key1': 'test1', 'key2': 'test2'}
serializer1.serialize.assert_called_once_with('serializer1')
serializer2.serialize.assert_called_once_with('serializer2')
with pytest.raises(ValueError, match='output must be a mapping'):
serializer_mapping('not a mapping')
serializer1_state = Serializer1State()
serializer2_state = Serializer2State()
serializer1.set_state(serializer1_state)
serializer2.set_state(serializer2_state)
data_pipeline_state = DataPipelineState()
serializer_mapping.attach_data_pipeline_state(data_pipeline_state)
assert serializer1._data_pipeline_state is data_pipeline_state
assert serializer2._data_pipeline_state is data_pipeline_state
assert data_pipeline_state.get_state(Serializer1State) is serializer1_state
assert data_pipeline_state.get_state(Serializer2State) is serializer2_state
def test_saving_with_serializers(tmpdir):
checkpoint_file = os.path.join(tmpdir, 'tmp.ckpt')
class CustomModel(Task):
def __init__(self):
super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss())
serializer = Labels(["a", "b"])
model = CustomModel()
trainer = Trainer(fast_dev_run=True)
data_pipeline = DataPipeline(preprocess=DefaultPreprocess(), serializer=serializer)
data_pipeline.initialize()
model.data_pipeline = data_pipeline
assert isinstance(model.preprocess, DefaultPreprocess)
dummy_data = DataLoader(list(zip(torch.arange(10, dtype=torch.float), torch.arange(10, dtype=torch.float))))
trainer.fit(model, train_dataloader=dummy_data)
trainer.save_checkpoint(checkpoint_file)
model = CustomModel.load_from_checkpoint(checkpoint_file)
assert isinstance(model._data_pipeline_state, DataPipelineState)
assert model._data_pipeline_state._state[LabelsState] == LabelsState(["a", "b"])
class CustomPreprocess(DefaultPreprocess):
def __init__(self):
super().__init__(
data_sources={
"test": Mock(return_value="test"),
DefaultDataSources.TENSORS: Mock(return_value="tensors"),
},
default_data_source="test",
)
def test_data_source_of_name():
preprocess = CustomPreprocess()
assert preprocess.data_source_of_name("test")() == "test"
assert preprocess.data_source_of_name(DefaultDataSources.TENSORS)() == "tensors"
assert preprocess.data_source_of_name("tensors")() == "tensors"
assert preprocess.data_source_of_name("default")() == "test"
with pytest.raises(MisconfigurationException, match="available data sources are: test, tensor"):
preprocess.data_source_of_name("not available")
def test_available_data_sources():
preprocess = CustomPreprocess()
assert DefaultDataSources.TENSORS in preprocess.available_data_sources()
assert "test" in preprocess.available_data_sources()
assert len(preprocess.available_data_sources()) == 3
data_module = DataModule(preprocess=preprocess)
assert DefaultDataSources.TENSORS in data_module.available_data_sources()
assert "test" in data_module.available_data_sources()
assert len(data_module.available_data_sources()) == 3
| [
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.arange"
] | 1.7 | charlesjhill/lightning-flash | 2b19acbb5d627c609f2f7e13b48006e157781718 |
1.5 | import torch as to
BASE_GRAPH = to.tensor([[0, 1, 1, 0],
[1, 0, 1, 0],
[1, 1, 0, 1],
[0, 0, 1, 0]])
BASE_GRAPH_NODE_FEATURES = to.tensor([[1, 2], [1, 1], [2, 0.5], [0.5, 0.5]])
BASE_GRAPH_EDGE_FEATURES = to.tensor([[[0.0, 0.0], [1.0, 2.0], [2.0, 0.5], [0.0, 0.0]],
[[1.0, 2.0], [0.0, 0.0], [1.0, 1.0], [0.0, 0.0]],
[[2.0, 0.5], [1.0, 1.0], [0.0, 0.0], [0.5, 0.5]],
[[0.0, 0.0], [0.0, 0.0], [0.5, 0.5], [0.0, 0.0]]])
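# Hedged illustration (not part of the original module): a small sanity check, assuming
# torch >= 1.5, showing how these fixtures relate -- the adjacency matrix is symmetric
# and edge features are non-zero only where an edge exists.
def _check_base_graph_fixtures() -> None:
    assert to.equal(BASE_GRAPH, BASE_GRAPH.t())
    edge_mask = BASE_GRAPH.bool().unsqueeze(-1).expand_as(BASE_GRAPH_EDGE_FEATURES)
    # every edge-feature entry outside an existing edge is zero
    assert to.all(BASE_GRAPH_EDGE_FEATURES.masked_select(~edge_mask) == 0)
    # one feature vector of size 2 per node
    assert BASE_GRAPH_NODE_FEATURES.shape == (4, 2)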
| [
"torch.tensor"
] | 1.5.0 | kovanostra/message-passing-nn | 6617a4753173c8fffc60140b9d8d0f497b33aed4 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
import torch
from torch import Tensor, tensor
from torchmetrics.functional.regression.mean_squared_error import (
_mean_squared_error_compute,
_mean_squared_error_update,
)
from torchmetrics.metric import Metric
class MeanSquaredError(Metric):
r"""
Computes `mean squared error`_ (MSE):
.. math:: \text{MSE} = \frac{1}{N}\sum_i^N(y_i - \hat{y_i})^2
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
Args:
compute_on_step:
            Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
squared:
If True returns MSE value, if False returns RMSE value.
Example:
>>> from torchmetrics import MeanSquaredError
>>> target = torch.tensor([2.5, 5.0, 4.0, 8.0])
>>> preds = torch.tensor([3.0, 5.0, 2.5, 7.0])
>>> mean_squared_error = MeanSquaredError()
>>> mean_squared_error(preds, target)
tensor(0.8750)
"""
is_differentiable = True
sum_squared_error: Tensor
total: Tensor
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
squared: bool = True,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
self.squared = squared
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
"""Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
sum_squared_error, n_obs = _mean_squared_error_update(preds, target)
self.sum_squared_error += sum_squared_error
self.total += n_obs
def compute(self) -> Tensor:
"""Computes mean squared error over state."""
return _mean_squared_error_compute(self.sum_squared_error, self.total, squared=self.squared)
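# Hedged usage sketch (added for illustration, not part of the original file):
# with ``squared=False`` the same metric reports RMSE instead of MSE.
def _example_rmse_usage() -> Tensor:
    target = torch.tensor([2.5, 5.0, 4.0, 8.0])
    preds = torch.tensor([3.0, 5.0, 2.5, 7.0])
    rmse = MeanSquaredError(squared=False)
    # roughly tensor(0.9354), the square root of the MSE shown in the class docstring
    return rmse(preds, target)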
| [
"torch.tensor"
] | 1.3.1 | bibinwils/metrics | e1c3fda24f90367803c2b04315ad7c8bced719db |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model."""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
        # If token_type_ids was not passed, use the registered buffer from the constructor,
        # which is all zeros. This usually happens when the ids are auto-generated; the
        # registered buffer lets users trace the model without passing token_type_ids and
        # solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
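# Hedged sketch (illustration only, not the library's implementation): the behaviour that
# ``forward`` relies on above -- padded tokens keep ``padding_idx`` as their position id and
# real tokens are numbered from ``padding_idx + 1`` onwards -- could look roughly like this:
def _sketch_create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx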
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple:
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
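            # distance[i, j] = i - j: how far query position i sits from key position j;
            # shifting by (max_position_embeddings - 1) below maps it to a valid, non-negative
            # index into the relative distance embedding table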
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple:
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
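                # torch.utils.checkpoint.checkpoint does not forward keyword arguments, so wrap
                # the layer call in a closure over past_key_value and output_attentions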
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, RobertaEncoder):
module.gradient_checkpointing = value
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
ROBERTA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`RobertaConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in *Attention is
all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
.. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
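# --- Editor's hedged illustration of the attention-mask broadcasting described above. ---
# This is not the library helper itself, only a minimal sketch of the idea: a 2D padding
# mask of shape (batch_size, seq_length) is expanded to (batch_size, 1, 1, seq_length) so
# it broadcasts over heads and query positions, and masked slots are pushed to a large
# negative value that vanishes after the softmax. All values below are made up.
def _example_extended_attention_mask():
    attention_mask = torch.tensor([[1, 1, 1, 0]])  # 1 = real token, 0 = padding
    extended = attention_mask[:, None, None, :].float()           # (1, 1, 1, 4)
    extended = (1.0 - extended) * torch.finfo(torch.float32).min  # 0 for kept, huge negative for masked
    return extended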
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
Returns:
Example:
```python
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained("roberta-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
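# --- Editor's hedged sketch of the generation cache helpers above. ---
# A minimal illustration, not part of the original file: `prepare_inputs_for_generation`
# keeps only the last token once `past` is supplied, and `_reorder_cache` re-indexes every
# cached tensor along the batch dimension during beam search. The cache below has the right
# nesting (layers -> key/value tensors) but arbitrary, made-up sizes.
def _example_cache_helpers(model):
    # `model` is assumed to be a RobertaForCausalLM instance.
    input_ids = torch.tensor([[0, 31414, 232, 2]])
    fake_past = ((torch.rand(1, 12, 3, 64), torch.rand(1, 12, 3, 64)),)  # one layer only
    inputs = model.prepare_inputs_for_generation(input_ids, past=fake_past)
    # Only the last token is fed when a cache exists: inputs["input_ids"].shape == (1, 1)
    reordered = model._reorder_cache(fake_past, beam_idx=torch.tensor([0]))
    return inputs, reordered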
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, MultipleChoiceModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
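# --- Editor's hedged usage sketch for RobertaClassificationHead. ---
# A minimal, self-contained example showing that the head only reads the first (<s>)
# token of the encoder output; the hidden size and label count are made up and are not
# tied to any released checkpoint.
def _example_classification_head():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=16, classifier_dropout=None,
                          hidden_dropout_prob=0.1, num_labels=3)
    head = RobertaClassificationHead(cfg)
    hidden_states = torch.rand(2, 7, 16)  # (batch_size, seq_length, hidden_size)
    logits = head(hidden_states)          # (batch_size, num_labels) == (2, 3)
    return logits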
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, splitting adds an extra dimension, so squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids (torch.Tensor): tensor of input token ids.
padding_idx (int): index of the padding token.
past_key_values_length (int): length of any cached decoding prefix.
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
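# --- Editor's hedged worked example for create_position_ids_from_input_ids. ---
# The token ids below are made up; with padding_idx == 1, padding positions stay at 1
# while real tokens receive incremental positions starting at padding_idx + 1.
def _example_create_position_ids():
    input_ids = torch.tensor([[5, 6, 7, 1, 1]])
    positions = create_position_ids_from_input_ids(input_ids, padding_idx=1)
    # mask = [1, 1, 1, 0, 0] -> cumsum = [1, 2, 3, 3, 3] -> masked = [1, 2, 3, 0, 0]
    # positions == tensor([[2, 3, 4, 1, 1]])
    return positions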
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.MSELoss",
"torch.arange",
"torch.einsum",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.softmax",
"torch.tanh",
"torch.matmul",
"torch.nn.Embedding",
"torch.cumsum"
] | 1.0 | khoih-prog/transformers | 77321481247787c97568c3b9f64b19e22351bab8 |
1.2 | # encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions and metrics."""
import torch as th
__all__ = ["RelativeMSE", "SMAPE", "TonemappedMSE", "TonemappedRelativeMSE"]
class RelativeMSE(th.nn.Module):
"""Relative Mean-Squared Error.
:math:`0.5 * \\frac{(x - y)^2}{y^2 + \\epsilon}`
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=1e-2):
super(RelativeMSE, self).__init__()
self.eps = eps
def forward(self, im, ref):
"""Evaluate the metric.
Args:
im(th.Tensor): image.
ref(th.Tensor): reference.
"""
mse = th.pow(im-ref, 2)
loss = mse/(th.pow(ref, 2) + self.eps)
loss = 0.5*th.mean(loss)
return loss
class SMAPE(th.nn.Module):
"""Symmetric Mean Absolute error.
:math:`\\frac{|x - y|} {|x| + |y| + \epsilon}`
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=1e-2):
super(SMAPE, self).__init__()
self.eps = eps
def forward(self, im, ref):
# NOTE: the denominator is used to scale the loss, but does not
# contribute gradients, hence the '.detach()' call.
loss = (th.abs(im-ref) / (
self.eps + th.abs(im.detach()) + th.abs(ref.detach()))).mean()
return loss
class TonemappedMSE(th.nn.Module):
"""Mean-squared error on tonemaped images.
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=1e-2):
super(TonemappedMSE, self).__init__()
self.eps = eps # avoid division by zero
def forward(self, im, ref):
im = _tonemap(im)
ref = _tonemap(ref)
loss = th.pow(im-ref, 2)
loss = 0.5*th.mean(loss)
return loss
class TonemappedRelativeMSE(th.nn.Module):
"""Relative mean-squared error on tonemaped images.
Args:
eps(float): small number to avoid division by 0.
"""
def __init__(self, eps=1e-2):
super(TonemappedRelativeMSE, self).__init__()
self.eps = eps # avoid division by zero
def forward(self, im, ref):
im = _tonemap(im)
ref = _tonemap(ref)
mse = th.pow(im-ref, 2)
loss = mse/(th.pow(ref, 2) + self.eps)
loss = 0.5*th.mean(loss)
return loss
def _tonemap(im):
"""Helper Reinhards tonemapper.
Args:
im(th.Tensor): image to tonemap.
Returns:
(th.Tensor) tonemapped image.
"""
im = th.clamp(im, min=0)
return im / (1+im)
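# --- Editor's hedged usage sketch for the losses above. ---
# A minimal example evaluating each metric on random tensors; the shapes and values are
# arbitrary and only serve to show the call convention (prediction first, reference second).
def _example_losses():
    im = th.rand(1, 3, 8, 8)   # stand-in for a denoised estimate
    ref = th.rand(1, 3, 8, 8)  # stand-in for the reference render
    for loss_fn in (RelativeMSE(), SMAPE(), TonemappedMSE(), TonemappedRelativeMSE()):
        value = loss_fn(im, ref)  # scalar tensor
        print(type(loss_fn).__name__, float(value))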
| [
"torch.abs",
"torch.mean",
"torch.clamp",
"torch.pow"
] | 1.2.0 | milebril/Temporal-SBMC-extension | 57c56b73786e49d233facffde4ba80f212a00fa8 |
1.6 | import os
import pandas as pd
import numpy as np
import torch
import json
import joblib
from ..scripts.compute_normalization_factors import annotate_kmer_information, create_kmer_mapping_df, create_norm_dict
from torch.utils.data import DataLoader, Dataset
from torch.utils.data._utils.collate import default_collate
from itertools import product
class NanopolishDS(Dataset):
def __init__(self, root_dir, min_reads, norm_path=None, site_info=None,
num_neighboring_features=1, mode='Inference', site_mode=False,
n_processes=1):
allowed_mode = ('Train', 'Test', 'Val', 'Inference')
if mode not in allowed_mode:
raise ValueError("Invalid mode passed to dataset, must be one of {}".format(allowed_mode))
self.mode = mode
self.site_info = site_info
self.data_info = self.initialize_data_info(root_dir, min_reads)
self.data_fpath = os.path.join(root_dir, "data.json")
self.min_reads = min_reads
self.site_mode = site_mode
if norm_path is not None:
self.norm_dict = joblib.load(norm_path)
else:
self.norm_dict = self.compute_norm_factors(n_processes)
if num_neighboring_features > 5:
raise ValueError("Invalid neighboring features number {}".format(num_neighboring_features))
self.num_neighboring_features = num_neighboring_features
center_motifs = [['A', 'G', 'T'], ['G', 'A'], ['A'], ['C'], ['A', 'C', 'T']]
flanking_motifs = [['G', 'A', 'C', 'T'] for i in range(self.num_neighboring_features)]
all_kmers = list(["".join(x) for x in product(*(flanking_motifs + center_motifs + flanking_motifs))])
self.all_kmers = np.unique(np.array(list(map(lambda x: [x[i:i+5] for i in range(len(x) -4)],
all_kmers))).flatten())
self.kmer_to_int = {self.all_kmers[i]: i for i in range(len(self.all_kmers))}
self.int_to_kmer = {i: self.all_kmers[i] for i in range(len(self.all_kmers))}
# Inferring total number of neighboring features extracted during dataprep step
kmer, _ = self._load_data(0)
self.total_neighboring_features = (len(kmer) - 5) // 2
left_idx = [(self.total_neighboring_features - num_neighboring_features + j) * 3 + i
for j in range(num_neighboring_features) for i in range(3)]
center_idx = [self.total_neighboring_features * 3 + i for i in range(3)]
right_idx = [(self.total_neighboring_features + j) * 3 + i for j in range(1, num_neighboring_features + 1)
for i in range(3)]
self.indices = np.concatenate([left_idx, center_idx, right_idx]).astype('int')
if self.mode != 'Inference':
self.labels = self.data_info["modification_status"].values
def initialize_data_info(self, fpath, min_reads):
data_index = pd.read_csv(os.path.join(fpath, "data.index"))
if self.mode == 'Inference':
read_count = pd.read_csv(os.path.join(fpath, "data.readcount"))
else:
if self.site_info is None:
read_count = pd.read_csv(os.path.join(fpath, "data.readcount.labelled"))
else:
read_count = pd.read_csv(os.path.join(self.site_info, "data.readcount.labelled"))
read_count = read_count[read_count["set_type"] == self.mode].reset_index(drop=True)
data_info = data_index.merge(read_count, on=["transcript_id", "transcript_position"])
return data_info[data_info["n_reads"] >= min_reads].reset_index(drop=True)
def __len__(self):
return len(self.data_info)
def _load_data(self, idx):
with open(self.data_fpath, 'r') as f:
tx_id, tx_pos, start_pos, end_pos = self.data_info.iloc[idx][["transcript_id", "transcript_position",
"start", "end"]]
f.seek(start_pos, 0)
json_str = f.read(end_pos - start_pos)
pos_info = json.loads(json_str)[tx_id][str(tx_pos)]
assert(len(pos_info.keys()) == 1)
kmer, features = list(pos_info.items())[0]
return kmer, np.array(features)
def __getitem__(self, idx):
kmer, features = self._load_data(idx)
# Repeating kmer to the number of reads sampled
kmer = self._retrieve_full_sequence(kmer, self.num_neighboring_features)
kmer = [kmer[i:i+5] for i in range(2 * self.num_neighboring_features + 1)]
features = features[np.random.choice(len(features), self.min_reads, replace=False), :]
features = features[:, self.indices]
if self.norm_dict is not None:
mean, std = self.get_norm_factor(kmer)
features = torch.Tensor((features - mean) / std)
else:
features = torch.Tensor((features))
if not self.site_mode:
kmer = np.repeat(np.array([self.kmer_to_int[kmer] for kmer in kmer])\
.reshape(-1, 2 * self.num_neighboring_features + 1), self.min_reads, axis=0)
kmer = torch.Tensor(kmer)
else:
kmer = torch.LongTensor([self.kmer_to_int[kmer] for kmer in kmer])
if self.mode == 'Inference':
return features, kmer
else:
return features, kmer, self.data_info.iloc[idx]["modification_status"]
def get_norm_factor(self, list_of_kmers):
norm_mean, norm_std = [], []
for kmer in list_of_kmers:
mean, std = self.norm_dict[kmer]
norm_mean.append(mean)
norm_std.append(std)
return np.concatenate(norm_mean), np.concatenate(norm_std)
def compute_norm_factors(self, n_processes):
if "kmer" not in self.data_info.columns:
print("k-mer information is not present in column, annotating k-mer information in data info")
self.data_info = annotate_kmer_information(self.data_fpath, self.data_info, n_processes)
kmer_mapping_df = create_kmer_mapping_df(self.data_info)
norm_dict = create_norm_dict(kmer_mapping_df, self.data_fpath, n_processes)
return norm_dict
def _retrieve_full_sequence(self, kmer, n_neighboring_features=0):
if n_neighboring_features < self.total_neighboring_features:
return kmer[self.total_neighboring_features - n_neighboring_features:2 * self.total_neighboring_features + n_neighboring_features]
else:
return kmer
def _retrieve_sequence(self, sequence, n_neighboring_features=0):
return [sequence[i : i+5] for i in range(len(sequence) - 4)]
class ImbalanceUnderSampler(torch.utils.data.Sampler):
def __init__(self, data_source):
self.data_source = data_source
self.class_counts = np.unique(self.data_source.labels, return_counts=True)[1]
self.minority_class, self.majority_class = np.argmin(self.class_counts), np.argmax(self.class_counts)
self.minority_class_idx = np.argwhere(self.data_source.labels == self.minority_class).flatten()
self.majority_class_idx = np.argwhere(self.data_source.labels == self.majority_class).flatten()
def __iter__(self):
idx = np.append(self.minority_class_idx, np.random.choice(self.majority_class_idx,
len(self.minority_class_idx), replace=False))
np.random.shuffle(idx)
return iter(idx)
def __len__(self):
return 2 * len(self.minority_class_idx)
class ImbalanceOverSampler(torch.utils.data.Sampler):
def __init__(self, data_source):
self.data_source = data_source
self.class_counts = np.unique(self.data_source.labels, return_counts=True)[1]
self.minority_class, self.majority_class = np.argmin(self.class_counts), np.argmax(self.class_counts)
self.minority_class_idx = np.argwhere(self.data_source.labels == self.minority_class).flatten()
self.majority_class_idx = np.argwhere(self.data_source.labels == self.majority_class).flatten()
def __iter__(self):
idx = np.append(self.majority_class_idx, np.random.choice(self.minority_class_idx,
len(self.majority_class_idx), replace=True))
np.random.shuffle(idx)
return iter(idx)
def __len__(self):
return 2 * len(self.majority_class_idx)
def inference_collate(batch):
return {key: batch for key, batch
in zip (['X', 'kmer'], default_collate(batch))}
def train_collate(batch):
return {key: batch for key, batch
in zip (['X', 'kmer', 'y'], default_collate(batch))}
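# --- Editor's hedged wiring sketch for the dataset, sampler and collate functions above. ---
# The directory path and hyper-parameters are placeholders: a real call needs a dataprep
# output folder containing data.json, data.index and data.readcount(.labelled) files.
def _example_train_loader(root_dir="/path/to/dataprep_output"):
    train_ds = NanopolishDS(root_dir, min_reads=20, norm_path=None,
                            num_neighboring_features=1, mode='Train')
    sampler = ImbalanceUnderSampler(train_ds)
    return DataLoader(train_ds, batch_size=256, sampler=sampler, collate_fn=train_collate)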
| [
"torch.utils.data._utils.collate.default_collate",
"torch.LongTensor",
"torch.Tensor"
] | 1.6.0 | GoekeLab/m6anet | be3148a6404bdd2a4e5e9544b3e618e836c6483c |
1.6 | import torch
from torch import nn
def get_activation(activation):
activation_func = None
if activation == 'tanh':
activation_func = nn.Tanh()
elif activation == 'sigmoid':
activation_func = nn.Sigmoid()
elif activation == 'relu':
activation_func = nn.ReLU()
elif activation == 'softmax':
activation_func = nn.Softmax(dim=1)
else:
raise ValueError("Invalid activation")
return activation_func
class Block(nn.Module):
def __init__(self):
super(Block, self).__init__()
def forward(self, x):
return self.layers(x)
class PoolingFilter(nn.Module):
def forward(self, x):
return x
def predict_read_level_prob(self, x):
return self.forward(x)
class ConcatenateFeatures(Block):
def __init__(self):
super(ConcatenateFeatures, self).__init__()
def forward(self, x):
x = torch.cat([val for _, val in x.items()], axis=1)
return x
class DeaggregateNanopolish(Block):
def __init__(self, num_neighboring_features, n_features=3):
super(DeaggregateNanopolish, self).__init__()
self.num_neighboring_features = num_neighboring_features
self.n_features = n_features * (2 * self.num_neighboring_features + 1)
def forward(self, x):
return {'X': x['X'].view(-1, self.n_features), 'kmer': x['kmer'].view(-1, 1)}
class Flatten(Block):
def __init__(self, start_dim, end_dim):
super(Flatten, self).__init__()
self.layers = nn.Flatten(start_dim, end_dim)
class KmerMultipleEmbedding(Block):
def __init__(self, input_channel, output_channel, num_neighboring_features=0):
super(KmerMultipleEmbedding, self).__init__()
self.input_channel, self.output_channel = input_channel, output_channel
self.embedding_layer = nn.Embedding(input_channel, output_channel)
self.n_features = 2 * num_neighboring_features + 1
def forward(self, x):
kmer = x['kmer']
return {'X': x['X'], 'kmer' :self.embedding_layer(kmer.long()).reshape(-1, self.n_features * self.output_channel)}
class Linear(Block):
def __init__(self, input_channel, output_channel, activation='relu', batch_norm=True, dropout=0.0):
super(Linear, self).__init__()
self.layers = self._make_layers(input_channel, output_channel, activation, batch_norm, dropout)
def _make_layers(self, input_channel, output_channel, activation, batch_norm, dropout=0.0):
layers = [nn.Linear(input_channel, output_channel)]
if batch_norm:
layers.append(nn.BatchNorm1d(num_features=output_channel))
if activation is not None:
layers.append(get_activation(activation))
layers.append(nn.Dropout(p=dropout))
return nn.Sequential(*layers)
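# --- Editor's hedged composition sketch for the blocks above. ---
# Illustrative channel sizes only (not the published m6anet configuration): a batch of
# 4 sites with 20 reads each, 9 signal features and 3 k-mers per read, is de-aggregated
# to the read level, the k-mers are embedded, and everything is concatenated and projected.
def _example_block_stack():
    split = DeaggregateNanopolish(num_neighboring_features=1)   # 3 * (2*1+1) = 9 features per read
    embed = KmerMultipleEmbedding(input_channel=66, output_channel=2,
                                  num_neighboring_features=1)
    concat = ConcatenateFeatures()
    project = Linear(input_channel=9 + 3 * 2, output_channel=150, activation='relu')
    x = {'X': torch.rand(4, 20, 9), 'kmer': torch.randint(0, 66, (4, 20, 3))}
    out = project(concat(embed(split(x))))
    return out  # shape: (4 * 20, 150)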
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.Embedding",
"torch.nn.Flatten"
] | 1.6.0 | GoekeLab/m6anet | be3148a6404bdd2a4e5e9544b3e618e836c6483c |
1.8 | import os
import torch
import numpy as np
class Exp_Basic(object):
def __init__(self, args):
self.args = args
self.device = self._acquire_device()
self.model = self._build_model().to(self.device)
def _build_model(self):
raise NotImplementedError
def _acquire_device(self):
if self.args.use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
device = torch.device('cuda:{}'.format(self.args.gpu))
print('Use GPU: cuda:{}'.format(self.args.gpu))
else:
device = torch.device('cpu')
print('Use CPU')
return device
def _get_data(self):
pass
def valid(self):
pass
def train(self):
pass
def test(self):
pass
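# --- Editor's hedged subclassing sketch for Exp_Basic. ---
# Concrete experiments are expected to override _build_model() and the train/valid/test
# hooks; the tiny linear model below is a placeholder, not the SCINet architecture, and
# `args` must still provide use_gpu, gpu, use_multi_gpu and devices for device selection.
class _ExampleExp(Exp_Basic):
    def _build_model(self):
        import torch.nn as nn
        return nn.Linear(8, 1)  # stand-in model; __init__ moves it to the selected device
    def train(self):
        pass  # a real experiment would implement its optimization loop here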
| [
"torch.device"
] | 1.8.0 | MarcAntoineAlex/SCINet | 4ac582cd717ba1c0c6c6d31a9a824235d35563ed |
0.4 | # New reference link: https://github.com/eriklindernoren/PyTorch-GAN/tree/master/implementations/context_encoder
# It also works on 3 channels like ours and the goal is similar; attached because it is a useful reference for the shapes.
# Note from me (Sohyun): I use 11 photos -> batch_size=11 and num_classes=11, so be careful!
import argparse
import os
import numpy as np
from dataloader import OAGandataset
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchvision.models as models
#from auxiliary_training import *
from loss import sganloss
os.makedirs("images", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=11, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0001, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--num_classes", type=int, default=11, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=128, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
opt = parser.parse_args()
# print(opt)
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class IdentityPadding(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(IdentityPadding, self).__init__()
self.pooling = nn.MaxPool2d(1, stride=stride)
self.add_channels = out_channels - in_channels
def forward(self, x):
out = F.pad(x, (0, 0, 0, 0, 0, self.add_channels))
out = self.pooling(out)
return out
# Code source: https://dnddnjs.github.io/cifar10/2018/10/09/resnet/
# https://github.com/eriklindernoren/PyTorch-GAN/blob/a163b82beff3d01688d8315a3fd39080400e7c01/implementations/srgan/models.py#L18
# Looking at this, the in and out channels are kept identical inside the residual block.
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, down_sample=False):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.stride = stride
if down_sample:
self.down_sample = IdentityPadding(in_channels, out_channels, stride)
else:
self.down_sample = None
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.down_sample is not None:
shortcut = self.down_sample(x)
out += shortcut
out = self.relu(out)
return out
# Reference link: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
# TODO: figure out what the 3 lines below are for, then fix or delete them
# self.label_emb = nn.Embedding(opt.num_classes, opt.latent_dim)
# self.init_size = opt.img_size // 4 # Initial size before upsampling
# self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
self.FaceOcclusion_1=nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3),
nn.InstanceNorm2d(64),
nn.ReLU(),
# -----
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(256),
nn.ReLU(),
# -----
ResidualBlock(256, 256),
ResidualBlock(256, 256),
ResidualBlock(256, 256),
# -----
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(64),
nn.ReLU()
# -----
)
self.FaceOcclusion_2=nn.Sequential(
nn.Conv2d(64, 1, kernel_size=7, stride=1, padding=3),
nn.Sigmoid()
)
self.FaceCompletion=nn.Sequential(
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512),
nn.ReLU(),
nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512),
nn.ReLU(),
# -----
nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(256),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(64),
nn.ReLU(),
# -----
nn.Conv2d(64, 3, kernel_size=7, stride=1, padding=3),
nn.Tanh()
)
def forward(self, x):
# occlusion aware module
out_predicted=self.FaceOcclusion_1(x)
out_predictedM=self.FaceOcclusion_2(out_predicted)
# TODO: the line below should be 1 - out_predictedM rather than 1 - x, right? Changing it for now! -> Reply: correct!
out_InvertedM = torch.ones(1, 1, 128, 128, device=x.device) - out_predictedM  # keep the ones tensor on the same device as the input
out_oa=torch.matmul(out_predicted, out_predictedM)
# face completion module
out_synth=self.FaceCompletion(out_oa)
out_fc=torch.matmul(out_InvertedM, out_synth)
out_filter=torch.matmul(x, out_predictedM)
out_final=out_filter + out_fc
return out_predictedM, out_InvertedM, out_synth, out_final
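# --- Editor's hedged shape-check sketch for the generator above. ---
# A quick CPU sanity check on a random 128x128 RGB batch (batch size 2 is arbitrary):
# the two masks are single-channel and the synthesized / final images keep the input size.
def _example_generator_shapes():
    g = Generator()
    x = torch.rand(2, 3, 128, 128)
    mask, inv_mask, synth, final = g(x)
    # expected: (2, 1, 128, 128), (2, 1, 128, 128), (2, 3, 128, 128), (2, 3, 128, 128)
    return mask.shape, inv_mask.shape, synth.shape, final.shape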
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.discriminator_block = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU()
)
# The height and width of downsampled image
# ds_size = opt.img_size // 2 ** 4
# Output layers
# https://github.com/znxlwm/pytorch-pix2pix/blob/3059f2af53324e77089bbcfc31279f01a38c40b8/network.py#L104- patch gan discriminator code
# The original SGAN code used a linear layer here, but following the paper we use conv layers, which changes the output shape.
self.adv_layer = nn.Sequential(nn.Conv2d(2048, 1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid())
self.attr_layer = nn.Sequential(nn.Conv2d(2048, opt.num_classes, kernel_size=2, stride=1, padding=0),
nn.Softmax(dim=1))  # performs face identification instead of attribute classification
def forward(self, x):
out = self.discriminator_block(x) # torch.Size([11, 2048, 2, 2])
# out = out.view(out.shape[0], -1) # torch.Size([11, 8192])
validity = self.adv_layer(out) # torch.Size([11, 1, 2, 2])
label = self.attr_layer(out) # torch.Size([11, 11, 1, 1])
# label = label.view(label.shape[0], -1) # torch.Size([11, 11]) # please explain why label was reshaped with view!
return validity, label
# TODO: weight parameters assigned to each loss term (need tuning -> see the week-14 presentation)
class weight():
def __init__(self):
self.lam1 = 0.2
self.lam2 = 0.2
self.lam3 = 0.2
self.lam4 = 0.2
self.lam5 = 0.1
self.lam6 = 0.1
self.alpha = 0.7
self.beta = 0.3
w = weight()
# Loss functions - TODO: part for Jihye and Seunggeon to revise
# Combining the losses can probably just be a weighted sum of scalar weight * loss?
# Reference link: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py line 210
adversarial_loss = torch.nn.BCELoss()
attribute_loss = nn.MSELoss()  # attribute loss used by the discriminator
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
attribute_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# data loader
'''
Indices for the data-loading part.
Out of 10,000 images, 7,000 are used for training,
and those 7,000 are split into chunks of 1,000 for the alternating training.
Across the 7 alternating-training rounds the paired:unpaired ratios are
9:1, 8:2, 7:3, 6:4, 5:5, 4:6, 3:7.
Compute the indices yourself and change them accordingly~
'''
idx1 = 0
idx2 = 899
idx3 = 900
idx4 = 999
# TODO: load the model back after saving (for now, saving alone is fine)
# A fact I just learned: the numeric (positional) parameters must come before the keyword parameters..
paired_dataset = OAGandataset(idx1, idx2, paired=True, folder_numbering=False)
unpaired_dataset = OAGandataset(idx3, idx4, unpaired=True, folder_numbering=False)
train_dataloader_p = DataLoader(paired_dataset,
shuffle=True,
num_workers=0,
batch_size=opt.batch_size)
train_dataloader_up = DataLoader(unpaired_dataset,
shuffle=True,
num_workers=0,
batch_size=opt.batch_size)
print ("data loaded")
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# TODO: design this after reviewing how alternating training works
# ----------
# I also want to add a TODO, how do I do that??????? -> just write TODO in English in a comment and it gets picked up~
# paired image training (also build a separate unpaired loop, and apply the losses depending on the case)
print ("paired train")
for epoch in range(opt.n_epochs):
for i, (imgs, imgs_gt, labels) in enumerate(train_dataloader_p):
batch_size = opt.batch_size
# Adversarial ground truths
# valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)
# fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)
# fake_attr_gt = Variable(LongTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(FloatTensor))
# TODO: labels should not be float here; they must be long. The real problem is somewhere else. -> several references say to cast labels to float, though none explain why
# labels = Variable(labels.type(LongTensor))
labels = Variable(labels.type(FloatTensor))
# line 280, line 286 -> a FloatTensor was expected there, so LongTensor was changed to FloatTensor => no more error
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise and labels as generator input
# z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim)))) -> not used here
# Generate a batch of images
out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs)  # outputs used by the discriminator and the loss terms
loss = sganloss([out_final,
out_predictedM,
out_InvertedM,
out_synth],
imgs_gt)
# # Loss measures generator's ability to fool the discriminator
validity, _ = discriminator(out_final)  # resolved, so the question marks can be removed!
# print('validity', validity.shape) # validity torch.Size([10, 1, 2, 2])
# print('val', valid.shape) # val torch.Size([10, 1])
g_loss = 0
g_loss += w.lam1*loss.perceptual_loss()
g_loss += w.lam2*loss.style_loss()
g_loss += w.lam3*loss.pixel_loss(w.alpha, w.beta)
g_loss += w.lam4*loss.smooth_loss()
g_loss += w.lam5*loss.l2_norm()
g_loss += w.lam6*adversarial_loss(validity,valid)
print ("loss:",g_loss)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# d_alpha and d_beta are weights for the two loss terms used by the discriminator; it seems we have to choose them ourselves
d_alpha = 0.5
d_beta = 1 - d_alpha
# Loss for real images
real_pred, real_attr = discriminator(real_imgs)
# d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2
d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)
# print('r',real_pred.shape)
# print('valid', valid.shape)
# Loss for fake images
fake_pred, fake_attr = discriminator(out_final.detach())
# d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2
d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)
# Total discriminator loss
d_loss = (d_real_loss + d_fake_loss) / 2
# print(d_loss.type) # same type as the original SGAN (<built-in method type of Tensor object at ...>); both are floats.
# Calculate discriminator accuracy
pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)
gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)
d_acc = np.mean(np.argmax(pred, axis=1) == gt)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())
)
batches_done = epoch * len(train_dataloader_p) + i
if batches_done % opt.sample_interval == 0:
save_image(out_final.data[:25], "finals/%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_synth.data[:25], "synth/%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_predictedM.data[:25], "masks/%d.png" % batches_done, nrow=5, normalize=True)
torch.save(generator, "generator_paired%d.pt" % batches_done)
torch.save(discriminator, "discriminator_paired%d.pt" % batches_done)
print("unpaired train")
for epoch in range(opt.n_epochs):
for i, (imgs, labels) in enumerate(train_dataloader_up):
batch_size = opt.batch_size
# Adversarial ground truths
# valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)
# fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)
# fake_attr_gt = Variable(LongTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(FloatTensor))
labels = Variable(labels.type(FloatTensor))
# line 280, line 286 -> a FloatTensor was expected there, so LongTensor was changed to FloatTensor => no more error
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise and labels as generator input
# z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim)))) -> not used here
# Generate a batch of images
out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs)  # outputs used by the discriminator and the loss terms
loss = sganloss([out_final,
out_predictedM,
out_InvertedM,
out_synth],
)
# # Loss measures generator's ability to fool the discriminator
validity, _ = discriminator(out_final)  # resolved, so the question marks can be removed!
# print('validity', validity.shape) # validity torch.Size([10, 1, 2, 2])
# print('val', valid.shape) # val torch.Size([10, 1])
g_loss = 0
g_loss += w.lam4 * loss.smooth_loss()
g_loss += w.lam5 * loss.l2_norm()
g_loss += w.lam6 * adversarial_loss(validity, valid)
print("loss:", g_loss)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# d_alpha and d_beta are weights for the two loss terms used by the discriminator; it seems we have to choose them ourselves
d_alpha = 0.5
d_beta = 1 - d_alpha
# Loss for real images
real_pred, real_attr = discriminator(real_imgs)
# d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2
d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)
# print('r',real_pred.shape)
# print('valid', valid.shape)
# Loss for fake images
fake_pred, fake_attr = discriminator(out_final.detach())
# d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2
d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)
# Total discriminator loss
d_loss = (d_real_loss + d_fake_loss) / 2
# print(d_loss.type) # same type as the original SGAN (<built-in method type of Tensor object at ...>); both are floats.
# Calculate discriminator accuracy
pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)
gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)
d_acc = np.mean(np.argmax(pred, axis=1) == gt)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(train_dataloader_up), d_loss.item(), 100 * d_acc, g_loss.item())
)
batches_done = epoch * len(train_dataloader_up) + i
if batches_done % opt.sample_interval == 0:
save_image(out_final.data[:25], "finals/%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_synth.data[:25], "synth/%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_predictedM.data[:25], "masks/%d.png" % batches_done, nrow=5, normalize=True)
torch.save(generator, "generator_unpaired%d.pt" % batches_done)
torch.save(discriminator, "discriminator_unpaired%d.pt" % batches_done)
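# --- Editor's hedged inference sketch for the checkpoints saved above. ---
# Loads one of the saved generator files and de-occludes a single image; the checkpoint
# name and the random input are placeholders for a real preprocessed 128x128 face crop.
def _example_inference(checkpoint="generator_paired0.pt"):
    g = torch.load(checkpoint, map_location="cpu")
    g.eval()
    with torch.no_grad():
        occluded = torch.rand(1, 3, 128, 128)
        _, _, _, restored = g(occluded)
    return restored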
| [
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.ones",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.nn.Softmax",
"torch.nn.MaxPool2d",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.nn.Tanh",
"torch.save",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.matmul",
"torch.nn.MSELoss",
"torch.nn.Sigmoid"
] | 0.4.0 | gun8474/face-recognition-by-OAGAN | 54c67a29a22e25b14a24fb8aa3badba5444653ac |
0.4 | import argparse
import os
import numpy as np
from dataloader import OAGandataset
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchvision.models as models
#from auxiliary_training import *
from loss import sganloss
os.makedirs("finals", exist_ok=True)
os.makedirs("synth", exist_ok=True)
os.makedirs("masks", exist_ok=True)
os.makedirs("model", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=10, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.001, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--num_classes", type=int, default=7000, help="number of classes for paired-dataset")
parser.add_argument("--img_size", type=int, default=128, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
opt = parser.parse_args()
cuda = True if torch.cuda.is_available() else False
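# DCGAN-style weight init used below: Conv weights ~ N(0, 0.02); BatchNorm weights ~ N(1.0, 0.02) with zero bias.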
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
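# Identity shortcut for ResNet downsampling: zero-pad the extra channels and max-pool so the shortcut matches the residual branch's shape.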
class IdentityPadding(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(IdentityPadding, self).__init__()
self.pooling = nn.MaxPool2d(1, stride=stride)
self.add_channels = out_channels - in_channels
def forward(self, x):
out = F.pad(x, (0, 0, 0, 0, 0, self.add_channels))
out = self.pooling(out)
return out
# Code source: https://dnddnjs.github.io/cifar10/2018/10/09/resnet/
# https://github.com/eriklindernoren/PyTorch-GAN/blob/a163b82beff3d01688d8315a3fd39080400e7c01/implementations/srgan/models.py#L18
# In that reference, the residual block keeps the same number of in/out channels.
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, down_sample=False):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.stride = stride
if down_sample:
self.down_sample = IdentityPadding(in_channels, out_channels, stride)
else:
self.down_sample = None
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.down_sample is not None:
shortcut = self.down_sample(x)
out += shortcut
out = self.relu(out)
return out
# Reference: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
# TODO: figure out what the 3 lines below are for, then fix or delete them
# self.label_emb = nn.Embedding(opt.num_classes, opt.latent_dim)
# self.init_size = opt.img_size // 4 # Initial size before upsampling
# self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
self.FaceOcclusion_1=nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3),
nn.InstanceNorm2d(64),
nn.ReLU(),
# -----
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(256),
nn.ReLU(),
# -----
ResidualBlock(256, 256),
ResidualBlock(256, 256),
ResidualBlock(256, 256),
# -----
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(64),
nn.ReLU()
# -----
)
self.FaceOcclusion_2=nn.Sequential(
nn.Conv2d(64, 1, kernel_size=7, stride=1, padding=3),
nn.Sigmoid()
)
self.FaceCompletion=nn.Sequential(
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512),
nn.ReLU(),
nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(512),
nn.ReLU(),
# -----
nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(256),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(64),
nn.ReLU(),
# -----
nn.Conv2d(64, 3, kernel_size=7, stride=1, padding=3),
nn.Tanh()
)
def forward(self, x):
# occlusion aware module
out_predicted=self.FaceOcclusion_1(x)
out_predictedM=self.FaceOcclusion_2(out_predicted)
out_InvertedM = torch.ones(1, 1, 128, 128).cuda() - out_predictedM
out_oa=torch.matmul(out_predicted, out_predictedM)
# face completion module
out_synth=self.FaceCompletion(out_oa)
out_fc=torch.matmul(out_InvertedM, out_synth)
out_filter=torch.matmul(x, out_predictedM)
out_final=out_filter + out_fc
return out_predictedM, out_InvertedM, out_synth, out_final
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.discriminator_block = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU()
)
# The height and width of downsampled image
# ds_size = opt.img_size // 2 ** 4
# Output layers
# https://github.com/znxlwm/pytorch-pix2pix/blob/3059f2af53324e77089bbcfc31279f01a38c40b8/network.py#L104- patch gan discriminator code
# The original SGAN code used a linear layer here, but following the paper we use a conv, which changes the output shape.
self.adv_layer = nn.Sequential(nn.Conv2d(2048, 1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid())
self.attr_layer = nn.Sequential(nn.Conv2d(2048, opt.num_classes, kernel_size=2, stride=1, padding=0),
nn.Softmax()) # perform face recognition instead of attribute classification
#TODO: the paired and unpaired sets have different numbers of classes (= images), but attr_layer uses the total class count. Check other papers or code for how to handle this.
def forward(self, x):
out = self.discriminator_block(x) # torch.Size([11, 2048, 2, 2])
# out = out.view(out.shape[0], -1) # torch.Size([11, 8192])
validity = self.adv_layer(out) # torch.Size([11, 1, 2, 2])
label = self.attr_layer(out) # torch.Size([11, 11, 1, 1])
return validity, label
class weight():
def __init__(self):
self.lam1 = 0.05 # perceptual_loss
self.lam2 = 120 # style_loss
self.lam3 = 1 # pixel_loss
self.lam4 = 0.001 # smooth_loss
self.lam5 = -1 # L2 norm
self.lam6 = 1 # adversarial_loss
self.alpha = 0.5
self.beta = 0.5
w = weight()
# Reference: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py line 210
adversarial_loss = torch.nn.BCELoss()
attribute_loss = nn.MSELoss() # attribute loss used by the discriminator
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
attribute_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# data loader
'''
Indices for the portion of data to load.
Of the 10,000 images, 7,000 are used for training,
and those 7,000 are split into chunks of 1,000 for alternating training.
Across the 7 alternating training rounds, the paired:unpaired ratios are
9:1, 8:2, 7:3, 6:4, 5:5, 4:6, 3:7.
Compute the indices yourself and change them accordingly.
'''
idx1 = 0
idx2 = 140 * 9 - 1
idx3 = 140 * 9
idx4 = 140 * 10 - 1
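# With the values above, paired training uses indices [0, 140*9) and unpaired uses [140*9, 140*10), i.e. a 9:1 split of a 1400-image round.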
#TODO: the model is being saved now; next, add loading the trained model to continue training
# Just learned: numeric parameters must come before string parameters..
paired_dataset = OAGandataset(idx1, idx2, paired=True, folder_numbering=False)
unpaired_dataset = OAGandataset(idx3, idx4, paired=False, folder_numbering=False)
train_dataloader_p = DataLoader(paired_dataset,
shuffle=True,
num_workers=0,
batch_size=opt.batch_size)
train_dataloader_up = DataLoader(unpaired_dataset,
shuffle=True,
num_workers=0,
batch_size=opt.batch_size)
print ("data loaded")
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------
# paired-image training (unpaired training is built separately below; losses are applied depending on the case)
print ("paired train")
for epoch in range(opt.n_epochs):
for i, (imgs,imgs_gt,labels) in enumerate(train_dataloader_p):
#TODO: unify batch_size into a single value (opt, dataloader, img shape)
batch_size = opt.batch_size
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)
fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(FloatTensor))
labels = Variable(labels.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise and labels as generator input
# z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim)))) -> not used here
# Generate a batch of images
out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs) # outputs fed to the discriminator and the loss computation
loss = sganloss([out_final,
out_predictedM,
out_InvertedM,
out_synth],imgs_gt.cuda())
# # Loss measures generator's ability to fool the discriminator
validity, _ = discriminator(out_final)
g_loss = 0
g_loss += w.lam1*loss.perceptual_loss()
g_loss += w.lam2*loss.style_loss()
g_loss += w.lam3*loss.pixel_loss(w.alpha, w.beta)
g_loss += w.lam4*loss.smooth_loss()
g_loss += w.lam5*loss.l2_norm()
g_loss += w.lam6*adversarial_loss(validity,valid)
print ("loss:",loss.perceptual_loss(), loss.style_loss(), loss.pixel_loss(w.alpha, w.beta), loss.smooth_loss(), loss.l2_norm(), adversarial_loss(validity,valid))
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# d_alpha, d_beta are weights for the two loss functions used by the discriminator; we apparently have to choose them ourselves
d_alpha = 0.5
d_beta = 1 - d_alpha
# Loss for real images
real_pred, real_attr = discriminator(real_imgs)
# d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2
d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)
# print('r',real_pred.shape)
# print('valid', valid.shape)
# Loss for fake images
fake_pred, fake_attr = discriminator(out_final.detach())
# d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2
d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)
# Total discriminator loss
d_loss = (d_real_loss + d_fake_loss) / 2
# print(d_loss.type) # same type as the original (sgan) code (<built-in method type of Tensor object at ...>); both are floats.
# Calculate discriminator accuracy
pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)
gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)
d_acc = np.mean(np.argmax(pred, axis=1) == gt)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())
)
batches_done = epoch * len(train_dataloader_p) + i
if batches_done % opt.sample_interval == 0:
save_image(out_final.data[:10], "finals/%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_synth.data[:10], "synth/%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_predictedM.data[:10], "masks/%d.png" % batches_done, nrow=5, normalize=True)
torch.save(generator, "model/generator_paired%d.pt" % batches_done)
torch.save(discriminator, "model/discriminator_paired%d.pt" % batches_done)
print("unpaired train")
for epoch in range(opt.n_epochs):
for i, (imgs, labels) in enumerate(train_dataloader_up):
batch_size = opt.batch_size
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)
fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(FloatTensor))
labels = Variable(labels.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs) # outputs fed to the discriminator and the loss computation
loss = sganloss([out_final,
out_predictedM,
out_InvertedM,
out_synth]
)
#TODO: error occurs here. TypeError: conv2d(): argument 'input' (position 1) must be Tensor, not NoneType
# seems to be a problem in the generator
# # Loss measures generator's ability to fool the discriminator
validity, _ = discriminator(out_final)
g_loss = 0
g_loss += w.lam4 * loss.smooth_loss()
g_loss += w.lam5 * loss.l2_norm()
g_loss += w.lam6 * adversarial_loss(validity, valid)
print("loss:", g_loss)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# d_alpha, d_beta are weights for the two loss functions used by the discriminator; we apparently have to choose them ourselves
d_alpha = 0.5
d_beta = 1 - d_alpha
# Loss for real images
real_pred, real_attr = discriminator(real_imgs)
# d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2
d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)
# print('r',real_pred.shape)
# print('valid', valid.shape)
# Loss for fake images
fake_pred, fake_attr = discriminator(out_final.detach())
# d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2
d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)
# Total discriminator loss
d_loss = (d_real_loss + d_fake_loss) / 2
# print(d_loss.type) # same type as the original (sgan) code (<built-in method type of Tensor object at ...>); both are floats.
# Calculate discriminator accuracy
pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)
gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)
d_acc = np.mean(np.argmax(pred, axis=1) == gt)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())
)
batches_done = epoch * len(train_dataloader_p) + i
if batches_done % opt.sample_interval == 0:
save_image(out_final.data[:10], "finals/up_%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_synth.data[:10], "synth/up_%d.png" % batches_done, nrow=5, normalize=True)
save_image(out_predictedM.data[:10], "masks/up_%d.png" % batches_done, nrow=5, normalize=True)
torch.save(generator, "model/generator_unpaired%d.pt" % batches_done)
torch.save(discriminator, "model/discriminator_unpaired%d.pt" % batches_done)
| [
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.ones",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.nn.Softmax",
"torch.nn.MaxPool2d",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.nn.Tanh",
"torch.save",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.matmul",
"torch.nn.MSELoss",
"torch.nn.Sigmoid"
] | 0.4.0 | gun8474/face-recognition-by-OAGAN | 54c67a29a22e25b14a24fb8aa3badba5444653ac |
1.4 | # -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'MacSeNet'
# imports
import numpy as np
import torch
import torch.nn as nn
class ConvEncoder(nn.Module):
"""
Class for building the analysis part
of the Front-End ('Fe'), with randomly
initialized dictionaries.
"""
def __init__(self, in_size=1024, out_size=1024, hop_size=384, exp_settings={}):
super(ConvEncoder, self).__init__()
# Analysis Parameters
self.fully_modulated = exp_settings['fully_modulated']
self.batch_size = None
self.time_domain_samples = None
self.sz_in = in_size
self.sz_out = out_size
self.hop = hop_size
self.f_matrix = np.zeros((self.sz_out, self.sz_in), dtype=np.float32)
self.input_size = exp_settings['fs'] * exp_settings['d_p_length']
self.output_size = np.ceil(self.input_size/self.hop)
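# Padding is chosen so that the strided Conv1d below produces exactly output_size (= ceil(input_size / hop)) frames.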
self.pad = np.int(-self.input_size/2 + self.sz_in/2 - hop_size/2 + self.output_size*hop_size/2)
self.relu = torch.nn.ReLU()
# Model parameters to be optimized
self.conv_a1 = torch.nn.Conv1d(in_channels=1, out_channels=self.sz_out,
kernel_size=self.sz_in, stride=self.hop, padding=self.pad, bias=False)
self.conv_a2 = torch.nn.Conv1d(in_channels=self.sz_out, out_channels=self.sz_out,
kernel_size=5, dilation=10, padding=20, bias=False)
# Initialize model parameters
self.initialize()
def initialize(self):
torch.nn.init.kaiming_uniform_(self.conv_a1.weight)
torch.nn.init.kaiming_uniform_(self.conv_a2.weight)
def forward(self, wave_form):
# Resize waveform
batch_size = wave_form.size(0)
time_domain_samples = wave_form.size(1)
# Reshaping
wave_form = wave_form.view(batch_size, 1, time_domain_samples)
# Cosine part
x_coeff = self.conv_a1.forward(wave_form)
x_c_coeff = self.relu(self.conv_a2(x_coeff) + x_coeff)
return x_c_coeff
class ConvDecoder(nn.Module):
"""
Class for building the synthesis part
of the Front-End ('Fe'), with randomly
initialized dictionaries.
"""
def __init__(self, ft_size=1024, kernel_size=1024, hop_size=384, exp_settings={}):
super(ConvDecoder, self).__init__()
# Synthesis Parameters
self.fully_modulated = exp_settings['fully_modulated']
self.batch_size = None
self.time_domain_samples = None
self.sz_in = ft_size
self.kernel_sz = kernel_size
self.hop = hop_size
self.output_size = exp_settings['fs'] * exp_settings['d_p_length']
self.input_size = np.ceil(self.output_size / self.hop)
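# Padding is chosen so that the transposed convolution maps input_size frames back to exactly output_size samples.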
self.pad = np.int(((self.input_size-1)*self.hop + self.kernel_sz - self.output_size)/2)
self.f_matrix = np.zeros((self.sz_in, self.kernel_sz), dtype=np.float32)
self.tanh = torch.nn.Tanh()
self.conv_dec = torch.nn.ConvTranspose1d(in_channels=self.sz_in, out_channels=1,
kernel_size=self.kernel_sz, bias=None, stride=self.hop,
padding=self.pad,
dilation=1, groups=1)
def forward(self, x_coeff, use_sorting):
# Reshaping
wave_form = self.tanh(self.conv_dec.forward(x_coeff))
return wave_form[:, 0, :]
# EOF
| [
"torch.nn.init.kaiming_uniform_",
"torch.nn.ConvTranspose1d",
"torch.nn.Conv1d",
"torch.nn.Tanh",
"torch.nn.ReLU"
] | 1.4.0 | TUIlmenauAMS/rl_singing_voice | 60204c698d48f27b44588c9d6c8dd2c66a13fcd5 |
1.1 | import torch
import torch.nn as nn
from torchvision import models, transforms
class Resnet18(object):
'''
pretrained Resnet18 from torchvision
'''
def __init__(self, args, eval=True, share_memory=False, use_conv_feat=True):
self.model = models.resnet18(pretrained=True)
if args.gpu:
try:
self.model = self.model.to(torch.device('cuda'))
except:
self.model = self.model.to(torch.device('cuda'))
if eval:
self.model = self.model.eval()
if share_memory:
self.model.share_memory()
if use_conv_feat:
self.model = nn.Sequential(*list(self.model.children())[:-2])
def extract(self, x):
return self.model(x)
class MaskRCNN(object):
'''
pretrained MaskRCNN from torchvision
'''
def __init__(self, args, eval=True, share_memory=False, min_size=224):
self.model = models.detection.maskrcnn_resnet50_fpn(pretrained=True, min_size=min_size)
self.model = self.model.backbone.body
self.feat_layer = 3
if args.gpu:
try:
self.model = self.model.to(torch.device('cuda'))
except:
self.model = self.model.to(torch.device('cuda'))
if eval:
self.model = self.model.eval()
if share_memory:
self.model.share_memory()
def extract(self, x):
features = self.model(x)
return features[self.feat_layer]
class Resnet(object):
def __init__(self, args, eval=True, share_memory=False, use_conv_feat=True):
self.model_type = args.visual_model
self.gpu = args.gpu
# choose model type
if self.model_type == "maskrcnn":
self.resnet_model = MaskRCNN(args, eval, share_memory)
else:
self.resnet_model = Resnet18(args, eval, share_memory, use_conv_feat=use_conv_feat)
# normalization transform
self.transform = self.get_default_transform()
@staticmethod
def get_default_transform():
return transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
])
def featurize(self, images, batch=32):
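# Normalize and stack the images, then run the backbone in chunks of `batch` images to bound GPU memory.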
images_normalized = torch.stack([self.transform(i) for i in images], dim=0)
if self.gpu:
images_normalized = images_normalized.to(torch.device('cuda'))
out = []
with torch.set_grad_enabled(False):
for i in range(0, images_normalized.size(0), batch):
b = images_normalized[i:i+batch]
out.append(self.resnet_model.extract(b))
return torch.cat(out, dim=0)
| [
"torch.device",
"torch.cat",
"torch.set_grad_enabled"
] | 1.1.0 | jzhanson/alfred | d5b540e7c9b53d3f70cc2907503935fecff00018 |
1.4 | # -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import copy
import argparse
import json
import os
import torch
import numpy as np
#=====START: ADDED FOR DISTRIBUTED======
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from torch.utils.data.distributed import DistributedSampler
#=====END: ADDED FOR DISTRIBUTED======
from torch.utils.data import DataLoader
from glow import WaveGlow, WaveGlowLoss
from mel2samp import Mel2Samp
def load_checkpoint(checkpoint_path, model, optimizer):
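# Restore model weights and optimizer state from a saved checkpoint dict.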
assert os.path.isfile(checkpoint_path)
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
iteration = checkpoint_dict['iteration']
optimizer.load_state_dict(checkpoint_dict['optimizer'])
model_for_loading = checkpoint_dict['model']
model.load_state_dict(model_for_loading.state_dict())
print("Loaded checkpoint '{}' (iteration {})" .format(
checkpoint_path, iteration))
return model, optimizer, iteration
def save_checkpoint(model, optimizer, schedular,learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
model_for_saving = WaveGlow(**waveglow_config).cuda()
model_for_saving.load_state_dict(model.state_dict())
torch.save({'model': model_for_saving,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate,
'schedular':schedular
}, filepath)
def validate(model,criterion,valset,epoch,batch_size,n_gpus,rank,output_directory,logger):
model.eval()
with torch.no_grad():
test_sampler = DistributedSampler(valset) if n_gpus > 1 else None
test_loader = DataLoader(valset, num_workers=1, shuffle=False,
sampler=test_sampler,
batch_size=batch_size,
pin_memory=False,
drop_last=True)
val_loss =[]
for i,batch in enumerate(test_loader):
model.zero_grad()
#mel=batch*80*63,batch*16000
mel, audio = batch
# wrap the data in Variables
mel = torch.autograd.Variable(mel.cuda())
audio = torch.autograd.Variable(audio.cuda())
outputs = model((mel, audio))
# compute the loss
loss = criterion(outputs)
if num_gpus > 1:
reduced_loss = reduce_tensor(loss.data, num_gpus).item()
else:
reduced_loss = loss.item()
val_loss.append(reduced_loss)
logger.add_scalar('test_loss', np.mean(val_loss), epoch)
def train(num_gpus, rank, group_name,tnum, output_directory, epochs, learning_rate,
sigma, iters_per_checkpoint, batch_size, seed, fp16_run,
checkpoint_path, with_tensorboard):
# set random seeds for reproducibility
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
init_distributed(rank, num_gpus, group_name, **dist_config)
#=====END: ADDED FOR DISTRIBUTED======
# loss criterion
criterion = WaveGlowLoss(sigma)
# build the WaveGlow model
model = WaveGlow(**waveglow_config).cuda()
pytorch_total_params = sum(p.numel() for p in model.parameters())
pytorch_total_params_train = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("param", pytorch_total_params)
print("param trainable", pytorch_total_params_train)
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
model = apply_gradient_allreduce(model)
#=====END: ADDED FOR DISTRIBUTED======
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# apex mixed-precision acceleration
if fp16_run:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
# Load checkpoint if one exists
iteration = 0
if checkpoint_path != "":
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
#iteration = checkpoint_dict['iteration']
#optimizer.load_state_dict(checkpoint_dict['optimizer'])
model_for_loading = checkpoint_dict['model']
model.load_state_dict(model_for_loading.state_dict())
print("Loaded checkpoint '{}' (iteration {})".format(
checkpoint_path, iteration))
#model, optimizer, iteration = load_checkpoint(checkpoint_path, model,
# optimizer)
iteration += 1 # next iteration is iteration + 1
temp_config = copy.deepcopy(data_config)
temp_config['training_files'] = data_config['training_files'].replace('1',str(tnum))
trainset = Mel2Samp(**temp_config)
testconfig = copy.deepcopy(data_config)
testconfig["training_files"] = "traintestset_eng/test_files_eng.txt"
testset = Mel2Samp(**testconfig)
# =====START: ADDED FOR DISTRIBUTED======
train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None
# =====END: ADDED FOR DISTRIBUTED======
train_loader = DataLoader(trainset, num_workers=1, shuffle=False,
sampler=train_sampler,
batch_size=batch_size,
pin_memory=False,
drop_last=True)
# Get shared output_directory ready
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
print("output directory", output_directory)
# not used
if with_tensorboard and rank == 0:
from tensorboardX import SummaryWriter
logger = SummaryWriter(os.path.join(output_directory, 'logs'))
model.train()
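# Resume epoch counting from the iteration restored from the checkpoint.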
epoch_offset = max(0, int(iteration / len(train_loader)))
# for param_group in optimizer.param_groups:
# param_group['lr'] = 5e-5
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,step_size=200,gamma=0.25)
# ================ MAIN TRAINNIG LOOP! ===================
for epoch in range(epoch_offset, epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
# zero the gradients; z follows a zero-mean Gaussian
model.zero_grad()
#mel=batch*80*63,batch*16000
mel, audio = batch
# wrap the data in Variables
mel = torch.autograd.Variable(mel.cuda())
audio = torch.autograd.Variable(audio.cuda())
outputs = model((mel, audio))
# compute the loss
loss = criterion(outputs)
if num_gpus > 1:
reduced_loss = reduce_tensor(loss.data, num_gpus).item()
else:
reduced_loss = loss.item()
# apex mixed precision: scale the loss for backward
if fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
if not reduced_loss < 0:
print("no")
print("{}:\t{:.9f}".format(iteration, reduced_loss))
if with_tensorboard and rank == 0:
logger.add_scalar('training_loss', reduced_loss, i + len(train_loader) * epoch)
if (iteration % iters_per_checkpoint == 0):
if rank == 0:
checkpoint_path = "{}/waveglow_{}".format(
output_directory, iteration)
save_checkpoint(model, optimizer, scheduler,learning_rate, iteration,
checkpoint_path)
iteration += 1
# num_p = 0
# for param in model.parameters():
# num_p += param.numel()
# print(num_p)
#scheduler.step()
# validate
if rank == 0:
validate(model,criterion,testset,epoch,batch_size,num_gpus,rank,output_directory,logger)
model.train()
checkpoint_path = "{}/test{}_eng_model".format(
output_directory, tnum)
save_checkpoint(model, optimizer, scheduler, learning_rate, iteration,
checkpoint_path)
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-r', '--rank', type=int, default=0,
help='rank of process for distributed')
parser.add_argument('-g', '--group_name', type=str, default='',
help='name of group for distributed')
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
config = json.loads(data)
train_config = config["train_config"]
global data_config
data_config = config["data_config"]
global dist_config
dist_config = config["dist_config"]
global waveglow_config
waveglow_config = config["waveglow_config"]
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
if args.group_name == '':
print("WARNING: Multiple GPUs detected but no distributed group set")
print("Only running 1 GPU. Use distributed.py for multiple GPUs")
num_gpus = 1
if num_gpus == 1 and args.rank != 0:
raise Exception("Doing single GPU training on rank > 0")
# let cudnn automatically pick efficient algorithms
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
for i in range(1,2):
tnum=i
train(num_gpus, args.rank, args.group_name,tnum, **train_config)
| [
"torch.cuda.manual_seed",
"torch.optim.lr_scheduler.StepLR",
"torch.no_grad",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.distributed.DistributedSampler"
] | 1.4.1 | ruaruaruabick/waveglow | 636d2ba2bda4f4efd5f13f8e46aef23d8b7881bd |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for accessing Nvidia MMARs
See Also:
- https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
"""
import json
import os
import warnings
from typing import Mapping, Union
import torch
import monai.networks.nets as monai_nets
from monai.apps.utils import download_and_extract, logger
from monai.utils.module import optional_import
from .model_desc import MODEL_DESC
from .model_desc import RemoteMMARKeys as Keys
__all__ = ["get_model_spec", "download_mmar", "load_from_mmar"]
def get_model_spec(idx: Union[int, str]):
"""get model specification by `idx`. `idx` could be index of the constant tuple of dict or the actual model ID."""
if isinstance(idx, int):
return MODEL_DESC[idx]
if isinstance(idx, str):
key = idx.strip().lower()
for cand in MODEL_DESC:
if str(cand[Keys.ID]).strip().lower() == key:
return cand
logger.info(f"Available specs are: {MODEL_DESC}.")
raise ValueError(f"Unknown MODEL_DESC request: {idx}")
def _get_all_ngc_models(pattern, page_index=0, page_size=50):
url = "https://api.ngc.nvidia.com/v2/search/catalog/resources/MODEL"
query_dict = {
"query": "",
"orderBy": [{"field": "score", "value": "DESC"}],
"queryFields": ["all", "description", "displayName", "name", "resourceId"],
"fields": [
"isPublic",
"attributes",
"guestAccess",
"name",
"orgName",
"teamName",
"displayName",
"dateModified",
"labels",
"description",
],
"page": 0,
}
filter = [dict(field="name", value=f"*{pattern}*")]
query_dict["page"] = page_index
query_dict["pageSize"] = page_size
query_dict["filters"] = filter
query_str = json.dumps(query_dict)
full_url = f"{url}?q={query_str}"
requests_get, has_requests = optional_import("requests", name="get")
if has_requests:
resp = requests_get(full_url)
else:
raise ValueError("NGC API requires requests package. Please install it.")
model_list = json.loads(resp.text)
model_dict = {}
for result in model_list["results"]:
for model in result["resources"]:
current_res_id = model["resourceId"]
model_dict[current_res_id] = {"name": model["name"]}
for attribute in model["attributes"]:
if attribute["key"] == "latestVersionIdStr":
model_dict[current_res_id]["latest"] = attribute["value"]
return model_dict
def _get_ngc_url(model_name: str, version: str, model_prefix=""):
return f"https://api.ngc.nvidia.com/v2/models/{model_prefix}{model_name}/versions/{version}/zip"
def _get_ngc_doc_url(model_name: str, model_prefix=""):
return f"https://ngc.nvidia.com/catalog/models/{model_prefix}{model_name}"
def download_mmar(item, mmar_dir=None, progress: bool = True, api: bool = False, version: int = -1):
"""
Download and extract Medical Model Archive (MMAR) from Nvidia Clara Train.
See Also:
- https://docs.nvidia.com/clara/
- Nvidia NGC Registry CLI
- https://docs.nvidia.com/clara/clara-train-sdk/pt/mmar.html
Args:
item: the corresponding model item from `MODEL_DESC`.
Or when api is True, the substring to query NGC's model name field.
mmar_dir: target directory to store the MMAR, default is `mmars` subfolder under `torch.hub get_dir()`.
progress: whether to display a progress bar.
api: whether to query NGC and download via api
version: which version of MMAR to download. -1 means the latest from ngc.
Examples::
>>> from monai.apps import download_mmar
>>> download_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".")
>>> download_mmar("prostate_mri_segmentation", mmar_dir=".", api=True)
Returns:
The local directory of the downloaded model.
If api is True, a list of local directories of downloaded models.
"""
if not mmar_dir:
get_dir, has_home = optional_import("torch.hub", name="get_dir")
if has_home:
mmar_dir = os.path.join(get_dir(), "mmars")
else:
raise ValueError("mmar_dir=None, but no suitable default directory computed. Upgrade Pytorch to 1.6+ ?")
if api:
model_dict = _get_all_ngc_models(item)
if len(model_dict) == 0:
raise ValueError(f"api query returns no item for pattern {item}. Please change or shorten it.")
model_dir_list = []
for k, v in model_dict.items():
ver = v["latest"] if version == -1 else str(version)
download_url = _get_ngc_url(k, ver)
model_dir = os.path.join(mmar_dir, v["name"])
download_and_extract(
url=download_url,
filepath=os.path.join(mmar_dir, f'{v["name"]}_{ver}.zip'),
output_dir=model_dir,
hash_val=None,
hash_type="md5",
file_type="zip",
has_base=False,
progress=progress,
)
model_dir_list.append(model_dir)
return model_dir_list
if not isinstance(item, Mapping):
item = get_model_spec(item)
ver = item.get(Keys.VERSION, 1)
if version > 0:
ver = str(version)
model_fullname = f"{item[Keys.NAME]}_{ver}"
model_dir = os.path.join(mmar_dir, model_fullname)
model_url = item.get(Keys.URL) or _get_ngc_url(item[Keys.NAME], version=ver, model_prefix="nvidia/med/")
download_and_extract(
url=model_url,
filepath=os.path.join(mmar_dir, f"{model_fullname}.{item[Keys.FILE_TYPE]}"),
output_dir=model_dir,
hash_val=item[Keys.HASH_VAL],
hash_type=item[Keys.HASH_TYPE],
file_type=item[Keys.FILE_TYPE],
has_base=False,
progress=progress,
)
return model_dir
def load_from_mmar(
item,
mmar_dir=None,
progress: bool = True,
version: int = -1,
map_location=None,
pretrained=True,
weights_only=False,
model_key: str = "model",
):
"""
Download and extract Medical Model Archive (MMAR) model weights from Nvidia Clara Train.
Args:
item: the corresponding model item from `MODEL_DESC`.
mmar_dir: : target directory to store the MMAR, default is mmars subfolder under `torch.hub get_dir()`.
progress: whether to display a progress bar when downloading the content.
version: version number of the MMAR. Set it to `-1` to use `item[Keys.VERSION]`.
map_location: pytorch API parameter for `torch.load` or `torch.jit.load`.
pretrained: whether to load the pretrained weights after initializing a network module.
weights_only: whether to load only the weights instead of initializing the network module and assign weights.
model_key: a key to search in the model file or config file for the model dictionary.
Currently this function assumes that the model dictionary has
`{"[name|path]": "test.module", "args": {'kw': 'test'}}`.
Examples::
>>> from monai.apps import load_from_mmar
>>> unet_model = load_from_mmar("clara_pt_prostate_mri_segmentation_1", mmar_dir=".", map_location="cpu")
>>> print(unet_model)
See Also:
https://docs.nvidia.com/clara/
"""
if not isinstance(item, Mapping):
item = get_model_spec(item)
model_dir = download_mmar(item=item, mmar_dir=mmar_dir, progress=progress, version=version)
model_file = os.path.join(model_dir, item[Keys.MODEL_FILE])
logger.info(f'\n*** "{item[Keys.ID]}" available at {model_dir}.')
# loading with `torch.jit.load`
if f"{model_file}".endswith(".ts"):
if not pretrained:
warnings.warn("Loading a ScriptModule, 'pretrained' option ignored.")
if weights_only:
warnings.warn("Loading a ScriptModule, 'weights_only' option ignored.")
return torch.jit.load(model_file, map_location=map_location)
# loading with `torch.load`
model_dict = torch.load(model_file, map_location=map_location)
if weights_only:
return model_dict.get(model_key, model_dict) # model_dict[model_key] or model_dict directly
# 1. search `model_dict['train_config]` for model config spec.
model_config = _get_val(dict(model_dict).get("train_conf", {}), key=model_key, default={})
if not model_config:
# 2. search json CONFIG_FILE for model config spec.
json_path = os.path.join(model_dir, item.get(Keys.CONFIG_FILE, "config_train.json"))
with open(json_path) as f:
conf_dict = json.load(f)
conf_dict = dict(conf_dict)
model_config = _get_val(conf_dict, key=model_key, default={})
if not model_config:
# 3. search `model_dict` for model config spec.
model_config = _get_val(dict(model_dict), key=model_key, default={})
if not (model_config and isinstance(model_config, Mapping)):
raise ValueError(
f"Could not load model config dictionary from config: {item.get(Keys.CONFIG_FILE)}, "
f"or from model file: {item.get(Keys.MODEL_FILE)}."
)
# parse `model_config` for model class and model parameters
if model_config.get("name"): # model config section is a "name"
model_name = model_config["name"]
model_cls = monai_nets.__dict__[model_name]
elif model_config.get("path"): # model config section is a "path"
# https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html
model_module, model_name = model_config.get("path", ".").rsplit(".", 1)
model_cls, has_cls = optional_import(module=model_module, name=model_name)
if not has_cls:
raise ValueError(
f"Could not load MMAR model config {model_config.get('path', '')}, "
f"Please make sure MMAR's sub-folders in '{model_dir}' is on the PYTHONPATH."
"See also: https://docs.nvidia.com/clara/clara-train-sdk/pt/byom.html"
)
else:
raise ValueError(f"Could not load model config {model_config}.")
logger.info(f"*** Model: {model_cls}")
model_kwargs = model_config.get("args", None)
if model_kwargs:
model_inst = model_cls(**model_kwargs)
logger.info(f"*** Model params: {model_kwargs}")
else:
model_inst = model_cls()
if pretrained:
model_inst.load_state_dict(model_dict.get(model_key, model_dict))
logger.info("\n---")
doc_url = item.get(Keys.DOC) or _get_ngc_doc_url(item[Keys.NAME], model_prefix="nvidia:med:")
logger.info(f"For more information, please visit {doc_url}\n")
return model_inst
def _get_val(input_dict: Mapping, key="model", default=None):
"""
Search for the item with `key` in `config_dict`.
Returns: the first occurrence of `key` in a breadth first search.
"""
if key in input_dict:
return input_dict[key]
for sub_dict in input_dict:
val = input_dict[sub_dict]
if isinstance(val, Mapping):
found_val = _get_val(val, key=key, default=None)
if found_val is not None:
return found_val
return default
| [
"torch.jit.load",
"torch.load"
] | 1.5 | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 |
1.7 |
import torch
import torch.nn as nn
def soft_update(target, source, t):
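# Polyak/soft update: target <- (1 - t) * target + t * source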
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_( (1 - t) * target_param.data + t * source_param.data )
def hard_update(target, source):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(source_param.data)
def init_weights(m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.orthogonal_(m.weight)
try: nn.init.constant_(m.bias, 0.01)
except: pass
if isinstance(m, nn.LSTM):
for name, param in m.named_parameters():
if name.startswith('weight'): nn.init.orthogonal_(param)
return
'''
https://github.com/MadryLab/implementation-matters.git
'''
def init_weights_new(m):
for p in m.parameters():
if len(p.data.shape) >= 2:
orthogonal_init(p.data)
else:
p.data.zero_()
return
def orthogonal_init(tensor, gain=1):
'''
Fills the input `Tensor` using the orthogonal initialization scheme from OpenAI
Args:
tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
gain: optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> orthogonal_init(w)
'''
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor[0].numel()
flattened = tensor.new(rows, cols).normal_(0, 1)
if rows < cols:
flattened.t_()
# Compute an orthogonal basis via SVD (this variant uses torch.svd instead of a QR factorization)
u, s, v = torch.svd(flattened, some=True)
if rows < cols:
u.t_()
q = u if tuple(u.shape) == (rows, cols) else v
with torch.no_grad():
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
| [
"torch.nn.init.orthogonal_",
"torch.no_grad",
"torch.nn.init.constant_",
"torch.svd"
] | 1.7.1 | zhangdongkun98/rl-lib | 50e36c18b130cff40abc6621923becd6cdc48e2b |
1.3 | """Compute the gradient with PyTorch and the variance with BackPACK."""
from torch.nn import CrossEntropyLoss, Flatten, Linear, Sequential
from backpack import backpack, extend, extensions
from backpack.utils.examples import load_mnist_data
B = 4
X, y = load_mnist_data(B)
print("# Gradient with PyTorch, individual gradients with BackPACK | B =", B)
model = Sequential(Flatten(), Linear(784, 10),)
lossfunc = CrossEntropyLoss()
model = extend(model)
lossfunc = extend(lossfunc)
loss = lossfunc(model(X), y)
with backpack(extensions.BatchGrad()):
loss.backward()
for name, param in model.named_parameters():
print(name)
print(".grad.shape: ", param.grad.shape)
print(".grad_batch.shape: ", param.grad_batch.shape)
| [
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten"
] | 1.3.0 | paulkogni/backpack | 3122de062d5bbcdcba8f8e02d24adb1bd2cdada6 |
1.6 | import errno
import os
import time
import urllib.request
import sys
import numpy as np
import pkg_resources
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from PIL import Image
from skimage import transform
from torchvision import transforms
from tqdm import tqdm
from . import data_loader, u2net
class DownloadProgressBar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download_url(url, model_name, output_path):
if os.path.exists(output_path):
return
print(
f"Downloading model to {output_path}".format(output_path=output_path),
file=sys.stderr,
)
with DownloadProgressBar(
unit="B", unit_scale=True, miniters=1, desc=url.split("/")[-1]
) as t:
urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
def load_model(model_name: str = "u2net"):
if model_name == "u2netp":
net = u2net.U2NETP(3, 1)
path = os.path.expanduser("~/.u2net/u2netp.pth")
download_url(
"https://www.dropbox.com/s/usb1fyiuh8as5gi/u2netp.pth?dl=1",
"u2netp.pth",
path,
)
elif model_name == "u2net":
net = u2net.U2NET(3, 1)
path = os.path.expanduser("~/.u2net/u2net.pth")
download_url(
"https://www.dropbox.com/s/kdu5mhose1clds0/u2net.pth?dl=1",
"u2net.pth",
path,
)
else:
print("Choose between u2net or u2netp", file=sys.stderr)
try:
if torch.cuda.is_available():
net.load_state_dict(torch.load(path))
net.to(torch.device("cuda"))
else:
net.load_state_dict(torch.load(path, map_location="cpu",))
except FileNotFoundError:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), model_name + ".pth"
)
net.eval()
return net
def norm_pred(d):
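# Min-max normalize the predicted map to [0, 1].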
ma = torch.max(d)
mi = torch.min(d)
dn = (d - mi) / (ma - mi)
return dn
def preprocess(image):
label_3 = np.zeros(image.shape)
label = np.zeros(label_3.shape[0:2])
if 3 == len(label_3.shape):
label = label_3[:, :, 0]
elif 2 == len(label_3.shape):
label = label_3
if 3 == len(image.shape) and 2 == len(label.shape):
label = label[:, :, np.newaxis]
elif 2 == len(image.shape) and 2 == len(label.shape):
image = image[:, :, np.newaxis]
label = label[:, :, np.newaxis]
transform = transforms.Compose(
[data_loader.RescaleT(320), data_loader.ToTensorLab(flag=0)]
)
sample = transform({"imidx": np.array([0]), "image": image, "label": label})
return sample
def predict(net, item):
sample = preprocess(item)
with torch.no_grad():
if torch.cuda.is_available():
inputs_test = torch.cuda.FloatTensor(sample["image"].unsqueeze(0).float())
else:
inputs_test = torch.FloatTensor(sample["image"].unsqueeze(0).float())
d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)
pred = d1[:, 0, :, :]
predict = norm_pred(pred)
predict = predict.squeeze()
predict_np = predict.cpu().detach().numpy()
img = Image.fromarray(predict_np * 255).convert("RGB")
del d1, d2, d3, d4, d5, d6, d7, pred, predict, predict_np, inputs_test, sample
return img
| [
"torch.device",
"torch.min",
"torch.max",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 1.6.0 | 0xflotus/rembg | 7fb6683169d588f653281d53c3c258838194c950 |
0.4 | from collections import OrderedDict
import torch.nn as nn
import torchvision.models as models
class LinkNet(nn.Module):
def __init__(self, num_classes, resnet_size=18, pretrained_encoder=True):
super().__init__()
self.num_classes = num_classes
# The LinkNet encoder is a ResNet18 without the last average pooling layer and
# the fully connected layer
if resnet_size == 18:
resnet = models.resnet18(pretrained=pretrained_encoder)
elif resnet_size == 34:
resnet = models.resnet34(pretrained=pretrained_encoder)
else:
raise ValueError(
"expected 18 or 34 for resnet_size, got {}".format(resnet_size)
)
encoder_list = list(resnet.named_children())[:-2]
self.encoder = nn.Sequential(OrderedDict([*encoder_list]))
# Construct the decoder
self.layer4_d = DecoderBlock(512, 256, stride=2, padding=1)
self.layer3_d = DecoderBlock(256, 128, stride=2, padding=1)
self.layer2_d = DecoderBlock(128, 64, stride=2, padding=1)
self.layer1_d = DecoderBlock(64, 64, stride=1, padding=1)
self.tconv1_d = nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1)
self.bn1_d = nn.BatchNorm2d(32)
self.relu1_d = nn.ReLU()
self.conv1_d = nn.Conv2d(32, 32, 3, padding=1)
self.bn2_d = nn.BatchNorm2d(32)
self.relu2_d = nn.ReLU()
self.tconv2_d = nn.ConvTranspose2d(32, self.num_classes, 3, stride=2, padding=1)
def forward(self, x):
input_x = x
# Have to access the output of a few layers in the encoder to make the skip
# connections. For that, iterate over all modules in the encoder, do the
# forward pass and save the output for the layers that are needed
skip = {}
for name, module in self.encoder.named_children():
x = module(x)
if name in ("conv1", "maxpool", "layer1", "layer2", "layer3"):
skip[name] = x
x = skip["layer3"] + self.layer4_d(x, skip["layer3"].size())
x = skip["layer2"] + self.layer3_d(x, skip["layer2"].size())
x = skip["layer1"] + self.layer2_d(x, skip["layer1"].size())
x = self.layer1_d(x, skip["maxpool"].size())
x = self.tconv1_d(x, skip["conv1"].size())
x = self.bn1_d(x)
x = self.relu1_d(x)
x = self.conv1_d(x)
x = self.bn2_d(x)
x = self.relu2_d(x)
return self.tconv2_d(x, input_x.size())
class DecoderBlock(nn.Module):
"""Creates a decoder block.
Decoder block architecture:
1. Conv2D
2. BatchNormalization
3. ReLU
4. Conv2DTranspose
5. BatchNormalization
6. ReLU
7. Conv2D
8. BatchNormalization
9. ReLU
"""
def __init__(
self,
in_channels,
out_channels,
stride=1,
padding=0,
output_padding=0,
projection_ratio=4,
bias=False,
):
super().__init__()
proj_channels = in_channels // projection_ratio
self.conv1 = nn.Conv2d(in_channels, proj_channels, 1)
self.bn1 = nn.BatchNorm2d(proj_channels)
self.relu1 = nn.ReLU()
self.tconv = nn.ConvTranspose2d(
proj_channels,
proj_channels,
3,
stride=stride,
padding=padding,
output_padding=output_padding,
)
self.bn2 = nn.BatchNorm2d(proj_channels)
self.relu2 = nn.ReLU()
self.conv2 = nn.Conv2d(proj_channels, out_channels, 1)
self.bn3 = nn.BatchNorm2d(out_channels)
self.relu3 = nn.ReLU()
def forward(self, x, output_size=None):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.tconv(x, output_size=output_size)
x = self.bn2(x)
x = self.relu2(x)
x = self.conv2(x)
x = self.bn3(x)
return self.relu3(x)
| [
"torch.nn.ReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d"
] | 0.4.1 | liyingben/kaggle-airbus-ship-detection | 21d89b2f1273b31a6ffafb4fe5f7e643ffbbc567 |
0.4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import copy
import os
import pickle
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from utils.options import args_parser
from utils.train_utils import get_data, get_model
from models.Update import DatasetSplit
from models.test import test_img_local, test_img_local_all, test_img_avg_all, test_img_ensemble_all
import pdb
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
base_dir = './save/{}/{}_iid{}_num{}_C{}_le{}/shard{}/{}/'.format(
args.dataset, args.model, args.iid, args.num_users, args.frac, args.local_ep, args.shard_per_user, args.results_save)
if not os.path.exists(os.path.join(base_dir, 'local')):
os.makedirs(os.path.join(base_dir, 'local'), exist_ok=True)
dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)
dict_save_path = os.path.join(base_dir, 'dict_users.pkl')
with open(dict_save_path, 'rb') as handle:
dict_users_train, dict_users_test = pickle.load(handle)
# build model
net_glob = get_model(args)
net_glob.train()
net_local_list = []
for user_ix in range(args.num_users):
net_local_list.append(copy.deepcopy(net_glob))
# training
results_save_path = os.path.join(base_dir, 'local/results.csv')
loss_train = []
net_best = None
best_loss = None
best_acc = None
best_epoch = None
lr = args.lr
results = []
criterion = nn.CrossEntropyLoss()
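# Train an independent local model per user on that user's own data shard.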
for user, net_local in enumerate(net_local_list):
model_save_path = os.path.join(base_dir, 'local/model_user{}.pt'.format(user))
net_best = None
best_acc = None
ldr_train = DataLoader(DatasetSplit(dataset_train, dict_users_train[user]), batch_size=args.local_bs, shuffle=True)
optimizer = torch.optim.SGD(net_local.parameters(), lr=lr, momentum=0.5)
for iter in range(args.epochs):
for batch_idx, (images, labels) in enumerate(ldr_train):
images, labels = images.to(args.device), labels.to(args.device)
net_local.zero_grad()
log_probs = net_local(images)
loss = criterion(log_probs, labels)
loss.backward()
optimizer.step()
acc_test, loss_test = test_img_local(net_local, dataset_test, args, user_idx=user, idxs=dict_users_test[user])
if best_acc is None or acc_test > best_acc:
best_acc = acc_test
net_best = copy.deepcopy(net_local)
# torch.save(net_local_list[user].state_dict(), model_save_path)
print('User {}, Epoch {}, Acc {:.2f}'.format(user, iter, acc_test))
if iter > 50 and acc_test >= 99:
break
net_local_list[user] = net_best
acc_test_local, loss_test_local = test_img_local_all(net_local_list, args, dataset_test, dict_users_test)
acc_test_avg, loss_test_avg = test_img_avg_all(net_glob, net_local_list, args, dataset_test)
acc_test_ens_avg, loss_test, acc_test_ens_maj = test_img_ensemble_all(net_local_list, args, dataset_test)
print('Final: acc: {:.2f}, acc (avg): {:.2f}, acc (ens,avg): {:.2f}, acc (ens,maj): {:.2f}'.format(acc_test_local, acc_test_avg, acc_test_ens_avg, acc_test_ens_maj))
final_results = np.array([[acc_test_local, acc_test_avg, acc_test_ens_avg, acc_test_ens_maj]])
final_results = pd.DataFrame(final_results, columns=['acc_test_local', 'acc_test_avg', 'acc_test_ens_avg', 'acc_test_ens_maj'])
final_results.to_csv(results_save_path, index=False)
| [
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss"
] | 0.4.1 | yhyeh/LG-FedAvg | f64a2943c7f1fed214412033e0fa0a63f3c03fb8 |
1.0 | """
Unit tests for various optimization related utilities.
"""
import unittest
import torch
from texar.core.optimization import *
class OptimizationTest(unittest.TestCase):
r"""Test optimization.
"""
def setUp(self):
N, D_in, H, D_out = 64, 100, 10, 1
self.x = torch.randn(N, D_in)
self.y = torch.randn(N, D_out)
self.model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),)
self.loss_fn = torch.nn.MSELoss(reduction='sum')
def test_get_optimizer(self):
r"""Tests get_optimizer.
"""
default_optimizer = get_optimizer(params=[torch.tensor(1)],
hparams=None)
self.assertIsInstance(default_optimizer, torch.optim.Adam)
hparams = {
"optimizer": {
"type": "RMSprop",
"kwargs": {
"lr": 0.001,
"alpha": 0.99,
"eps": 1e-8,
"weight_decay": 0,
"momentum": 0,
"centered": False
}
},
"learning_rate_decay": {
"type": "",
"kwargs": {}
},
"gradient_clip": {
"type": "",
"kwargs": {}
},
"gradient_noise_scale": None,
"name": None
}
rmsprop_optimizer = get_optimizer(params=[torch.tensor(1)],
hparams=hparams)
self.assertIsInstance(rmsprop_optimizer, torch.optim.RMSprop)
hparams = {
"optimizer": {
"type": torch.optim.SGD,
"kwargs": {
"lr": 0.001,
"weight_decay": 0,
"momentum": 0
}
},
"learning_rate_decay": {
"type": "",
"kwargs": {}
},
"gradient_clip": {
"type": "",
"kwargs": {}
},
"gradient_noise_scale": None,
"name": None
}
sgd_optimizer = get_optimizer(params=[torch.tensor(1)],
hparams=hparams)
self.assertIsInstance(sgd_optimizer, torch.optim.SGD)
def test_get_scheduler(self):
r"""Tests get_scheduler.
"""
optimizer = get_optimizer(params=[torch.tensor(1)], hparams=None)
default_scheduler = get_scheduler(optimizer=optimizer,
hparams=None)
self.assertEqual(default_scheduler, None)
hparams = {
"optimizer": {
"type": "",
"kwargs": {}
},
"learning_rate_decay": {
"type": "ExponentialLR",
"kwargs": {
"gamma": 0.99
}
},
"gradient_clip": {
"type": "",
"kwargs": {}
},
"gradient_noise_scale": None,
"name": None
}
scheduler = get_scheduler(optimizer=optimizer,
hparams=hparams)
self.assertIsInstance(scheduler, torch.optim.lr_scheduler.ExponentialLR)
hparams = {
"optimizer": {
"type": "",
"kwargs": {}
},
"learning_rate_decay": {
"type": torch.optim.lr_scheduler.ExponentialLR,
"kwargs": {
"gamma": 0.99
}
},
"gradient_clip": {
"type": "",
"kwargs": {}
},
"gradient_noise_scale": None,
"name": None
}
scheduler = get_scheduler(optimizer=optimizer,
hparams=hparams)
self.assertIsInstance(scheduler, torch.optim.lr_scheduler.ExponentialLR)
def test_get_grad_clip_fn(self):
r"""Tests get_grad_clip_fn.
"""
default_grad_clip_fn = get_grad_clip_fn(hparams=None)
self.assertEqual(default_grad_clip_fn, None)
hparams = {
"optimizer": {
"type": "",
"kwargs": {}
},
"learning_rate_decay": {
"type": "",
"kwargs": {}
},
"gradient_clip": {
"type": "clip_grad_norm_",
"kwargs": {
"max_norm": 10,
"norm_type": 2
}
},
"gradient_noise_scale": None,
"name": None
}
grad_clip_fn = get_grad_clip_fn(hparams=hparams)
if not callable(grad_clip_fn):
raise ValueError("grad_clip_fn is not callable")
hparams = {
"optimizer": {
"type": "",
"kwargs": {}
},
"learning_rate_decay": {
"type": "",
"kwargs": {}
},
"gradient_clip": {
"type": torch.nn.utils.clip_grad_norm_,
"kwargs": {
"max_norm": 10,
"norm_type": 2
}
},
"gradient_noise_scale": None,
"name": None
}
grad_clip_fn = get_grad_clip_fn(hparams=hparams)
if not callable(grad_clip_fn):
raise ValueError("grad_clip_fn is not callable")
def test_get_train_op(self):
r"""Tests get_train_op.
"""
hparams = {
"optimizer": {
"type": torch.optim.SGD,
"kwargs": {
"lr": 0.001
}
},
"learning_rate_decay": {
"type": torch.optim.lr_scheduler.ExponentialLR,
"kwargs": {
"gamma": 0.99
}
},
"gradient_clip": {
"type": torch.nn.utils.clip_grad_norm_,
"kwargs": {
"max_norm": 10,
"norm_type": 2
}
},
"gradient_noise_scale": None,
"name": None
}
optimizer = get_optimizer(self.model.parameters(), hparams)
train_op = get_train_op(optimizer, hparams)
for t in range(50):
y_pred = self.model(self.x)
loss = self.loss_fn(y_pred, self.y)
loss.backward()
train_op()
if __name__ == "__main__":
unittest.main()
| [
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.ReLU",
"torch.tensor",
"torch.randn"
] | 1.0.0 | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 |
1.0 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transformer decoder.
"""
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from texar.core import layers
from texar.hyperparams import HParams
from texar.modules.decoders.decoder_base import DecoderBase, _make_output_layer
from texar.modules.decoders.decoder_helpers import EmbeddingHelper, Helper
from texar.modules.encoders.multihead_attention import (
Cache, MultiheadAttentionEncoder)
from texar.modules.encoders.transformer_encoder import (
default_transformer_poswise_net_hparams)
from texar.modules.networks.networks import FeedForwardNetwork
from texar.utils import transformer_attentions as attn
from texar.utils.beam_search import beam_search
from texar.utils.shapes import mask_sequences
from texar.utils.utils import sequence_mask
__all__ = [
'TransformerDecoderOutput',
'TransformerDecoder',
]
class TransformerDecoderOutput(NamedTuple):
r"""The output of :class:`TransformerDecoder`.
"""
logits: torch.Tensor
r"""A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``
containing the logits."""
sample_id: torch.LongTensor
r"""A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing
the sampled token indices."""
class TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):
r"""Transformer decoder that applies multi-head self-attention for
sequence decoding.
It is a stack of :class:`~texar.modules.encoders.MultiheadAttentionEncoder`,
:class:`~texar.modules.FeedForwardNetwork`, and residual connections.
Args:
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~texar.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
def __init__(self,
vocab_size: Optional[int] = None,
output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,
hparams: Optional[HParams] = None):
super().__init__(0, vocab_size, # dummy value for input_size
input_time_major=False,
output_time_major=False, hparams=hparams)
self._input_size = self._hparams.dim
self._output_layer, self._vocab_size = _make_output_layer(
output_layer, vocab_size, self._input_size,
self._hparams.output_layer_bias)
self.self_attns = nn.ModuleList()
self.self_attn_layer_norm = nn.ModuleList()
self.enc_dec_attns = nn.ModuleList()
self.end_dec_attn_layer_norm = nn.ModuleList()
self.poswise_networks = nn.ModuleList()
self.poswise_layer_norm = nn.ModuleList()
if self._hparams.use_gpt_config:
eps = 1e-5
else:
eps = 1e-12
for _ in range(self._hparams.num_blocks):
attn_module = MultiheadAttentionEncoder(
self._input_size, self._hparams.multihead_attention)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadEncoder should be equal "
"to the dim of TransformerDecoder")
self.self_attns.append(attn_module)
self.self_attn_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
attn_module = MultiheadAttentionEncoder(
self._input_size, self._hparams.multihead_attention)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadEncoder should be equal "
"to the dim of TransformerDecoder")
self.enc_dec_attns.append(attn_module)
self.end_dec_attn_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
poswise_network = FeedForwardNetwork(
hparams=self._hparams.poswise_feedforward)
if (poswise_network.hparams.layers[-1]['kwargs']['out_features']
!= self._hparams.dim):
raise ValueError("The output dimension of "
"FeedForwardNetwork should be equal "
"to the dim of TransformerDecoder")
self.poswise_networks.append(poswise_network)
self.poswise_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
self.final_layer_norm = nn.LayerNorm(self._input_size, eps=eps)
self.embed_dropout = nn.Dropout(self._hparams.embedding_dropout)
self.residual_dropout = nn.Dropout(self._hparams.residual_dropout)
if self._hparams.initializer:
# TODO: This might be different to what TensorFlow does
initialize = layers.get_initializer(self._hparams.initializer)
assert initialize is not None
# Do not re-initialize LayerNorm modules.
for name, param in self.named_parameters():
if name.split(".")[-1] == "weight" and "layer_norm" not in name:
initialize(param)
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# Same as in TransformerEncoder
"num_blocks": 6,
"dim": 512,
"use_gpt_config": False,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"poswise_feedforward": default_transformer_poswise_net_hparams,
"multihead_attention": {
'name': 'multihead_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
"initializer": None,
"name": "transformer_decoder"
# Additional for TransformerDecoder
"embedding_tie": True,
"output_layer_bias": False,
"max_decoding_length": int(1e10),
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoder.
`"use_gpt_config"`: bool
Whether to follow the `eps` setting of OpenAI GPT.
`"embedding_dropout"`: float
Dropout rate of the input word and position embeddings.
`"residual_dropout"`: float
Dropout rate of the residual connections.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``dim``.
See :func:`~texar.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``output_dim`` in this module is equal to ``dim``.
See :func:`~texar.modules.MultiheadAttentionEncoder.default_hparams`
for details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.core.get_initializer` for details.
`"embedding_tie"`: bool
Whether to use the word embedding matrix as the output layer
that computes logits. If `False`, a new dense layer is created.
`"output_layer_bias"`: bool
Whether to use bias to the output layer.
`"max_decoding_length"`: int
The maximum allowed number of decoding steps.
Set to a very large number to avoid the length constraint.
Ignored if provided in :meth:`forward` or ``"train_greedy"``
decoding is used.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'use_gpt_config': False,
'embedding_tie': True,
'output_layer_bias': False,
'max_decoding_length': int(1e10),
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
'initializer': None,
'name': "transformer_decoder",
}
def _inputs_to_outputs(self, inputs: torch.Tensor,
cache: Cache) -> Tuple[torch.Tensor, Cache]:
r"""Returns the outputs of one decoding step (for example,
the predicted logits of the next token).
:attr:`inputs` should be of shape ``[batch_size, dim]``.
Returns:
A tuple of logits and updated cache. Logits are of shape
``[batch_size, vocab_size]``.
"""
outputs = self._self_attention_stack(
inputs.unsqueeze(1), memory=cache['memory'], cache=cache)
outputs = self._output_layer(outputs)
outputs = outputs.squeeze(1)
return outputs, cache
def forward(self, # type: ignore
inputs: Optional[torch.Tensor] = None,
sequence_length: Optional[torch.LongTensor] = None,
memory: Optional[torch.Tensor] = None,
memory_sequence_length: Optional[torch.LongTensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
context: Optional[torch.Tensor] = None,
context_sequence_length: Optional[torch.LongTensor] = None,
helper: Optional[Helper] = None,
decoding_strategy: str = 'train_greedy',
max_decoding_length: Optional[int] = None,
impute_finished: bool = False,
infer_mode: Optional[bool] = None,
beam_width: Optional[int] = None,
length_penalty: float = 0.,
**kwargs) \
-> Union[
TransformerDecoderOutput,
Tuple[TransformerDecoderOutput, torch.LongTensor],
Dict[str, torch.Tensor]]:
r"""Performs decoding.
The interface is very similar to that of RNN decoders
(:class:`texar.modules.RNNDecoderBase`). In particular,
the function provides **3 ways** to specify the decoding method, with
varying flexibility:
1. The :attr:`decoding_strategy` argument.
- **"train_greedy"**: decoding in teacher-forcing fashion (i.e.,
feeding ground truth to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Argument :attr:`inputs` is required for this strategy.
:attr:`sequence_length` is optional.
- **"infer_greedy"**: decoding in inference fashion (i.e., feeding
`generated` sample to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Arguments :attr:`(start_tokens, end_token)` are
required for this strategy, and argument
:attr:`max_decoding_length` is optional.
- **"infer_sample"**: decoding in inference fashion, and for each
step sample is obtained by `random sampling` from the logits.
Arguments :attr:`(start_tokens, end_token)` are required for this
strategy, and argument :attr:`max_decoding_length` is optional.
This argument is used only when arguments :attr:`helper` and
:attr:`beam_width` are both `None`.
2. The :attr:`helper` argument: An instance of subclass of
:class:`texar.modules.decoders.Helper`.
This provides a superset of the decoding strategies above.
The interface is the same as in RNN decoders.
Please refer to :meth:`texar.modules.RNNDecoderBase.forward` for
detailed usage and examples.
Note that, here, though using a
:class:`~texar.decoder.TrainingHelper` corresponding to the
``"train_greedy"`` strategy above, the implementation is *slower*
than directly setting ``decoding_strategy="train_greedy"`` (though
output results are the same).
Argument :attr:`max_decoding_length` is optional.
3. **Beam search**: set :attr:`beam_width` to use beam search decoding.
Arguments :attr:`(start_tokens, end_token)` are required,
and argument :attr:`max_decoding_length` is optional.
.. warning::
Beam search is not yet implemented. Setting :attr:`beam_width`
to any value greater than 1 would raise a
:exc:`NotImplementedError`
Args:
memory (optional): The memory to attend, e.g., the output of an RNN
encoder. A :tensor:`Tensor` of shape
``[batch_size, memory_max_time, dim]``.
memory_sequence_length (optional): A :tensor:`Tensor` of shape
``[batch_size]`` containing the sequence lengths for the batch
entries in memory. Used to create attention bias of
:attr:`memory_attention_bias` is not given. Ignored if
:attr:`memory_attention_bias` is provided.
memory_attention_bias (optional): A :tensor:`Tensor` of shape
``[batch_size, num_heads, memory_max_time, dim]``.
An attention bias typically sets the value of a padding
position to a large negative value for masking. If not given,
:attr:`memory_sequence_length` is used to automatically
create an attention bias.
inputs (optional): Input tensor for teacher forcing decoding, of
shape ``[batch_size, target_max_time, emb_dim]`` containing the
target sequence word embeddings. Used when
:attr:`decoding_strategy` is set to ``"train_greedy"``.
sequence_length (optional): A :tensor:`LongTensor` of shape
``[batch_size]``, containing the sequence length of
:attr:`inputs`. Tokens beyond the respective sequence length are
masked out.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``.
decoding_strategy (str): A string specifying the decoding
strategy, including ``"train_greedy"``, ``"infer_greedy"``,
``"infer_sample"``.
Different arguments are required based on the
strategy. See above for details. Ignored if
:attr:`beam_width` or :attr:`helper` is set.
beam_width (int): Set to use beam search. If given,
:attr:`decoding_strategy` is ignored.
length_penalty (float): Length penalty coefficient used in beam
search decoding. Refer to https://arxiv.org/abs/1609.08144
for more details.
It should be larger if longer sentences are desired.
context (optional): A :tensor:`LongTensor` of shape
``[batch_size, length]``, containing the starting tokens for
decoding. If context is set, ``start_tokens`` of the
:class:`~texar.modules.Helper` will be ignored.
context_sequence_length (optional): Specify the length of context.
max_decoding_length (int, optional): The maximum allowed number of
decoding steps.
If `None` (default), use ``"max_decoding_length"`` defined in
:attr:`hparams`. Ignored in ``"train_greedy"`` decoding.
impute_finished (bool): If `True`, then states for batch
entries which are marked as finished get copied through and
the corresponding outputs get zeroed out. This causes some
slowdown at each time step, but ensures that the final state
and outputs have the correct values and that backprop ignores
time steps that were marked as finished. Ignored in
``"train_greedy"`` decoding.
helper (optional): An instance of
:class:`texar.modules.decoders.Helper`
that defines the decoding strategy. If given,
``decoding_strategy`` and helper configurations in
:attr:`hparams` are ignored.
infer_mode (optional): If not `None`, overrides mode given by
:attr:`self.training`.
Returns:
- For **"train_greedy"** decoding, returns an instance of
:class:`~texar.modules.TransformerDecoderOutput` which contains
`sample_id` and `logits`.
- For **"infer_greedy"** and **"infer_sample"** decoding or
decoding with :attr:`helper`, returns
a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an
instance of :class:`~texar.modules.TransformerDecoderOutput` as
in `"train_greedy"`, and ``sequence_lengths`` is a
:tensor:`LongTensor` of shape ``[batch_size]`` containing the
length of each sample.
- For **beam search** decoding, returns a ``dict`` containing keys
``"sample_id"`` and ``"log_prob"``.
- ``"sample_id"`` is a :tensor:`LongTensor` of shape
``[batch_size, max_time, beam_width]`` containing generated
token indexes. ``sample_id[:,:,0]`` is the highest-probable
sample.
- ``"log_prob"`` is a :tensor:`Tensor` of shape
``[batch_size, beam_width]`` containing the log probability
of each sequence sample.
"""
if memory is not None:
if memory_attention_bias is None:
if memory_sequence_length is None:
raise ValueError(
"`memory_sequence_length` is required if "
"`memory_attention_bias` is not given.")
enc_padding = 1 - sequence_mask(
memory_sequence_length, memory.size(1),
dtype=torch.float32)
memory_attention_bias = attn.attention_bias_ignore_padding(
enc_padding)
# record the context, which will be used in step function
# for dynamic_decode
if context is not None:
if context_sequence_length is None:
raise ValueError("'context_sequence_length' must not be None"
"when 'context' is specified.")
self._state_context = context[:, 1:]
self._state_context_sequence_length = context_sequence_length - 1
else:
self._state_context = None
self._state_context_sequence_length = None
# Faster code path for teacher-forcing training
if (helper is None and beam_width is None and
decoding_strategy == 'train_greedy'):
if inputs is None:
raise ValueError("'input' must not be none "
"when using 'train_greedy' decoding strategy.")
if sequence_length is not None:
inputs = mask_sequences(inputs, sequence_length)
decoder_self_attention_bias = (
attn.attention_bias_lower_triangle(inputs.size(1)))
decoder_output = self._self_attention_stack(
inputs, memory, decoder_self_attention_bias,
memory_attention_bias, cache=None)
logits = self._output_layer(decoder_output)
sample_id = torch.argmax(logits, dim=-1)
return TransformerDecoderOutput(logits, sample_id)
# Inference code path.
if max_decoding_length is None:
max_decoding_length = self._hparams.max_decoding_length
self._state_max_decoding_length = max_decoding_length
if beam_width is None or beam_width == 1: # Inference-like decoding
# Prepare helper
if helper is None:
kwargs.update(decoding_strategy=decoding_strategy)
if context is not None:
kwargs.update(start_tokens=context[:, 0])
helper = self._create_or_get_helper(infer_mode, **kwargs)
assert isinstance(helper, EmbeddingHelper)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=False, batch_size=helper.batch_size)
if context is not None:
assert self._state_context is not None
pad_length = max_decoding_length - self._state_context.size(1)
if pad_length > 0:
self._state_context = torch.cat((
self._state_context,
self._state_context.new_zeros(
self._state_context.size(0), pad_length)
), dim=1)
outputs, cache, sequence_lengths = self.dynamic_decode(
helper, inputs=None, sequence_length=None,
initial_state=None, max_decoding_length=max_decoding_length,
impute_finished=impute_finished)
del cache # not used
if context is not None:
# Here the length of sample_id will be larger than that
# of logits by 1, because there will be an additional
# start_token in the returned sample_id.
# The start_token should be the first token of the
# given context.
start_tokens = context[:, 0]
outputs = TransformerDecoderOutput(
logits=outputs.logits,
sample_id=torch.cat([
start_tokens.unsqueeze(1),
outputs.sample_id
], dim=1))
sequence_lengths = sequence_lengths + 1
return outputs, sequence_lengths
else: # Beam-search decoding
# Ignore `decoding_strategy`; assume `helper` is not set.
if helper is not None:
raise ValueError("Must not set 'beam_width' and 'helper' "
"simultaneously.")
if context is not None:
start_tokens = context[:, 0]
else:
if 'start_tokens' not in kwargs:
raise ValueError(
"'start_tokens' must be specified when using"
"beam search decoding.")
start_tokens = kwargs['start_tokens']
_batch_size = start_tokens.size(0)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=True,
batch_size=_batch_size)
end_token: int = kwargs.get('end_token') # type: ignore
# The output format is different when running beam search.
sample_id, log_prob = self._beam_decode(
start_tokens,
end_token,
embedding_fn=kwargs['embedding'],
beam_width=beam_width,
length_penalty=length_penalty,
decode_length=max_decoding_length)
return {
'sample_id': sample_id,
'log_prob': log_prob
}
def _self_attention_stack(
self, inputs: torch.Tensor,
memory: Optional[torch.Tensor],
decoder_self_attention_bias: Optional[torch.Tensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None) -> torch.Tensor:
r"""Forward through the stacked multi-head attentions.
"""
inputs = self.embed_dropout(inputs)
if cache is not None:
if memory is not None:
memory_attention_bias = cache['memory_attention_bias']
else:
assert decoder_self_attention_bias is not None
x = inputs
for i in range(self._hparams.num_blocks):
layer_cache = cache['layers'][i] if cache is not None else None
selfatt_output = self.self_attns[i](
queries=self.self_attn_layer_norm[i](x),
memory=None,
memory_attention_bias=decoder_self_attention_bias,
cache=layer_cache)
x = x + self.residual_dropout(selfatt_output)
if memory is not None:
encdec_output = self.enc_dec_attns[i](
queries=self.end_dec_attn_layer_norm[i](x),
memory=memory,
memory_attention_bias=memory_attention_bias)
x = x + self.residual_dropout(encdec_output)
sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
x = x + self.residual_dropout(sub_output)
return self.final_layer_norm(x)
def _init_cache(self, memory: Optional[torch.Tensor],
memory_attention_bias: Optional[torch.Tensor],
beam_search_decoding: bool,
batch_size: int) -> Cache:
r"""Returns an initialized cache.
In order to support both inference-like decoding and beam-search
decoding, the elements of each layer must be initialized and extended
as different structures, respectively. Specifically, for inference-like
decoding, a simple list is used; for beam-search decoding, a
:tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``
is maintained, where ``current_steps`` is the number of steps currently
decoded.
"""
device = next(self.parameters()).device
def _create_ta():
return []
def _create_empty_tensor():
ret = torch.zeros(
batch_size, 0, self._hparams.multihead_attention.num_units,
dtype=torch.float, device=device)
return ret
_create_fn = (_create_empty_tensor if beam_search_decoding
else _create_ta)
cache: Cache = {
'memory': memory,
'memory_attention_bias': memory_attention_bias,
'layers': [{
'keys': _create_fn(),
'values': _create_fn(),
} for _ in range(self._hparams.num_blocks)],
}
return cache
def _beam_decode(self, start_tokens: torch.LongTensor, end_token: int,
embedding_fn: Callable[
[torch.LongTensor, torch.LongTensor], torch.Tensor],
decode_length: int = 256, beam_width: int = 5,
length_penalty: float = 0.6) \
-> Tuple[torch.Tensor, torch.Tensor]:
def _symbols_to_logits_fn(ids, cache):
batch_size = ids.size(0)
step = ids.size(-1) - 1
times = ids.new_full((batch_size,), step)
inputs = embedding_fn(ids[:, -1], times)
return self._inputs_to_outputs(inputs, cache)
assert self._vocab_size is not None
outputs, log_prob = beam_search(
_symbols_to_logits_fn,
start_tokens,
beam_width,
decode_length,
self._vocab_size,
length_penalty,
states=self._state_cache,
eos_id=end_token)
# Ignores <BOS>
outputs = outputs[:, :, 1:]
# shape = [batch_size, seq_length, beam_width]
outputs = outputs.permute(0, 2, 1)
return outputs, log_prob
@property
def output_size(self) -> int:
r"""Output size of one step.
"""
return self._input_size
def initialize(self, helper: Helper, inputs: Optional[torch.Tensor],
sequence_length: Optional[torch.LongTensor],
initial_state: Optional[Cache]) \
-> Tuple[torch.ByteTensor, torch.Tensor, Cache]:
initial_finished, initial_inputs = helper.initialize(
inputs, sequence_length)
state = initial_state or self._state_cache
return initial_finished, initial_inputs, state
def step(self, helper: Helper, time: int,
inputs: torch.Tensor, state: Optional[Cache]) \
-> Tuple[TransformerDecoderOutput, Cache,
torch.Tensor, torch.ByteTensor]:
assert state is not None
outputs, state = self._inputs_to_outputs(inputs, state)
sample_ids = helper.sample(time=time, outputs=outputs)
if self._state_context is not None:
assert self._state_context_sequence_length is not None
sample_ids = torch.where(
self._state_context_sequence_length > time,
self._state_context[:, time],
sample_ids)
if time + 1 == self._state_max_decoding_length:
# Maximum decoding length reached, mark all batches as finished.
# This requires special handling because performing lookup on
# position embeddings with `time + 1` may result in IndexError.
finished = torch.ones_like(sample_ids, dtype=torch.uint8)
# Since `next_inputs` will not be used, simply create a null tensor.
next_inputs = torch.empty(0)
else:
finished, next_inputs = helper.next_inputs(
time=time, outputs=outputs, sample_ids=sample_ids)
next_state = state
outputs = TransformerDecoderOutput(
logits=outputs,
sample_id=sample_ids)
return outputs, next_state, next_inputs, finished
def finalize(self, # type: ignore
outputs: TransformerDecoderOutput,
final_state: Optional[Cache],
sequence_lengths: torch.LongTensor) \
-> Tuple[TransformerDecoderOutput, Optional[Cache]]:
# Clear state variables at end of decoding.
del self._state_max_decoding_length
del self._state_context
del self._state_context_sequence_length
del self._state_cache
return super().finalize(outputs, final_state, sequence_lengths)
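# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It shows teacher-forcing ("train_greedy") decoding with default
# hyperparameters. All shapes and values below are illustrative assumptions;
# in practice `inputs` comes from a word/position embedder whose output
# dimension equals ``hparams["dim"]`` (512 by default).
#
#   decoder = TransformerDecoder(vocab_size=1000)
#   inputs = torch.randn(8, 20, 512)                 # [batch, target_time, dim]
#   memory = torch.randn(8, 16, 512)                 # encoder outputs
#   memory_len = torch.full((8,), 16, dtype=torch.long)
#   outputs = decoder(inputs=inputs,
#                     memory=memory,
#                     memory_sequence_length=memory_len,
#                     decoding_strategy="train_greedy")
#   # outputs.logits: [8, 20, 1000]; outputs.sample_id: [8, 20]
# ---------------------------------------------------------------------------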
| [
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ModuleList",
"torch.ones_like",
"torch.empty",
"torch.argmax",
"torch.where"
] | 1.0.0 | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 |
1.0 | """
Unit tests for attention mechanism.
"""
import unittest
import numpy as np
import torch
from texar.core.attention_mechanism import *
class AttentionMechanismTest(unittest.TestCase):
r"""Tests attention mechanism.
"""
def setUp(self):
self._batch_size = 8
self._max_time = 16
self._encoder_output_size = 64
self._attention_dim = 256
self._memory = torch.rand(
self._batch_size, self._max_time, self._encoder_output_size
)
self._memory_sequence_length = torch.tensor(
np.random.randint(self._max_time, size=[self._batch_size]) + 1
)
self._attention_state = torch.rand(self._batch_size, self._max_time)
def test_LuongAttention(self):
r"""Tests `LuongAttention`
"""
# Case 1
attention_mechanism = LuongAttention(
num_units=self._attention_dim,
encoder_output_size=self._encoder_output_size)
cell_output = torch.rand(self._batch_size, self._attention_dim)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 1)
# Case 2
attention_mechanism = LuongAttention(
num_units=self._attention_dim,
encoder_output_size=self._encoder_output_size,
scale=True)
cell_output = torch.rand(self._batch_size, self._attention_dim)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 2)
def test_BahdanauAttention(self):
r"""Tests BahdanauAttention
"""
# Case 1
attention_mechanism = BahdanauAttention(
num_units=self._attention_dim,
decoder_output_size=128,
encoder_output_size=self._encoder_output_size)
cell_output = torch.rand(self._batch_size, 128)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 3)
# Case 2
attention_mechanism = BahdanauAttention(
num_units=self._attention_dim,
decoder_output_size=128,
encoder_output_size=self._encoder_output_size,
normalize=True)
cell_output = torch.rand(self._batch_size, 128)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 5)
def test_LuongMonotonicAttention(self):
r"""Tests LuongMonotonicAttention
"""
# Case 1
attention_mechanism = LuongMonotonicAttention(
num_units=self._attention_dim,
encoder_output_size=self._encoder_output_size)
cell_output = torch.rand(self._batch_size, self._attention_dim)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 2)
# Case 2
attention_mechanism = LuongMonotonicAttention(
num_units=self._attention_dim,
encoder_output_size=self._encoder_output_size,
scale=True)
cell_output = torch.rand(self._batch_size, self._attention_dim)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 3)
def test_BahdanauMonotonicAttention(self):
r"""Tests BahdanauMonotonicAttention
"""
# Case 1
attention_mechanism = BahdanauMonotonicAttention(
num_units=self._attention_dim,
decoder_output_size=128,
encoder_output_size=self._encoder_output_size)
cell_output = torch.rand(self._batch_size, 128)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 4)
# Case 2
attention_mechanism = BahdanauMonotonicAttention(
num_units=self._attention_dim,
decoder_output_size=128,
encoder_output_size=self._encoder_output_size,
normalize=True)
cell_output = torch.rand(self._batch_size, 128)
attention, alignments, next_attention_state = \
compute_attention(
attention_mechanism=attention_mechanism,
cell_output=cell_output,
attention_state=self._attention_state,
memory=self._memory,
attention_layer=None,
memory_sequence_length=self._memory_sequence_length)
self.assertEqual(attention.shape, torch.Size(
[self._batch_size, self._encoder_output_size]))
self.assertEqual(alignments.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(next_attention_state.shape, torch.Size(
[self._batch_size, self._max_time]))
self.assertEqual(len(attention_mechanism.trainable_variables), 6)
if __name__ == "__main__":
unittest.main()
| [
"torch.Size",
"torch.rand"
] | 1.0.0 | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 |
1.2 | import time
from typing import Any, Callable, Dict, List, Optional
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from captum._utils.models.linear_model.model import LinearModel
def l2_loss(x1, x2, weights=None):
if weights is None:
return torch.mean((x1 - x2) ** 2) / 2.0
else:
return torch.sum((weights / weights.norm(p=1)) * ((x1 - x2) ** 2)) / 2.0
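# Hedged illustration (added; not in the original source): when `weights` is
# given, the weights are first normalized by their L1 norm, so uniform weights
# reproduce the unweighted mean. The tensors below are arbitrary examples.
#
#   x1, x2 = torch.zeros(4), torch.ones(4)
#   l2_loss(x1, x2)                          # mean((x1 - x2)**2) / 2 == 0.5
#   l2_loss(x1, x2, weights=torch.ones(4))   # sum(0.25 * 1.0) / 2     == 0.5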
def sgd_train_linear_model(
model: LinearModel,
dataloader: DataLoader,
construct_kwargs: Dict[str, Any],
max_epoch: int = 100,
reduce_lr: bool = True,
initial_lr: float = 0.01,
alpha: float = 1.0,
loss_fn: Callable = l2_loss,
reg_term: Optional[int] = 1,
patience: int = 10,
threshold: float = 1e-4,
running_loss_window: Optional[int] = None,
device: Optional[str] = None,
init_scheme: str = "zeros",
debug: bool = False,
) -> Dict[str, float]:
r"""
Trains a linear model with SGD. This will continue to iterate your
dataloader until we converge to a solution or, alternatively, until we have
exhausted `max_epoch`.
Convergence is defined by the loss not changing by `threshold` amount for
`patience` number of iterations.
Args:
model
The model to train
dataloader
The data to train it with. We will assume the dataloader produces
either pairs or triples of the form (x, y) or (x, y, w). Where x and
y are typical pairs for supervised learning and w is a weight
vector.
We will call `model._construct_model_params` with construct_kwargs
and the input features set to `x.shape[1]` (`x.shape[0]` corresponds
to the batch size). We assume that `len(x.shape) == 2`, i.e. the
tensor is flat. The number of output features will be set to
y.shape[1] or 1 (if `len(y.shape) == 1`); we require `len(y.shape)
<= 2`.
max_epoch
The maximum number of epochs to exhaust
reduce_lr
Whether or not to reduce the learning rate as iterations progress.
Halves the learning rate when the training loss does not move. This
uses torch.optim.lr_scheduler.ReduceLROnPlateau and uses the
parameters `patience` and `threshold`
initial_lr
The initial learning rate to use.
alpha
A constant for the regularization term.
loss_fn
The loss to optimise for. This must accept three parameters:
x1 (predicted), x2 (labels) and a weight vector
reg_term
Regularization is defined by the `reg_term` norm of the weights.
Please use `None` if you do not wish to use regularization.
patience
Defines the number of iterations in a row the loss must remain
within `threshold` in order to be classified as converged.
threshold
Threshold for convergence detection.
running_loss_window
Used to report the training loss once we have finished training and
to determine when we have converged (along with reducing the
learning rate).
The reported training loss will take the last `running_loss_window`
iterations and average them.
If `None` we will approximate this to be the number of examples in
an epoch.
init_scheme
Initialization to use prior to training the linear model.
device
The device to send the model and data to. If None then no `.to` call
will be used.
debug
Whether to print the loss, learning rate per iteration
Returns
This will return the final training loss (averaged with
`running_loss_window`)
"""
loss_window: List[torch.Tensor] = []
min_avg_loss = None
convergence_counter = 0
converged = False
def get_point(datapoint):
if len(datapoint) == 2:
x, y = datapoint
w = None
else:
x, y, w = datapoint
if device is not None:
x = x.to(device)
y = y.to(device)
if w is not None:
w = w.to(device)
return x, y, w
# get a point and construct the model
data_iter = iter(dataloader)
x, y, w = get_point(next(data_iter))
model._construct_model_params(
in_features=x.shape[1],
out_features=y.shape[1] if len(y.shape) == 2 else 1,
**construct_kwargs,
)
model.train()
assert model.linear is not None
if init_scheme is not None:
assert init_scheme in ["xavier", "zeros"]
with torch.no_grad():
if init_scheme == "xavier":
torch.nn.init.xavier_uniform_(model.linear.weight)
else:
model.linear.weight.zero_()
if model.linear.bias is not None:
model.linear.bias.zero_()
optim = torch.optim.SGD(model.parameters(), lr=initial_lr)
if reduce_lr:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optim, factor=0.5, patience=patience, threshold=threshold
)
t1 = time.time()
epoch = 0
i = 0
while epoch < max_epoch:
while True: # for x, y, w in dataloader
if running_loss_window is None:
running_loss_window = x.shape[0] * len(dataloader)
y = y.view(x.shape[0], -1)
if w is not None:
w = w.view(x.shape[0], -1)
i += 1
out = model(x)
loss = loss_fn(y, out, w)
if reg_term is not None:
reg = torch.norm(model.linear.weight, p=reg_term)
loss += reg.sum() * alpha
if len(loss_window) >= running_loss_window:
loss_window = loss_window[1:]
loss_window.append(loss.clone().detach())
assert len(loss_window) <= running_loss_window
average_loss = torch.mean(torch.stack(loss_window))
if min_avg_loss is not None:
# if we haven't improved by at least `threshold`
if average_loss > min_avg_loss or torch.isclose(
min_avg_loss, average_loss, atol=threshold
):
convergence_counter += 1
if convergence_counter >= patience:
converged = True
break
else:
convergence_counter = 0
if min_avg_loss is None or min_avg_loss >= average_loss:
min_avg_loss = average_loss.clone()
if debug:
print(
f"lr={optim.param_groups[0]['lr']}, Loss={loss},"
+ "Aloss={average_loss}, min_avg_loss={min_avg_loss}"
)
loss.backward()
optim.step()
model.zero_grad()
if reduce_lr:  # `scheduler` only exists when learning-rate reduction is enabled
scheduler.step(average_loss)
temp = next(data_iter, None)
if temp is None:
break
x, y, w = get_point(temp)
if converged:
break
epoch += 1
data_iter = iter(dataloader)
x, y, w = get_point(next(data_iter))
t2 = time.time()
return {
"train_time": t2 - t1,
"train_loss": torch.mean(torch.stack(loss_window)).item(),
"train_iter": i,
"train_epoch": epoch,
}
class NormLayer(nn.Module):
def __init__(self, mean, std, n=None, eps=1e-8) -> None:
super().__init__()
self.mean = mean
self.std = std
self.eps = eps
def forward(self, x):
return (x - self.mean) / (self.std + self.eps)
def sklearn_train_linear_model(
model: LinearModel,
dataloader: DataLoader,
construct_kwargs: Dict[str, Any],
sklearn_trainer: str = "Lasso",
norm_input: bool = False,
**fit_kwargs,
):
r"""
Alternative method to train with sklearn. This does introduce some slight
overhead as we convert the tensors to numpy and then convert the resulting
trained model to a `LinearModel` object. However, this conversion
should be negligible.
Please note that this assumes:
0. You have sklearn and numpy installed
1. The dataset can fit into memory
Args
model
The model to train.
dataloader
The data to use. This will be exhausted and converted to numpy
arrays. Therefore please do not feed an infinite dataloader.
norm_input
Whether or not to normalize the input
sklearn_trainer
The sklearn model to use to train the model. Please refer to
sklearn.linear_model for a list of modules to use.
construct_kwargs
Additional arguments provided to the `sklearn_trainer` constructor
fit_kwargs
Other arguments to send to `sklearn_trainer`'s `.fit` method
"""
from functools import reduce
try:
import numpy as np
except ImportError:
raise ValueError("numpy is not available. Please install numpy.")
try:
import sklearn
import sklearn.linear_model
import sklearn.svm
except ImportError:
raise ValueError("sklearn is not available. Please install sklearn >= 0.23")
assert sklearn.__version__ >= "0.23.0", (
"Must have sklearn version 0.23.0 or higher to use "
"sample_weight in Lasso regression.")
num_batches = 0
xs, ys, ws = [], [], []
for data in dataloader:
if len(data) == 3:
x, y, w = data
else:
assert len(data) == 2
x, y = data
w = None
xs.append(x.cpu().numpy())
ys.append(y.cpu().numpy())
if w is not None:
ws.append(w.cpu().numpy())
num_batches += 1
x = np.concatenate(xs, axis=0)
y = np.concatenate(ys, axis=0)
if len(ws) > 0:
w = np.concatenate(ws, axis=0)
else:
w = None
if norm_input:
mean, std = x.mean(0), x.std(0)
x -= mean
x /= std
t1 = time.time()
sklearn_model = reduce(
lambda val, el: getattr(val, el), [sklearn] + sklearn_trainer.split(".")
)(**construct_kwargs)
sklearn_model.fit(x, y, sample_weight=w, **fit_kwargs)
t2 = time.time()
# Convert weights to pytorch
num_outputs = 1 if len(y.shape) == 1 else y.shape[1]
weight_values = torch.FloatTensor(sklearn_model.coef_) # type: ignore
bias_values = torch.FloatTensor([sklearn_model.intercept_]) # type: ignore
model._construct_model_params(
norm_type=None,
weight_values=weight_values.view(num_outputs, -1),
bias_value=bias_values.squeeze().unsqueeze(0),
)
if norm_input:
model.norm = NormLayer(mean, std)
return {"train_time": t2 - t1}
| [
"torch.stack",
"torch.norm",
"torch.FloatTensor",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.isclose",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.mean"
] | 1.2 | edward-io/captum | 8f959950baaad00f2f9a3404d583b9f9292e35c7 |
1.0 | import torch
from . import Metric
class MeanAbsoluteError(Metric):
def __init__(self):
super().__init__("mae", default_value=float('inf'))
self._absolute_error_sum = 0.
self._total = 0
def step(self, y: torch.Tensor, y_pred: torch.Tensor):
absolute_errors = torch.abs(y - y_pred)
self._absolute_error_sum += torch.sum(absolute_errors).item()
self._total += y.size(dim=0) # dim 0 should be batch size
return torch.sum(absolute_errors)
def compute(self):
if self._total == 0:
raise ZeroDivisionError("Mean absolute error is not computable.")
return self._absolute_error_sum / self._total
def reset(self):
self._absolute_error_sum = 0.
self._total = 0
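# Hedged usage sketch (added for illustration; values are arbitrary examples):
#
#   metric = MeanAbsoluteError()
#   metric.step(torch.tensor([1.0, 2.0]), torch.tensor([1.5, 2.5]))  # errors 0.5, 0.5
#   metric.step(torch.tensor([0.0]), torch.tensor([2.0]))            # error 2.0
#   metric.compute()   # (0.5 + 0.5 + 2.0) / 3 == 1.0
#   metric.reset()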
| [
"torch.abs",
"torch.sum"
] | 1.0.0 | benoitmartin88/pytorchtrainer | 7d73acd0802e00c3589d28bce6c42a489dcd46ea |
1.9 | """Convert the model to ONN format
"""
__author__ = "Likith Reddy"
__version__ = "1.0.0"
__email__ = "[email protected]"
import torch
import torch.nn as nn
import torch.onnx
import sys, os
sys.path.insert(0, os.path.join(sys.path[0], '../'))
from configs import config
from src.dataset import BERTDataset
if __name__ == '__main__':
sentence = ['I love BERT']
dataset = BERTDataset(sentence = sentence, target = [1], config = config)
model = config.MODEL
num_device = torch.cuda.device_count()
device_ids = list(range(num_device))
if len(device_ids) > 1:
model = nn.DataParallel(model, device_ids=device_ids)
model = model.module if hasattr(model, 'module') else model
model = config.MODEL.from_pretrained(config.MODEL_PATH, local_files_only = True)
model.eval()
ids = dataset[0]['ids'].unsqueeze(0)
attention_mask = dataset[0]['mask'].unsqueeze(0)
token_type_ids = None
device = 'cpu'
ids = ids.to(device, dtype = torch.long)
attention_mask = attention_mask.to(device, dtype = torch.long)
torch.onnx.export(
model,
(ids, token_type_ids, attention_mask),
"onnx_model.onnx",
input_names = ['ids', 'token_type_ids', 'attention_mask'],
output_names = ['output'],
dynamic_axes = {
'ids': {0: 'batch_size'},
'token_type_ids': {0: 'batch_size'},
'attention_mask': {0: 'batch_size'},
'output': {0: 'batch_size'},
},
verbose = True,
opset_version=12,
enable_onnx_checker=True
) | [
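    # Hedged post-export check (added for illustration; assumes the optional
    # `onnx` package is installed): reload the exported graph and run the
    # structural checker on it.
    try:
        import onnx
        onnx_model = onnx.load("onnx_model.onnx")
        onnx.checker.check_model(onnx_model)
        print("ONNX model check passed.")
    except ImportError:
        print("onnx is not installed; skipping model verification.")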
"torch.nn.DataParallel",
"torch.onnx.export",
"torch.cuda.device_count"
] | 1.9.1 | likith012/distill-grammar | 04ff5e07337789edfe57f21f85e30e7992ae90d9 |
1.1 | import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from foresight import ei
##################################
#### H ####
##################################
def test_H_0():
x = torch.zeros((4,))
x[1] = 1
assert ei.H(x).item() == 0
def test_H_1():
x = torch.ones((4,)) / 4
assert ei.H(x).item() == 2
def test_H_2():
x = torch.ones((256,)) / 256
assert ei.H(x).item() == 8
def test_H_3():
x = torch.ones((4,4)) / 4
assert all(ei.H(x, dim=0) == 2)
def test_H_4():
x = torch.ones((4,4)) / 4
assert all(ei.H(x, dim=1) == 2)
def test_H_5():
x = torch.zeros((4,))
assert ei.H(x).item() == 0
##################################
#### lin_norm ####
##################################
def test_lin_norm_0():
x = torch.ones((4,4))
x_normed = ei.lin_norm(x) == 0.25
for row in x_normed:
assert all(row)
def test_lin_norm_1():
"""Check that negative entries become 0."""
x = torch.ones((5, 5))
x[:, 0] = -1
x_normed = ei.lin_norm(x)
assert all(x_normed[:, 0] == 0)
for row in x_normed[:, 1:]:
assert all(row == 0.25)
def test_lin_norm_2():
"""Check that rows of all 0s stay all 0s (no nan values via division by 0)."""
x = torch.zeros((4,4))
x_normed = ei.lin_norm(x)
for row in x_normed:
assert all(row == 0)
##################################
#### conv2d_create_matrix ####
##################################
def test_conv2d_create_matrix_0():
m = nn.Conv2d(1, 2, 2)
m.weight = nn.Parameter(torch.ones((2, 1, 2, 2)))
matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 2, 2, 2))
correct_matrix = torch.tensor([
[1, 1, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1]
]).to(torch.float32).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_conv2d_create_matrix_1():
m = nn.Conv2d(1, 1, 2, stride=2)
m.weight = nn.Parameter(torch.ones((1, 1, 2, 2)))
matrix = ei.conv2d_create_matrix(m, (1, 1, 4, 4), (1, 1, 2, 2))
correct_matrix = torch.tensor([
[1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1]
]).to(torch.float32).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_conv2d_create_matrix_2():
m = nn.Conv2d(2, 1, 2)
m.weight = nn.Parameter(torch.ones((1, 2, 2, 2)))
matrix = ei.conv2d_create_matrix(m, (1, 2, 3, 3), (1, 1, 2, 2))
correct_matrix = torch.tensor([
[1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1]
]).to(torch.float32).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_conv2d_create_matrix_3():
m = nn.Conv2d(1, 1, 2, padding=1)
m.weight = nn.Parameter(torch.ones((1, 1, 2, 2)))
matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 4 ,4))
correct_matrix = torch.tensor([
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1]
]).to(torch.float32).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_conv2d_create_matrix_4():
m = nn.Conv2d(1, 1, 2, padding=1, stride=2)
m.weight = nn.Parameter(torch.ones((1, 1, 2, 2)))
matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 2, 2))
correct_matrix = torch.tensor([
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1]
]).to(torch.float32).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_conv2d_create_matrix_5():
m = nn.Conv2d(1, 1, (1, 2), padding=1, stride=(2, 2))
m.weight = nn.Parameter(torch.ones((1, 1, 1, 2)))
matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 3, 2))
correct_matrix = torch.tensor([
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]
]).to(torch.float32).t()
assert all(torch.flatten(matrix == correct_matrix))
##################################
#### avgpool2d_create_matrix ####
##################################
def test_avgpool2d_create_matrix_0():
m = nn.AvgPool2d(2)
matrix = ei.avgpool2d_create_matrix(m, (1, 1, 4, 4), (1, 1, 2, 2))
correct_matrix = torch.tensor([
[0.25, 0.25, 0, 0, 0.25, 0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.25, 0.25, 0, 0, 0.25, 0.25, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0.25, 0.25, 0, 0, 0.25, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.25, 0.25, 0, 0, 0.25, 0.25]
]).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_avgpool2d_create_matrix_1():
m = nn.AvgPool2d(2, stride=1)
matrix = ei.avgpool2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 2, 2))
correct_matrix = torch.tensor([
[0.25, 0.25, 0, 0.25, 0.25, 0, 0, 0, 0],
[0, 0.25, 0.25, 0, 0.25, 0.25, 0, 0, 0],
[0, 0, 0, 0.25, 0.25, 0, 0.25, 0.25, 0],
[0, 0, 0, 0, 0.25, 0.25, 0, 0.25, 0.25]
]).t()
assert all(torch.flatten(matrix == correct_matrix))
def test_avgpool2d_create_matrix_2():
m = nn.AvgPool2d((1, 2), stride=1)
matrix = ei.avgpool2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 3, 2))
correct_matrix = torch.tensor([
[0.5, 0.5, 0, 0, 0, 0, 0, 0, 0],
[0, 0.5, 0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.5, 0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0.5, 0.5, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0.5, 0.5, 0],
[0, 0, 0, 0, 0, 0, 0, 0.5, 0.5]
]).t()
assert all(torch.flatten(matrix == correct_matrix))
##################################
#### determinism ####
##################################
def test_determinism_0():
m = nn.Linear(4, 4, bias=False)
m.weight = nn.Parameter(torch.ones((4, 4)))
computed_det = ei.determinism(m, input=torch.randn(1, 4))
true_det = 2
assert type(computed_det) == float and computed_det == true_det
##################################
#### degeneracy ####
##################################
def test_degeneracy_0():
m = nn.Linear(4, 4, bias=False)
m.weight = nn.Parameter(torch.ones((4, 4)))
computed_deg = ei.degeneracy(m, input=torch.randn(1, 4))
true_deg = 2
assert type(computed_deg) == float and computed_deg == true_deg
##################################
#### ei ####
##################################
def test_ei_0():
m = nn.Linear(4, 4, bias=False)
m.weight = nn.Parameter(torch.ones((4, 4)))
computed_ei = ei.ei(m, input=torch.randn(1, 4))
true_ei = 0
assert type(computed_ei) == float and computed_ei == true_ei
| [
"torch.zeros",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.ones",
"torch.nn.Conv2d",
"torch.tensor",
"torch.flatten",
"torch.randn"
] | 1.1.0 | ejmichaud/torch-foresight | e36a8fdd65f0432b9fa25a5127412b081159956b |
1.4 | # Taken from https://github.com/psclklnk/spdl
# Copy of the license at TeachMyAgent/teachers/LICENSES/SPDL
import torch
import numpy as np
def set_weights(parameters, weights, use_cuda):
"""
Function used to set the value of a set of torch parameters given a
vector of values.
Args:
parameters (list): list of parameters to be considered;
weights (numpy.ndarray): array of the new values for
the parameters;
use_cuda (bool): whether the parameters are cuda tensors or not;
"""
idx = 0
for p in parameters:
shape = p.data.shape
c = 1
for s in shape:
c *= s
w = np.reshape(weights[idx:idx + c], shape)
if not use_cuda:
w_tensor = torch.from_numpy(w).type(p.data.dtype)
else:
w_tensor = torch.from_numpy(w).type(p.data.dtype).cuda()
p.data = w_tensor
idx += c
assert idx == weights.size
def get_weights(parameters):
"""
Function used to get the value of a set of torch parameters as
a single vector of values.
Args:
parameters (list): list of parameters to be considered.
Returns:
A numpy vector consisting of all the values of the parameters.
"""
weights = list()
for p in parameters:
w = p.data.detach().cpu().numpy()
weights.append(w.flatten())
weights = np.concatenate(weights, 0)
return weights
def zero_grad(parameters):
"""
Function used to set to zero the value of the gradient of a set
of torch parameters.
Args:
parameters (list): list of parameters to be considered.
"""
for p in parameters:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def get_gradient(params):
"""
Function used to get the value of the gradient of a set of
torch parameters.
Args:
params (list): list of parameters to be considered.
"""
views = []
for p in params:
if p.grad is None:
view = p.new(p.numel()).zero_()
else:
view = p.grad.view(-1)
views.append(view)
return torch.cat(views, 0)
def to_float_tensor(x, use_cuda=False):
"""
Function used to convert a numpy array to a float torch tensor.
Args:
x (np.ndarray): numpy array to be converted as torch tensor;
use_cuda (bool): whether to build a cuda tensors or not.
Returns:
A float tensor build from the values contained in the input array.
"""
x = torch.tensor(x, dtype=torch.float)
return x.cuda() if use_cuda else x
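if __name__ == "__main__":
    # Hedged round-trip sketch (added for illustration): flatten the weights of
    # a small module with `get_weights`, zero them out, and write them back
    # with `set_weights`.
    layer = torch.nn.Linear(3, 2)
    flat = get_weights(layer.parameters())            # 1-D numpy vector
    set_weights(layer.parameters(), flat * 0.0, use_cuda=False)
    assert np.allclose(get_weights(layer.parameters()), 0.0)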
| [
"torch.cat",
"torch.from_numpy",
"torch.tensor"
] | 1.4.0 | flowersteam/TeachMyAgent | a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e |
1.11 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# pyre-strict
import os
import time
import traceback
from dataclasses import dataclass
from typing import Any, Dict, Optional, TypedDict, List, Tuple
import hydra
import torch
from omegaconf import OmegaConf
from pytorch_lightning import LightningDataModule, LightningModule
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.callbacks import ModelCheckpoint as OSSModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger as OSSTensorboardLogger
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.utilities.types import _EVALUATE_OUTPUT, _PREDICT_OUTPUT
from torchrecipes._internal_patches import log_run, ModelCheckpoint, TensorBoardLogger
from torchrecipes.core.conf import ModuleConf, DataModuleConf, TrainerConf
from torchrecipes.core.logger import JobStatus
from torchrecipes.utils.checkpoint import find_last_checkpoint_path
from torchrecipes.utils.trainer_plugins import get_trainer_params
OmegaConf.register_new_resolver("get_method", hydra.utils.get_method)
@dataclass
class TrainOutput:
tensorboard_log_dir: Optional[str] = None
class TestOutput(TypedDict):
pass
class BaseTrainApp:
"""
A training recipe that contains all necessary parts to train a model.
One can easily start a training flow with this training application.
To use the interface, create your own TrainApp and subclass from the BaseTrainApp.
You also need to subclass YourTaskConfig from ModuleConf.
"""
module_conf: ModuleConf
module: LightningModule
datamodule_conf: Optional[DataModuleConf]
datamodule: Optional[LightningDataModule]
trainer_conf: TrainerConf
log_dir: Optional[str]
root_dir: Optional[str]
def __init__(
self,
module: ModuleConf,
trainer: TrainerConf,
datamodule: Optional[DataModuleConf] = None,
) -> None:
super().__init__()
self.datamodule_conf = datamodule
self.datamodule = self.get_data_module()
self.module_conf = module
self.module = self.get_lightning_module()
self.trainer_conf = trainer
self.log_dir = None
self.root_dir = None
torch._C._log_api_usage_once(f"torchrecipes.{self.__class__.__name__}")
def get_lightning_module(self) -> LightningModule:
"""
Override this method to instantiate a LightningModule
"""
return hydra.utils.instantiate(self.module_conf, _recursive_=False)
def get_data_module(self) -> Optional[LightningDataModule]:
"""
Override this method to instantiate a LightningDataModule
"""
if self.datamodule_conf:
return hydra.utils.instantiate(self.datamodule_conf, _recursive_=False)
return None
def get_callbacks(self) -> List[Callback]:
"""
Override this method to return a list of callbacks to be passed into Trainer
You can add additional ModelCheckpoint here
"""
return []
def get_logger(self) -> OSSTensorboardLogger:
"""
Override this method to return a logger for trainer
TODO: T88650989 set different default logger for OSS and FB TrainApp
"""
return TensorBoardLogger()
def get_default_model_checkpoint(self) -> OSSModelCheckpoint:
"""
Override this method to return a default ModelCheckpoint callback.
Note: If you want to use more than 1 ModelCheckpoint callback, add it through
get_callbacks() function.
"""
dirpath: Optional[str] = None
root_dir = self.root_dir
if root_dir:
dirpath = os.path.join(root_dir, ModelCheckpoint.CHECKPOINT_PATH_SUFFIX)
return ModelCheckpoint(
# will auto generate dirpath if not provided
dirpath=dirpath,
save_top_k=-1,
has_user_data=False,
ttl_days=1,
monitor=None,
)
def _get_trainer(self) -> Tuple[Trainer, Dict[str, Any]]:
trainer_params = self._init_trainer_params()
self._set_trainer_params(trainer_params)
# log trainer params
log_params = dict(trainer_params)
log_params["oncall_team"] = "pt_lightning"
log_params["run_status"] = JobStatus.RUNNING.value
log_run(**log_params)
return Trainer(**trainer_params), log_params
def _init_trainer_params(self) -> Dict[str, Any]:
return get_trainer_params(self.trainer_conf)
def _set_trainer_params(
self,
trainer_params: Dict[str, Any],
) -> None:
# set default logger if not specified
# if logger=False, do not add a logger
if trainer_params.get("logger", True):
logger = self.get_logger()
trainer_params["logger"] = logger
self.log_dir = logger.log_dir
self.root_dir = logger.root_dir
callbacks = trainer_params.get("callbacks", [])
callbacks.extend(self.get_callbacks())
# create default model checkpoint callback unless disabled
if trainer_params.get("checkpoint_callback", True):
checkpoint_callback = self.get_default_model_checkpoint()
callbacks.append(checkpoint_callback)
# auto-resume from last default checkpoint
ckpt_path = checkpoint_callback.dirpath
if not trainer_params.get("resume_from_checkpoint") and ckpt_path:
last_checkpoint = find_last_checkpoint_path(ckpt_path)
trainer_params["resume_from_checkpoint"] = last_checkpoint
trainer_params["callbacks"] = callbacks
def train(self) -> TrainOutput:
trainer, log_params = self._get_trainer()
start_time = time.monotonic()
got_exception = None
try:
trainer.fit(self.module, datamodule=self.datamodule)
except Exception as ex:
got_exception = ex
# log trainer status to Scuba and Hive
total_run_time = int(time.monotonic() - start_time)
log_params["global_rank"] = trainer.global_rank
log_params["world_size"] = trainer.world_size
log_params["total_run_time"] = total_run_time
if got_exception is None:
log_params["run_status"] = JobStatus.COMPLETED.value
log_run(**log_params)
else:
log_params["error_message"] = str(got_exception)
log_params["stacktrace"] = traceback.format_stack()
log_params["run_status"] = JobStatus.FAILED.value
log_run(**log_params)
raise got_exception
return TrainOutput(tensorboard_log_dir=self.log_dir)
def test(self) -> _EVALUATE_OUTPUT:
trainer, _ = self._get_trainer()
return trainer.test(self.module, datamodule=self.datamodule)
def predict(self) -> Optional[_PREDICT_OUTPUT]:
trainer, _ = self._get_trainer()
return trainer.predict(self.module, datamodule=self.datamodule)
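# A minimal subclassing sketch following the class docstring above; MyTrainApp is an
# illustrative name, not a class shipped with torchrecipes. A real recipe would also
# define a task config derived from ModuleConf and build the app from Hydra configs
# before calling train().
class MyTrainApp(BaseTrainApp):
    """Toy TrainApp that only customises the callback list."""
    def get_callbacks(self) -> List[Callback]:
        # Callbacks returned here are appended to those built from the trainer config.
        return []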
| [
"torch._C._log_api_usage_once"
] | 1.11.0 | laurencer/recipes | 60b7c5f0304c7eb44a39295eba78da02608ae858 |
0.4 | # From https://github.com/wjh720/QPLEX/, added here for convenience.
import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.dmaq_general import DMAQer
# from modules.mixers.dmaq_qatten import DMAQ_QattenMixer
import torch.nn.functional as F
import torch as th
from torch.optim import RMSprop
import numpy as np
class DMAQ_qattenLearner:
def __init__(self, mac, scheme, logger, args):
self.args = args
self.mac = mac
self.logger = logger
self.params = list(mac.parameters())
self.last_target_update_episode = 0
self.mixer = None
if args.mixer is not None:
if args.mixer == "dmaq":
self.mixer = DMAQer(args)
# elif args.mixer == 'dmaq_qatten':
# self.mixer = DMAQ_QattenMixer(args)
else:
raise ValueError("Mixer {} not recognised.".format(args.mixer))
self.params += list(self.mixer.parameters())
self.target_mixer = copy.deepcopy(self.mixer)
self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
# a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
self.target_mac = copy.deepcopy(mac)
self.log_stats_t = -self.args.learner_log_interval - 1
self.n_actions = self.args.n_actions
def sub_train(self, batch: EpisodeBatch, t_env: int, episode_num: int, mac, mixer, optimiser, params,
show_demo=False, save_data=None):
# Get the relevant quantities
rewards = batch["reward"][:, :-1]
actions = batch["actions"][:, :-1]
terminated = batch["terminated"][:, :-1].float()
mask = batch["filled"][:, :-1].float()
mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
avail_actions = batch["avail_actions"]
actions_onehot = batch["actions_onehot"][:, :-1]
# Calculate estimated Q-Values
mac_out = []
mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
agent_outs = mac.forward(batch, t=t)
mac_out.append(agent_outs)
mac_out = th.stack(mac_out, dim=1) # Concat over time
# Pick the Q-Values for the actions taken by each agent
chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim
x_mac_out = mac_out.clone().detach()
x_mac_out[avail_actions == 0] = -9999999
max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)
max_action_index = max_action_index.detach().unsqueeze(3)
is_max_action = (max_action_index == actions).int().float()
if show_demo:
q_i_data = chosen_action_qvals.detach().cpu().numpy()
q_data = (max_action_qvals - chosen_action_qvals).detach().cpu().numpy()
# self.logger.log_stat('agent_1_%d_q_1' % save_data[0], np.squeeze(q_data)[0], t_env)
# self.logger.log_stat('agent_2_%d_q_2' % save_data[1], np.squeeze(q_data)[1], t_env)
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
        # We don't need the first timestep's Q-value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
# Mask out unavailable actions
target_mac_out[avail_actions[:, 1:] == 0] = -9999999
# Max over target Q-Values
if self.args.double_q:
# Get actions that maximise live Q (for double q-learning)
mac_out_detach = mac_out.clone().detach()
mac_out_detach[avail_actions == 0] = -9999999
cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
target_chosen_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
target_max_qvals = target_mac_out.max(dim=3)[0]
target_next_actions = cur_max_actions.detach()
cur_max_actions_onehot = th.zeros(cur_max_actions.squeeze(3).shape + (self.n_actions,))
cur_max_actions_onehot = cur_max_actions_onehot.scatter_(3, cur_max_actions, 1)
else:
# Calculate the Q-Values necessary for the target
target_mac_out = []
self.target_mac.init_hidden(batch.batch_size)
for t in range(batch.max_seq_length):
target_agent_outs = self.target_mac.forward(batch, t=t)
target_mac_out.append(target_agent_outs)
            # We don't need the first timestep's Q-value estimate for calculating targets
target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time
target_max_qvals = target_mac_out.max(dim=3)[0]
# Mix
if mixer is not None:
if self.args.mixer == "dmaq_qatten":
ans_chosen, q_attend_regs, head_entropies = \
mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
ans_adv, _, _ = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
else:
ans_chosen = mixer(chosen_action_qvals, batch["state"][:, :-1], is_v=True)
ans_adv = mixer(chosen_action_qvals, batch["state"][:, :-1], actions=actions_onehot,
max_q_i=max_action_qvals, is_v=False)
chosen_action_qvals = ans_chosen + ans_adv
if self.args.double_q:
if self.args.mixer == "dmaq_qatten":
target_chosen, _, _ = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:], is_v=True)
target_adv, _, _ = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:],
actions=cur_max_actions_onehot,
max_q_i=target_max_qvals, is_v=False)
target_max_qvals = target_chosen + target_adv
else:
target_chosen = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:], is_v=True)
target_adv = self.target_mixer(target_chosen_qvals, batch["state"][:, 1:],
actions=cur_max_actions_onehot,
max_q_i=target_max_qvals, is_v=False)
target_max_qvals = target_chosen + target_adv
else:
target_max_qvals = self.target_mixer(target_max_qvals, batch["state"][:, 1:], is_v=True)
# Calculate 1-step Q-Learning targets
targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
if show_demo:
tot_q_data = chosen_action_qvals.detach().cpu().numpy()
tot_target = targets.detach().cpu().numpy()
print('action_pair_%d_%d' % (save_data[0], save_data[1]), np.squeeze(q_data[:, 0]),
np.squeeze(q_i_data[:, 0]), np.squeeze(tot_q_data[:, 0]), np.squeeze(tot_target[:, 0]))
self.logger.log_stat('action_pair_%d_%d' % (save_data[0], save_data[1]),
np.squeeze(tot_q_data[:, 0]), t_env)
return
# Td-error
td_error = (chosen_action_qvals - targets.detach())
mask = mask.expand_as(td_error)
# 0-out the targets that came from padded data
masked_td_error = td_error * mask
# Normal L2 loss, take mean over actual data
if self.args.mixer == "dmaq_qatten":
loss = (masked_td_error ** 2).sum() / mask.sum() + q_attend_regs
else:
loss = (masked_td_error ** 2).sum() / mask.sum()
masked_hit_prob = th.mean(is_max_action, dim=2) * mask
hit_prob = masked_hit_prob.sum() / mask.sum()
# Optimise
optimiser.zero_grad()
loss.backward()
grad_norm = th.nn.utils.clip_grad_norm_(params, self.args.grad_norm_clip)
optimiser.step()
if t_env - self.log_stats_t >= self.args.learner_log_interval:
self.logger.log_stat("loss", loss.item(), t_env)
self.logger.log_stat("hit_prob", hit_prob.item(), t_env)
self.logger.log_stat("grad_norm", grad_norm, t_env)
mask_elems = mask.sum().item()
self.logger.log_stat("td_error_abs", (masked_td_error.abs().sum().item() / mask_elems), t_env)
self.logger.log_stat("q_taken_mean",
(chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)
self.logger.log_stat("target_mean", (targets * mask).sum().item() / (mask_elems * self.args.n_agents),
t_env)
self.log_stats_t = t_env
def train(self, batch: EpisodeBatch, t_env: int, episode_num: int, show_demo=False, save_data=None):
self.sub_train(batch, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,
show_demo=show_demo, save_data=save_data)
if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
self._update_targets()
self.last_target_update_episode = episode_num
def _update_targets(self):
self.target_mac.load_state(self.mac)
if self.mixer is not None:
self.target_mixer.load_state_dict(self.mixer.state_dict())
self.logger.console_logger.info("Updated target network")
def cuda(self):
self.mac.cuda()
self.target_mac.cuda()
if self.mixer is not None:
self.mixer.cuda()
self.target_mixer.cuda()
def save_models(self, path):
self.mac.save_models(path)
if self.mixer is not None:
th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
def load_models(self, path):
self.mac.load_models(path)
# Not quite right but I don't want to save target networks
self.target_mac.load_models(path)
if self.mixer is not None:
self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
self.target_mixer.load_state_dict(th.load("{}/mixer.th".format(path),
map_location=lambda storage, loc: storage))
self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
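# A standalone sketch of the masked TD loss used in sub_train above; the tensor
# shapes (batch=2, time=3) and the absence of a mixer are illustrative assumptions.
if __name__ == "__main__":
    _q = th.rand(2, 3, 1)                 # chosen-action Q-values
    _targets = th.rand(2, 3, 1)           # bootstrapped 1-step targets
    _mask = th.ones(2, 3, 1)
    _mask[:, -1] = 0.0                    # pretend the last timestep is padding
    _td_error = _q - _targets.detach()
    _loss = ((_td_error * _mask) ** 2).sum() / _mask.sum()  # mean over non-padded steps
    print(_loss.item())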
| [
"torch.stack",
"torch.optim.RMSprop",
"torch.gather",
"torch.nn.utils.clip_grad_norm_",
"torch.mean"
] | 0.4.1 | HDUAIS/MARL_Bench | f592d20ddbcb2039453cf56221083d4ac64dee46 |
1.7 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.utils.data
import torch.utils.data.distributed
import numpy as np
import pdb
kernel_size = 3
class SimpleDiscriminator(nn.Module):
"""
Known to work well as a GAN discriminator
"""
def __init__(self, num_classes=1, args=None):
super().__init__()
nf = self.nf = 128
# Submodules
nlayers = 0
self.nf0 = nf
blocks = [
ResnetBlock(nf, nf),
ResnetBlock(nf, nf),
]
# Initial up-channeling conv
self.conv_img = nn.Conv2d(3, 1*nf, kernel_size=kernel_size, padding=kernel_size//2)
self.resnet = nn.Sequential(*blocks)
# Final stage is standard avg-pool followed by linear
self.pool = nn.AdaptiveAvgPool2d((1,1))
self.fc = nn.Linear(self.nf0, num_classes)
#self.norm = nn.GroupNorm(1, 1, affine=False, eps=0.0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
batch_size = x.size(0)
out = x
#out = self.norm(out)
#pdb.set_trace()
out = self.conv_img(out)
out = self.resnet(out)
out = self.pool(out)
out = out.view(batch_size, self.nf0)
out = self.fc(actvn(out))
return out
class ResnetBlock(nn.Module):
def __init__(self, fin, fout, fhidden=None):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
self.fin = fin
self.fout = fout
if fhidden is None:
self.fhidden = min(fin, fout)
else:
self.fhidden = fhidden
# Submodules
self.norm_0 = nn.GroupNorm(self.fin//32, self.fin)
self.conv_0 = nn.Conv2d(self.fin, self.fhidden,
kernel_size, stride=1, padding=kernel_size//2, bias=False)
self.norm_1 = nn.GroupNorm(self.fhidden//32, self.fhidden)
self.conv_1 = nn.Conv2d(self.fhidden, self.fout,
kernel_size, stride=1, padding=kernel_size//2, bias=False)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(self.fin, self.fout,
1, stride=1, padding=0, bias=False)
def forward(self, x):
x_s = self._shortcut(x)
dx = self.conv_0(actvn(self.norm_0(x)))
dx = self.conv_1(actvn(self.norm_1(dx)))
out = x_s + dx
return out
def _shortcut(self, x):
if self.learned_shortcut:
x_s = self.conv_s(x)
else:
x_s = x
return x_s
def actvn(x):
return F.relu(x)
#return F.leaky_relu(x, 2e-1)
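# A quick shape-check sketch for the discriminator above; the 4x3x64x64 input is an
# illustrative assumption (any spatial size works because of the adaptive pooling).
if __name__ == "__main__":
    _disc = SimpleDiscriminator(num_classes=1)
    _scores = _disc(torch.randn(4, 3, 64, 64))
    print(_scores.shape)  # expected: torch.Size([4, 1])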
| [
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.GroupNorm",
"torch.nn.Conv2d",
"torch.nn.functional.relu"
] | 1.7.1 | sbelenki/fastMRI | 9a359ffe340e9265491744e381d92241b36a6455 |
1.7 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import h5py
import torch
from collections import OrderedDict
from fastmri.data import transforms
import numpy as np
import random
import pdb
def est_sens_maps(kspace, start, end, apodize_hori=0.07):
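    """
    Estimate per-coil sensitivity maps from the selected k-space columns (the
    central / ACS region in the caller below): keep only columns [start, end),
    apodize along the width with a Gaussian window, inverse-FFT each coil and
    normalise by the root-sum-of-squares over coils.
    """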
num_coils, height, width = kspace.shape
mask = np.zeros(width, dtype=kspace.dtype)
mask[start:end] = 1
kspace = np.where(mask, kspace, 0)
mask = np.exp(-(np.linspace(-1, 1, width) / apodize_hori) ** 2, dtype=kspace.dtype)
kspace = kspace * mask
sens_maps = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace), norm='ortho'))
sens_maps /= np.sqrt(np.sum(np.abs(sens_maps) ** 2, axis=0, keepdims=True))
return sens_maps
class KSpaceDataTransform(object):
def __init__(self, args, mask_func, partition, use_seed=True):
self.args = args
self.mask_func = mask_func
self.partition = partition
self.use_seed = use_seed
def __call__(self, target_ksp, target_im, attrs, fname, slice):
kspace_np = target_ksp
target_im = transforms.to_tensor(target_im)
target_ksp = transforms.to_tensor(target_ksp)
if self.args.coil_compress_coils:
target_ksp = transforms.coil_compress(target_ksp, self.args.coil_compress_coils)
if self.args.calculate_offsets_directly:
krow = kspace_np.sum(axis=(0,1)) # flatten to a single row
width = len(krow)
offset = (krow != 0).argmax()
acq_start = offset
acq_end = width - (krow[::-1] != 0).argmax() #exclusive
else:
offset = None # Mask will pick randomly
if self.partition == 'val' and 'mask_offset' in attrs:
offset = attrs['mask_offset']
acq_start = attrs['padding_left']
acq_end = attrs['padding_right']
#pdb.set_trace()
seed = None if not self.use_seed else tuple(map(ord, fname))
input_ksp, mask, num_lf = transforms.apply_mask(
target_ksp, self.mask_func,
seed, offset,
(acq_start, acq_end))
#pdb.set_trace()
sens_map = torch.Tensor(0)
if self.args.compute_sensitivities:
start_of_center_mask = (kspace_np.shape[-1] - num_lf + 1) // 2
end_of_center_mask = start_of_center_mask + num_lf
sens_map = est_sens_maps(kspace_np, start_of_center_mask, end_of_center_mask)
sens_map = transforms.to_tensor(sens_map)
if self.args.grappa_input:
with h5py.File(self.args.grappa_input_path / self.partition / fname, 'r') as hf:
kernel = transforms.to_tensor(hf['kernel'][slice])
input_ksp = transforms.apply_grappa(input_ksp, kernel, target_ksp, mask)
grappa_kernel = torch.Tensor(0)
if self.args.grappa_path is not None:
with h5py.File(self.args.grappa_path / self.partition / fname, 'r') as hf:
grappa_kernel = transforms.to_tensor(hf['kernel'][slice])
if self.args.grappa_target:
with h5py.File(self.args.grappa_target_path / self.partition / fname, 'r') as hf:
kernel = transforms.to_tensor(hf['kernel'][slice])
target_ksp = transforms.apply_grappa(target_ksp.clone(), kernel, target_ksp, mask, sample_accel=2)
target_im = transforms.root_sum_of_squares(transforms.complex_abs(transforms.ifft2(target_ksp)))
input_im = transforms.ifft2(input_ksp)
if not self.args.scale_inputs:
scale = torch.Tensor([1.])
else:
abs_input = transforms.complex_abs(input_im)
if self.args.scale_type == 'max':
scale = torch.max(abs_input)
else:
scale = torch.mean(abs_input)
input_ksp /= scale
target_ksp /= scale
target_im /= scale
scale = scale.view([1, 1, 1])
attrs_dict = dict(**attrs)
return OrderedDict(
input = input_ksp,
target = target_ksp,
target_im = target_im,
mask = mask,
grappa_kernel = grappa_kernel,
scale = scale,
attrs_dict = attrs_dict,
fname = fname,
slice = slice,
num_lf = num_lf,
sens_map = sens_map,
)
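# A standalone sketch of the scaling step near the end of __call__ above; the random
# (real, imag) tensor standing in for input_im is an illustrative assumption.
if __name__ == "__main__":
    _im = torch.randn(8, 320, 320, 2)            # (coils, height, width, real/imag)
    _abs = (_im ** 2).sum(dim=-1).sqrt()         # complex magnitude per pixel
    _scale = torch.max(_abs)                     # or torch.mean(_abs) when scale_type != 'max'
    _im = _im / _scale
    print(_scale.view([1, 1, 1]).shape)          # the scale is returned with shape [1, 1, 1]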
| [
"torch.max",
"torch.Tensor",
"torch.mean"
] | 1.7.1 | sbelenki/fastMRI | 9a359ffe340e9265491744e381d92241b36a6455 |