# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
import os
import torch
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.embeddings import BertVisioLinguisticEmbeddings
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.file_io import PathManager
from mmf.utils.modeling import get_optimizer_parameters_for_bert
from mmf.utils.transform import (
transform_to_batch_sequence,
transform_to_batch_sequence_dim,
)
from omegaconf import OmegaConf
from torch import nn
try:
from transformers3.modeling_bert import (
BertConfig,
BertEncoder,
BertLayer,
BertModel,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainedModel,
)
except ImportError:
from transformers.modeling_bert import (
BertConfig,
BertEncoder,
BertLayer,
BertModel,
BertPooler,
BertPredictionHeadTransform,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
# This model essentially wraps GraphNetworkModule and multi-modal models
@registry.register_model("krisp")
class KRISP(BaseModel):
def __init__(self, config):
super().__init__(config)
self.build()
@classmethod
def config_path(cls):
return "configs/models/krisp/defaults.yaml"
# Each model needs to define a build method where the model's modules
# are actually built and assigned to the model
def build(self):
# Get any cross-model info we need for building network
# (like hidden sizes)
extra_config = {}
extra_config["vb_hid_sz"] = self.config.visual_bert.hidden_size
extra_config["node_hid_dim"] = self.config.graph_module.node_hid_dim
# Also pass arguments to know if it needs to feed in something
extra_config["feed_vb_to_graph"] = self.config.feed_vb_to_graph
extra_config["feed_q_to_graph"] = self.config.feed_q_to_graph
extra_config["feed_mode"] = self.config.feed_mode
extra_config["feed_graph_to_vb"] = self.config.feed_graph_to_vb
extra_config["feed_special_node"] = self.config.feed_special_node
extra_config["topk_ans_feed"] = self.config.topk_ans_feed
extra_config["compress_crossmodel"] = self.config.compress_crossmodel
extra_config["crossmodel_compress_dim"] = self.config.crossmodel_compress_dim
extra_config["analysis_mode"] = self.config.analysis_mode
extra_config["noback_vb"] = self.config.noback_vb_to_graph
# If feed q, make the question module here
if self.config.feed_q_to_graph:
# We can just make it a BERT model really easily
self.q_enc = BertModel.from_pretrained("bert-base-uncased")
extra_config["q_hid_sz"] = self.q_enc.config.hidden_size
# Import graph network module
# Wrapped in try-except to avoid adding hard dependencies to mmf
try:
from projects.krisp.graphnetwork_module import GraphNetworkModule
except Exception:
print(
"Import error with KRISP dependencies. Fix dependencies if "
+ "you want to use KRISP"
)
raise
# Builds the graph network module
self.graph_module = GraphNetworkModule(self.config.graph_module, extra_config)
# Make VisualBERT module (without the final hidden logit layer)
self.vb_module = VisualBERTModule(self.config.visual_bert, extra_config)
# Final hidden layer for the vb module
self.vocab_fc = nn.Linear(
self.vb_module.model.bert.config.hidden_size, self.config.num_labels
)
# There are two choices: whether to use the bilinear net and whether to
# add or concat the features; these are not mutually exclusive
# If output combine is ptr net, make GraphPtr Net for combining outputs
if self.config.graph_logit_mode == "mc4":
# Bilinear network
self.graph_ptr_net = GraphPtrNet(
self.vb_module.model.bert.config.hidden_size,
self.config.graph_module.node_hid_dim,
)
elif self.config.graph_logit_mode == "in_graph":
# Logits are already computed
pass
elif self.config.graph_logit_mode == "logit_fc":
# Compute logits from single hidden layer
self.graph_logit_fc = nn.Linear(
self.config.graph_module.node_hid_dim, self.config.num_labels
)
# Answer indices not in graph
if self.config.output_combine == "add":
# Boolean mask over the answer vocabulary: True for answers that have no
# node in the graph, so their graph logits can be zeroed out below
missing_ans_inds = torch.ones(self.config.num_labels, dtype=torch.bool)
missing_ans_inds[self.graph_module.index_in_ans] = False
self.register_buffer("missing_ans_inds", missing_ans_inds)
# Each model in MMF gets a dict called sample_list which contains
# all of the necessary information returned from the dataset
def forward(self, sample_list):
# If we have different combine modes, may need to call in different order
if self.config.feed_graph_to_vb:
# Can't be both (would create circular dep)
assert not self.config.feed_vb_to_graph
# Check mode
# Can be feed_graph_hid_to_vb, where we pass in some vector
# rep of graph into vb or feed_top_node_to_vb which is similar,
# but it feeds in k node hidden states
assert self.config.feed_mode in [
"feed_graph_hid_to_vb",
"feed_top_node_to_vb",
]
if self.config.feed_mode == "feed_graph_hid_to_vb":
assert self.graph_module.gn.output_special_node
else:
raise Exception("Unknown feed mode %s" % self.config.feed_mode)
# Forward through graph module
graph_output = self.graph_module(sample_list)
# Put graph_output into sample_list
sample_list["graph_output"] = graph_output
# Forward through vb module
vb_hidden = self.vb_module(sample_list)
# Get vocab logit preds
vb_logits = self.vocab_fc(vb_hidden)
else:
# Check mode
if self.config.feed_vb_to_graph:
# Can be feed_vb_hid_to_graph where we feed final
# vb state into graph as a node input
# Or feed_vb_logit_to_graph where we feed the vb-predicted logits into graph
assert self.config.feed_mode in [
"feed_vb_hid_to_graph",
"feed_vb_logit_to_graph",
]
# Forward through vb module
vb_hidden = self.vb_module(sample_list)
# Get vocab logit preds
vb_logits = self.vocab_fc(vb_hidden)
sample_list["vb_hidden"] = vb_hidden
sample_list["vb_logits"] = vb_logits
# If we feed separate Q feats into graph
if self.config.feed_q_to_graph:
# Now sample_list has all the processed inputs for us
attention_mask_q = (sample_list["input_ids"] != 0).float()
q_enc_out = self.q_enc(
input_ids=sample_list["input_ids"],
attention_mask=attention_mask_q,
token_type_ids=sample_list["token_type_ids"],
)
sample_list["q_encoded"] = q_enc_out[1] # Get pooled output
# Forward through graph module
graph_output = self.graph_module(sample_list)
# Compute graph logits
if self.config.graph_logit_mode == "mc4":
# Use bilinear network
if self.config.noback_vb_to_blinear:
graph_logits = self.graph_ptr_net(vb_hidden.detach(), graph_output)
else:
graph_logits = self.graph_ptr_net(vb_hidden, graph_output)
elif self.config.graph_logit_mode == "in_graph":
# Logits are already computed
graph_logits = graph_output
assert self.config.graph_module.output_type == "graph_prediction"
elif self.config.graph_logit_mode == "logit_fc":
# Compute logits from single hidden layer
graph_logits = self.graph_logit_fc(graph_output)
# Now combine outputs
if self.config.output_combine == "concat":
# Output order should be alphabetical
assert self.config.graph_module.output_order == "alpha"
# Combine both logits
logits = torch.cat([vb_logits, graph_logits], dim=1)
elif self.config.output_combine == "add":
# Output order should be ans
assert self.config.graph_module.output_order == "ans"
# Set invalid inds to zero here
assert graph_logits.size(1) == vb_logits.size(1)
graph_logits[:, self.missing_ans_inds] = 0
logits = vb_logits + graph_logits
# Do zerobias
if self.config.zerobias:
logits -= 6.58
# For loss calculations (automatically done by MMF
# as per the loss defined in the config),
# we need to return a dict with "scores" key as logits
output = {"scores": logits}
# If we're in eval / analysis mode, add more to output
if self.config.analysis_mode:
output = self.graph_module.add_analysis_to_output(output)
# MMF will automatically calculate loss
return output
class GraphPtrNet(nn.Module):
def __init__(self, hidden_size, graph_hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.graph_hidden_size = graph_hidden_size
self.bl_w = nn.Linear(hidden_size, hidden_size)
self.graph_w = nn.Linear(graph_hidden_size, hidden_size)
def forward(self, bl_hidden, graph_hidden):
# Compute Eq. 4 from Iterative Answer Prediction with
# Pointer-Augmented Multimodal Transformers for TextVQA
# bl_hidden is bs x hidden_size
# graph_hidden is bs x graph_hidden_size
# Compute BL half
bl_hidden = self.bl_w(bl_hidden)
assert bl_hidden.dim() == 2
bl_hidden = bl_hidden.unsqueeze(1)
# Compute graph hidden half
# Assume we've already subsampled to only valid answer nodes
graph_hidden = self.graph_w(graph_hidden)
# Now we have bl_hidden as a bs x 1 x hid vec
# graph_hidden as a bs x num_nodes x hid vec
# Combine
scores = torch.matmul(bl_hidden, graph_hidden.transpose(-1, -2))
# Normalize
scores = scores / math.sqrt(self.hidden_size)
scores = scores.squeeze(1)
# Scores is now a bs x #nodes matrix
return scores
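# A minimal shape sketch for GraphPtrNet (illustrative only: the sizes and the
# helper name below are assumptions, not values from any KRISP config). It
# shows the bilinear scoring: the projected vb hidden state is matched against
# every projected graph node, giving one logit per answer node.
def _graph_ptr_net_shape_example():
    net = GraphPtrNet(hidden_size=768, graph_hidden_size=128)
    bl_hidden = torch.randn(4, 768)  # bs x hidden_size (vb pooled state)
    graph_hidden = torch.randn(4, 500, 128)  # bs x num_answer_nodes x graph_hidden_size
    return net(bl_hidden, graph_hidden)  # -> 4 x 500, one score per answer node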
class VisualBERTBase(BertPreTrainedModel):
def __init__(
self,
config,
visual_embedding_dim=512,
embedding_strategy="plain",
bypass_transformer=False,
output_attentions=False,
output_hidden_states=False,
):
super().__init__(config)
self.config = config
config.visual_embedding_dim = visual_embedding_dim
config.embedding_strategy = embedding_strategy
config.bypass_transformer = bypass_transformer
config.output_attentions = output_attentions
config.output_hidden_states = output_hidden_states
self.embeddings = BertVisioLinguisticEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.bypass_transformer = config.bypass_transformer
if self.bypass_transformer:
self.additional_layer = BertLayer(config)
self.output_attentions = self.config.output_attentions
self.output_hidden_states = self.config.output_hidden_states
self.fixed_head_masks = [None for _ in range(len(self.encoder.layer))]
self.init_weights()
def forward(
self,
input_ids,
attention_mask=None,
token_type_ids=None,
visual_embeddings=None,
position_embeddings_visual=None,
visual_embeddings_type=None,
image_text_alignment=None,
graph_input=None,
):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# This attention mask is simpler than the triangular masking of causal
# attention used in OpenAI GPT; we just need to prepare the broadcast
# dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
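# Worked example (values assumed for illustration): a padding mask row
# [1, 1, 0] becomes the additive bias [0.0, 0.0, -10000.0] with shape
# [batch_size, 1, 1, seq_len], broadcast over every head and query position.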
assert position_embeddings_visual is None
embedding_output = self.embeddings(
input_ids,
token_type_ids,
visual_embeddings=visual_embeddings,
visual_embeddings_type=visual_embeddings_type,
image_text_alignment=image_text_alignment,
)
if self.bypass_transformer and visual_embeddings is not None:
assert (
not self.output_hidden_states
) # Don't support this for the bypass model
text_length = input_ids.size(1)
text_embedding_output = embedding_output[:, :text_length, :]
visual_part = embedding_output[:, text_length:, :]
text_extended_attention_mask = extended_attention_mask[
:, :, :text_length, :text_length
]
encoded_layers = self.encoder(
text_embedding_output,
text_extended_attention_mask,
self.fixed_head_masks,
)
sequence_output = encoded_layers[0]
new_input = torch.cat((sequence_output, visual_part), dim=1)
final_sequence_output = self.additional_layer(
new_input, extended_attention_mask
)
pooled_output = self.pooler(final_sequence_output)
return final_sequence_output, pooled_output
else:
# If it takes graph input(s),
# do a forward pass here through its own embedding,
# then concat it to embedding_output
# and concat onto the extended_attention_mask to include it too
if graph_input is not None:
# Concat onto embeddings
embedding_output = torch.cat([embedding_output, graph_input], dim=1)
graph_att_mask = torch.zeros(
graph_input.size(0), 1, 1, graph_input.size(1)
).to(extended_attention_mask.device)
extended_attention_mask = torch.cat(
[extended_attention_mask, graph_att_mask], dim=3
)
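# Note: extended_attention_mask is already in additive form at this point,
# so the all-zero graph_att_mask lets every position attend to the
# appended graph tokens.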
encoded_layers = self.encoder(
embedding_output, extended_attention_mask, self.fixed_head_masks
)
sequence_output = encoded_layers[0]
pooled_output = self.pooler(sequence_output)
attn_data_list = []
if self.output_attentions:
attn_data_list = encoded_layers[1:]
return sequence_output, pooled_output, attn_data_list
class VisualBERTForClassification(nn.Module):
def __init__(self, config, extra_config):
super().__init__()
self.config = config
self.output_attentions = self.config.output_attentions
self.output_hidden_states = self.config.output_hidden_states
self.pooler_strategy = self.config.get("pooler_strategy", "default")
# Graph input params
self.feed_graph_to_vb = extra_config["feed_graph_to_vb"]
self.graph_node_hid_dim = extra_config["node_hid_dim"]
self.graph_feed_mode = extra_config["feed_mode"]
self.graph_topk = extra_config["topk_ans_feed"]
# If doing graph, make a graph embedding layer
if self.feed_graph_to_vb:
self.graph_embedding = nn.Sequential(
nn.Linear(self.graph_node_hid_dim, config.hidden_size),
nn.LayerNorm(config.hidden_size, eps=1e-12),
nn.Dropout(config.hidden_dropout_prob),  # hidden_dropout_prob
)
# If bert_model_name is not specified, you will need to specify
# all of the required parameters for BERTConfig and a pretrained
# model won't be loaded
self.bert_model_name = self.config.get("bert_model_name", None)
self.bert_config = BertConfig.from_dict(
OmegaConf.to_container(self.config, resolve=True)
)
if self.bert_model_name is None or self.bert_model_name == "nopretrain":
self.bert = VisualBERTBase(
self.bert_config,
visual_embedding_dim=self.config.visual_embedding_dim,
embedding_strategy=self.config.embedding_strategy,
bypass_transformer=self.config.bypass_transformer,
output_attentions=self.config.output_attentions,
output_hidden_states=self.config.output_hidden_states,
)
else:
self.bert = VisualBERTBase.from_pretrained(
self.config.bert_model_name,
config=self.bert_config,
cache_dir=os.path.join(
get_mmf_cache_dir(), "distributed_{}".format(-1)
),
visual_embedding_dim=self.config.visual_embedding_dim,
embedding_strategy=self.config.embedding_strategy,
bypass_transformer=self.config.bypass_transformer,
output_attentions=self.config.output_attentions,
output_hidden_states=self.config.output_hidden_states,
)
self.training_head_type = self.config.training_head_type
self.dropout = nn.Dropout(self.bert.config.hidden_dropout_prob)
if self.config.training_head_type == "nlvr2":
self.bert.config.hidden_size *= 2
self.classifier = nn.Sequential(BertPredictionHeadTransform(self.bert.config))
self.init_weights()
def init_weights(self):
if self.config.random_initialize is False:
if self.bert_model_name is None:
# No pretrained model, init weights
self.bert.init_weights()
# Classifier needs to be initialized always as it is task specific
self.classifier.apply(self.bert._init_weights)
def forward(
self,
input_ids,
input_mask,
attention_mask=None,
token_type_ids=None,
visual_embeddings=None,
position_embeddings_visual=None,
visual_embeddings_type=None,
image_text_alignment=None,
masked_lm_labels=None,
graph_input=None,
):
# If we have a graph input, do the embedding first
if self.feed_graph_to_vb:
# Sanity check sizes
if self.graph_feed_mode == "feed_graph_hid_to_vb":
assert (
graph_input.dim() == 2
and graph_input.size(0) == input_ids.size(0)
and graph_input.size(1) == self.graph_node_hid_dim
)
graph_input = graph_input.unsqueeze(1) # Add extra dim
elif self.graph_feed_mode == "feed_top_node_to_vb":
assert (
graph_input.dim() == 3
and graph_input.size(0) == input_ids.size(0)
and graph_input.size(1) == self.graph_topk
and graph_input.size(2) == self.graph_node_hid_dim
)
# Do the graph embedding
graph_input = self.graph_embedding(graph_input)
sequence_output, pooled_output, attention_weights = self.bert(
input_ids,
attention_mask,
token_type_ids,
visual_embeddings,
position_embeddings_visual,
visual_embeddings_type,
image_text_alignment,
graph_input,
)
if self.training_head_type == "nlvr2":
# 2B * H => B * 2H
b, h = pooled_output.size()
pooled_output = torch.cat(
[pooled_output[: b // 2], pooled_output[b // 2 :]], dim=1
)
output_dict = {}
if self.output_attentions:
output_dict["attention_weights"] = attention_weights
if self.output_hidden_states:
output_dict["sequence_output"] = sequence_output
output_dict["pooled_output"] = pooled_output
if self.pooler_strategy == "vqa":
# In VQA2 pooling strategy, we use representation from second last token
index_to_gather = input_mask.sum(1) - 2
pooled_output = torch.gather(
sequence_output,
1,
index_to_gather.unsqueeze(-1)
.unsqueeze(-1)
.expand(index_to_gather.size(0), 1, sequence_output.size(-1)),
)
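# For example, if a row's input_mask sums to L, this gathers
# sequence_output[row, L - 2, :] (the second-to-last non-padded token),
# producing a bs x 1 x hidden_size tensor that replaces the pooled output.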
pooled_output = self.dropout(pooled_output)
output = self.classifier(pooled_output).squeeze(1)
return output
class VisualBERTModule(nn.Module):
def __init__(self, config, extra_config=None):
super().__init__()
self.config = config
if extra_config is None:
self.extra_config = {}
else:
self.extra_config = extra_config
self.build()
def build(self):
assert self.config.training_head_type != "pretraining"
self.model = VisualBERTForClassification(self.config, self.extra_config)
if self.config.special_visual_initialize:
self.model.bert.embeddings.initialize_visual_from_pretrained()
# Initialize from pretrained model
if self.config.load_from_pretrained:
# Load the raw checkpoint
pretrained_file = self.config.pretrained_file
with PathManager.open(pretrained_file, "rb") as f:
ckpt = torch.load(f, map_location=lambda storage, loc: storage)
model_ckpt = ckpt["model"]
# Remove "model" in fron of keys
model_ckpt_new = {}
for key in model_ckpt:
if "bert" not in key:
continue
model_ckpt_new[key.split("model.")[1]] = model_ckpt[key]
model_ckpt = model_ckpt_new
# Load the checkpoint
incompatible_keys = self.model.load_state_dict(model_ckpt, strict=False)
# Print any missing / wrong keys for debug
if len(incompatible_keys.missing_keys) != 0:
logger.warning(
f"Missing keys {incompatible_keys.missing_keys} in the"
+ " checkpoint.\n"
+ "If this is not your checkpoint, please open up an "
+ "issue on MMF GitHub. \n"
+ f"Unexpected keys if any: {incompatible_keys.unexpected_keys}"
)
if len(incompatible_keys.unexpected_keys) != 0:
logger.warning(
"Unexpected keys in state dict: "
+ f"{incompatible_keys.unexpected_keys} \n"
+ "This is usually not a problem with pretrained models, but "
+ "if this is your own model, please double check. \n"
+ "If you think this is an issue, please open up a "
+ "bug at MMF GitHub."
)
if getattr(self.config, "freeze_base", False):
for p in self.model.bert.parameters():
p.requires_grad = False
# Graph input params
self.feed_graph_to_vb = self.extra_config["feed_graph_to_vb"]
self.graph_node_hid_dim = self.extra_config["node_hid_dim"]
self.graph_feed_mode = self.extra_config["feed_mode"]
# Not implemented for this model
if self.feed_graph_to_vb and self.extra_config["compress_crossmodel"]:
assert False
def flatten(self, sample_list, to_be_flattened=None, to_be_flattened_dim=None):
if to_be_flattened is None:
to_be_flattened = {}
if to_be_flattened_dim is None:
to_be_flattened_dim = {}
for key in to_be_flattened:
# Make sure these keys are present or otherwise set these keys to None
sample_list[key] = getattr(sample_list, key, None)
sample_list[key] = transform_to_batch_sequence(sample_list[key])
for key in to_be_flattened_dim:
sample_list[key] = getattr(sample_list, key, None)
sample_list[key] = transform_to_batch_sequence_dim(sample_list[key])
if sample_list.visual_embeddings_type is None:
if sample_list.image_mask is not None:
sample_list.visual_embeddings_type = torch.zeros_like(
sample_list.image_mask
)
if sample_list.image_mask is not None:
attention_mask = torch.cat(
(sample_list.input_mask, sample_list.image_mask), dim=-1
)
if sample_list.masked_lm_labels is not None:
assert sample_list.masked_lm_labels.size(
-1
) == sample_list.input_mask.size(-1)
new_lm_labels = torch.ones_like(attention_mask) * -1
size_masked_lm_labels = sample_list.masked_lm_labels.size()
assert len(size_masked_lm_labels) == 2
new_lm_labels[
: size_masked_lm_labels[0], : size_masked_lm_labels[1]
] = sample_list.masked_lm_labels
sample_list.masked_lm_labels = new_lm_labels
else:
attention_mask = sample_list.input_mask
sample_list.attention_mask = attention_mask
return sample_list
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
def flatten_for_bert(self, sample_list, **kwargs):
to_be_flattened = [
"input_ids",
"token_type_ids",
"input_mask",
"image_mask",
"masked_lm_labels",
# "position_embeddings_visual",
# "visual_embeddings_type",
]
to_be_flattened_dim = ["visual_embeddings"] # "image_text_alignment",
# We want to convert everything into: batch x sequence_length x (dim).
flattened = self.flatten(sample_list, to_be_flattened, to_be_flattened_dim)
return flattened
def update_sample_list_based_on_head(self, sample_list):
bert_input_ids = sample_list.input_ids
bert_input_mask = sample_list.input_mask
bert_input_type_ids = sample_list.segment_ids
if self.config.training_head_type == "nlvr2":
bert_input_ids = torch.cat([bert_input_ids, bert_input_ids])
bert_input_mask = torch.cat([bert_input_mask, bert_input_mask])
bert_input_type_ids = torch.cat([bert_input_type_ids, bert_input_type_ids])
# image input
img0 = getattr(sample_list, "img0", {})
image_info = getattr(img0, "image_info_0", {})
image_dim_variable_0 = getattr(image_info, "max_features", None)
image_feat_variable_0 = getattr(img0, "image_feature_0", None)
img1 = getattr(sample_list, "img1", {})
image_info = getattr(img1, "image_info_0", {})
image_dim_variable_1 = getattr(image_info, "max_features", None)
image_feat_variable_1 = getattr(img1, "image_feature_0", None)
image_feat_variable = torch.cat(
[image_feat_variable_0, image_feat_variable_1]
)
image_dim_variable = torch.cat([image_dim_variable_0, image_dim_variable_1])
else:
image_info = getattr(sample_list, "image_info_0", {})
image_dim_variable = getattr(image_info, "max_features", None)
image_feat_variable = getattr(sample_list, "image_feature_0", None)
sample_list.visual_embeddings = image_feat_variable
sample_list.image_dim = image_dim_variable
sample_list.input_ids = bert_input_ids
sample_list.input_mask = bert_input_mask
sample_list.token_type_ids = bert_input_type_ids
return sample_list
def add_custom_params(self, sample_list):
visual_embeddings = getattr(sample_list, "visual_embeddings", None)
image_dim = getattr(sample_list, "image_dim", None)
# pretraining labels
sample_list.masked_lm_labels = getattr(sample_list, "lm_label_ids", None)
# image_feat_variable = batch x ( num_choice x ) image_feature_length x dim
# Prepare Mask
if visual_embeddings is not None and image_dim is not None:
image_mask = torch.arange(
visual_embeddings.size(-2), device=visual_embeddings.device
).expand(*visual_embeddings.size()[:-1])
if len(image_dim.size()) < len(image_mask.size()):
image_dim = image_dim.unsqueeze(-1)
assert len(image_dim.size()) == len(image_mask.size())
image_mask = image_mask < image_dim
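# Worked example (numbers assumed for illustration): with 100 feature
# slots and image_dim == 36 for a sample, arange(100) < 36 yields 36 ones
# followed by 64 zeros for that row.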
sample_list.image_mask = image_mask.long()
else:
sample_list.image_mask = None
sample_list.position_embeddings_visual = None
sample_list.visual_embeddings_type = None
sample_list.image_text_alignment = None
return sample_list
# Backward compatibility for code from original VisualBERT
@classmethod
def format_state_key(cls, key):
return (
key.replace("bert.bert", "model.bert")
.replace("bert.cls", "model.cls")
.replace("bert.classifier", "model.classifier")
)
def forward(self, sample_list):
sample_list = self.update_sample_list_based_on_head(sample_list)
sample_list = self.add_custom_params(sample_list)
sample_list = self.flatten_for_bert(sample_list)
if self.feed_graph_to_vb:
if self.graph_feed_mode == "feed_graph_hid_to_vb":
assert "graph_special_node_out" in sample_list
graph_input = sample_list["graph_special_node_out"]
else:
assert False
else:
graph_input = None
output = self.model(
sample_list.input_ids,
sample_list.input_mask,
sample_list.attention_mask,
sample_list.token_type_ids,
sample_list.visual_embeddings,
sample_list.position_embeddings_visual,
sample_list.visual_embeddings_type,
sample_list.image_text_alignment,
sample_list.masked_lm_labels,
graph_input,
)
return output
# Source: EXA-1-master / exa/models/mmf-main/mmf/models/krisp.py
# Copyright (c) Facebook, Inc. and its affiliates.
# MMBTModel, ModalEmbeddings is copied from [1]
# as we have internal dependency on transformers v2.3.
# These will be removed when we upgrade to package v2.5+.
# [1]: https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_mmbt.py # noqa
import os
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.models.interfaces.mmbt import MMBTGridHMInterface
from mmf.modules.encoders import (
EncoderFactory,
ImageEncoderFactory,
ImageEncoderTypes,
MultiModalEncoderBase,
ResNet152ImageEncoder,
TextEncoderFactory,
TextEncoderTypes,
TransformerEncoder,
)
from mmf.modules.hf_layers import replace_with_jit
from mmf.utils.checkpoint import load_pretrained_model
from mmf.utils.configuration import get_mmf_cache_dir
from mmf.utils.modeling import get_optimizer_parameters_for_bert
from omegaconf import DictConfig, II, OmegaConf
from torch import nn, Tensor
try:
from transformers3.modeling_bert import (
BertForPreTraining,
BertPredictionHeadTransform,
)
except ImportError:
from transformers.modeling_bert import (
BertForPreTraining,
BertPredictionHeadTransform,
)
# TODO: Remove after transformers package upgrade to 2.5
class MMBTConfig:
"""Configuration class to store the configuration of a `MMBT Model`.
Args:
config (:obj:`~transformers.PreTrainedConfig`):
Config of the underlying Transformer models. Its values are
copied over to use a single config.
num_labels (:obj:`int` or :obj:`None`, optional, defaults to `None`):
Size of final Linear layer for classification.
modal_hidden_size (:obj:`int`, optional, defaults to 2048):
Embedding dimension of the non-text modality encoder.
"""
def __init__(self, config, num_labels=None, modal_hidden_size=2048):
self.__dict__ = config.__dict__
self.modal_hidden_size = modal_hidden_size
if num_labels:
self.num_labels = num_labels
# TODO: Remove after transformers package upgrade to 2.5
class ModalEmbeddings(nn.Module):
"""
Generic Modal Embeddings which takes in an encoder,
and a transformer embedding.
"""
def __init__(self, config, encoder, embeddings):
super().__init__()
self.config = config
self.encoder = encoder
self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
self.position_embeddings = embeddings.position_embeddings
self.token_type_embeddings = embeddings.token_type_embeddings
self.word_embeddings = embeddings.word_embeddings
self.LayerNorm = embeddings.LayerNorm
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
def forward(
self,
input_modal: Tensor,
start_token: Optional[Tensor] = None,
end_token: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
):
token_embeddings = self.proj_embeddings(self.encoder(input_modal))
seq_length = token_embeddings.size(1)
if start_token is not None:
start_token_embeds = self.word_embeddings(start_token)
seq_length += 1
token_embeddings = torch.cat(
[start_token_embeds.unsqueeze(1), token_embeddings], dim=1
)
if end_token is not None:
end_token_embeds = self.word_embeddings(end_token)
seq_length += 1
token_embeddings = torch.cat(
[token_embeddings, end_token_embeds.unsqueeze(1)], dim=1
)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_modal.device
)
position_ids = position_ids.unsqueeze(0).expand(
input_modal.size(0), seq_length
)
if token_type_ids is None:
token_type_ids = torch.zeros(
(input_modal.size(0), seq_length),
dtype=torch.long,
device=input_modal.device,
)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = token_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# TODO: Remove after transformers package upgrade to 2.5
class MMBTModel(nn.Module):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration
(config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape
``(batch_size, sequence_length, hidden_size)``. Sequence of
hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape
``(batch_size, hidden_size)``. Last layer hidden-state of the
first token of the sequence (classification token) further processed
by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction
(classification) objective during Bert pretraining. This output
is usually *not* a good summary of the semantic content of the
input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when
``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer +
the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the
initial embedding outputs.
**attentions**: (`optional`, returned when
``config.output_attentions=True``) list of ``torch.FloatTensor``
(one for each layer) of shape ``(batch_size, num_heads,
sequence_length, sequence_length)``: Attentions weights after
the attention softmax, used to compute the weighted average in the
self-attention heads.
Examples::
# For example purposes. Not runnable.
transformer = BertModel.from_pretrained('bert-base-uncased')
encoder = ImageEncoder(args)
mmbt = MMBTModel(config, transformer, encoder)
"""
def __init__(self, config, transformer, encoder):
super().__init__()
self.is_decoder = config.is_decoder
self.num_hidden_layers = config.num_hidden_layers
self.transformer = transformer
self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
def forward(
self,
input_modal: Tensor,
input_ids: Tensor,
modal_start_tokens: Optional[Tensor] = None,
modal_end_tokens: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
modal_token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
modal_position_ids: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_txt_shape = input_ids.size()
elif inputs_embeds is not None:
input_txt_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
modal_embeddings = self.modal_encoder(
input_modal,
start_token=modal_start_tokens,
end_token=modal_end_tokens,
position_ids=modal_position_ids,
token_type_ids=modal_token_type_ids,
)
input_modal_shape = modal_embeddings.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.ones(
input_txt_shape, dtype=torch.long, device=device
)
txt_embeddings = self.transformer.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
input_shape = embedding_output.size()[:-1]
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
else:
attention_mask = torch.cat(
[
torch.ones(input_modal_shape, device=device, dtype=torch.long),
attention_mask,
],
dim=1,
)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
else:
encoder_attention_mask = torch.cat(
[torch.ones(input_modal_shape, device=device), encoder_attention_mask],
dim=1,
)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the
# padding mask
# - if the model is an encoder, make the mask broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
if self.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# Python builtin next is currently not supported in Torchscript
if not torch.jit.is_scripting():
attention_mask = attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = encoder_attention_mask[:, None, None, :]
# Python builtin next is currently not supported in Torchscript
if not torch.jit.is_scripting():
encoder_attention_mask = encoder_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_attention_mask = (1.0 - encoder_attention_mask) * -10000.0
encoder_outputs = self.transformer.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.transformer.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
encoder_outputs[1:],
) # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
class MMBTBase(MultiModalEncoderBase):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
# Replace transformer layers with scriptable JIT layers
replace_with_jit()
def build(self):
encoders = self._build_encoders(self.config)
text_encoder, modal_encoder = encoders[0], encoders[1]
self._encoder_config = text_encoder.config
self._mmbt_config = MMBTConfig(
self._encoder_config,
num_labels=self.config.num_labels,
modal_hidden_size=self.config.modal_hidden_size,
)
self.use_modal_start_token = self.config.use_modal_start_token
self.use_modal_end_token = self.config.use_modal_end_token
self.num_max_segment = self.config.text_encoder.params.get("num_segments", 2)
self.mmbt = MMBTModel(self._mmbt_config, text_encoder, modal_encoder)
def extract_modal_end_token(self, sample_list: Dict[str, Tensor]):
# compute the position of the last non-masked token, which is <sep>
gather_index = sample_list["input_mask"].sum(1, keepdim=True) - 1
modal_end_token = (
torch.gather(sample_list["input_ids"], 1, gather_index)
.squeeze(1)
.clone()
.detach()
)
batch_size = sample_list["input_ids"].size(0)
device = sample_list["input_ids"].device
# remove start_token in input_ids
sample_list["input_ids"] = torch.cat(
[sample_list["input_ids"][:, 1:], sample_list["input_ids"][:, -1:]], dim=1
)
# update input_mask
sample_list["input_mask"] = torch.cat(
[
sample_list["input_mask"][:, 1:],
torch.zeros([batch_size, 1], dtype=torch.long, device=device),
],
dim=1,
)
return modal_end_token
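# Toy example (token ids are placeholders): input_ids [CLS, a, b, SEP, PAD]
# with input_mask [1, 1, 1, 1, 0] gives gather_index 3, so modal_end_token
# is SEP; input_ids becomes [a, b, SEP, PAD, PAD] and input_mask becomes
# [1, 1, 1, 0, 0], freeing one slot for the modal sequence.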
def forward(self, sample_list: Dict[str, Tensor]):
if self._is_direct_features_input:
if "input_modal" in sample_list:
input_modal = sample_list["input_modal"]
else:
input_modal = sample_list["image_feature_0"]
else:
input_modal = sample_list["image"]
modal_start_token: Optional[Tensor] = None
if self.use_modal_start_token:
modal_start_token = sample_list["input_ids"][:, 0].clone().detach()
modal_end_token: Optional[Tensor] = None
if self.use_modal_end_token:
modal_end_token = self.extract_modal_end_token(sample_list)
if "modal_token_type_ids" in sample_list:
modal_token_type_ids = sample_list["modal_token_type_ids"]
else:
token_value = 0
segment_ids = sample_list["segment_ids"]
max_id = segment_ids.max()
min_id = segment_ids.min()
# Case of only one segment
if max_id == min_id:
# If the single segment id is 0, the text occupies segment 0,
# which means the modal tokens will be at segment 1
# In the other case, it stays zero, which it already is
# NOTE: We compare with tensor here due to TorchScript compliance
if max_id == torch.tensor(0, dtype=max_id.dtype):
token_value = 1
else:
max_segment = self.num_max_segment - 1
# If max id is not equal to max_segment, it means
# text segments start from 0 which means modal will
# be last, otherwise, it is 0, which it already is
if max_id != torch.tensor(max_segment, dtype=max_id.dtype):
token_value = max_segment
modal_token_type_ids = torch.full(
(input_modal.size(0), 1),
fill_value=token_value,
dtype=torch.long,
device=input_modal.device,
)
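# E.g. with num_segments == 2 (assumed for illustration): all-zero
# segment_ids -> the modal tokens get type 1; segment_ids spanning 0 and 1
# -> the modal token type stays 0.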
# In case of XRAY, there might be only two dims
if input_modal.dim() == 2:
input_modal = input_modal.unsqueeze(dim=1)
# See details of inputs at
# https://github.com/huggingface/transformers/blob/1789c7/src/transformers/modeling_mmbt.py#L101 # noqa
output = self.mmbt(
input_modal,
input_ids=sample_list["input_ids"],
modal_start_tokens=modal_start_token,
modal_end_tokens=modal_end_token,
attention_mask=sample_list["input_mask"],
token_type_ids=sample_list["segment_ids"],
modal_token_type_ids=modal_token_type_ids,
position_ids=None,
modal_position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
)
return output
class MMBTForPreTraining(nn.Module):
def __init__(self, config, *args, **kwargs):
super().__init__()
self.config = config
self.bert = MMBTBase(config, *args, **kwargs)
self.encoder_config = self.bert.encoder_config
# TODO : Switch to AutoModelForPreTraining after transformers
# package upgrade to 2.5
pretraining_module = BertForPreTraining.from_pretrained(
self.config.bert_model_name,
config=self.encoder_config,
cache_dir=os.path.join(get_mmf_cache_dir(), "distributed_{}".format(-1)),
)
self.cls = deepcopy(pretraining_module.cls)
self.loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
self.tie_weights()
def tie_weights(self):
"""Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we
are cloning them instead.
"""
if hasattr(self, "cls"):
self.bert.mmbt.transformer._tie_or_clone_weights(
self.cls.predictions.decoder,
self.bert.mmbt.transformer.embeddings.word_embeddings,
)
def forward(self, sample_list):
module_output = self.bert(sample_list)
sequence_output, pooled_output = module_output[0], module_output[1]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output
)
output = {}
if (
self.encoder_config.output_hidden_states
or self.encoder_config.output_attentions
):
output["extras"] = module_output[2:]
loss_key = f"{sample_list.dataset_name}/{sample_list.dataset_type}"
if "lm_label_ids" in sample_list and sample_list.lm_label_ids is not None:
output["logits"] = prediction_scores
lm_label_ids = sample_list.lm_label_ids
# Only take the last scores, which are the text tokens' scores, and ignore the image scores
text_scores = (
prediction_scores[:, -(lm_label_ids.size(1)) :]
.contiguous()
.view(-1, self.encoder_config.vocab_size)
)
masked_lm_loss = self.loss_fct(
text_scores, sample_list.lm_label_ids.contiguous().view(-1)
)
output["losses"] = {}
output["losses"][f"{loss_key}/masked_lm_loss"] = masked_lm_loss
# Add alignment loss if present
if (
"image_text_alignment" in sample_list
and sample_list.image_text_alignment is not None
):
output["seq_relationship_logits"] = seq_relationship_score
alignment_loss = self.loss_fct(
seq_relationship_score.contiguous().view(-1),
sample_list.image_text_alignment.contiguous().view(-1),
)
output["losses"][f"{loss_key}/alignment_loss"] = alignment_loss
return output
class MMBTForClassification(nn.Module):
def __init__(self, config, *args, **kwargs):
super().__init__()
self.config = config
self.bert = MMBTBase(config, *args, **kwargs)
self.encoder_config = self.bert.encoder_config
self.num_labels = self.config.num_labels
self.output_hidden_states = self.encoder_config.output_hidden_states
self.output_attentions = self.encoder_config.output_attentions
self.fused_feature_only = self.config.get("fused_feature_only", False)
self.dropout = nn.Dropout(self.encoder_config.hidden_dropout_prob)
self.classifier = nn.Sequential(
BertPredictionHeadTransform(self.encoder_config),
nn.Linear(self.encoder_config.hidden_size, self.config.num_labels),
)
def forward(self, sample_list: Dict[str, Tensor]):
module_output = self.bert(sample_list)
pooled_output = module_output[1]
output = {}
if not torch.jit.is_scripting():
if self.output_hidden_states or self.output_attentions:
output["extras"] = module_output[2:]
else:
assert not (
self.output_hidden_states or self.output_attentions
), "output_attentions or output_hidden_states not supported in script mode"
pooled_output = self.dropout(pooled_output)
if self.fused_feature_only:
output["fused_feature"] = self.classifier[0](pooled_output)
return output
logits = self.classifier(pooled_output)
reshaped_logits = logits.contiguous().view(-1, self.num_labels)
output["scores"] = reshaped_logits
return output
@registry.register_model("mmbt")
class MMBT(BaseModel):
@dataclass
class Config(BaseModel.Config):
model: str = "mmbt"
# classification or pretraining
training_head_type: str = "pretraining"
bert_model_name: str = "bert-base-uncased"
direct_features_input: bool = False
freeze_text: bool = False
freeze_modal: bool = False
freeze_complete_base: bool = False
finetune_lr_multiplier: float = 1
# Dimension of the embedding finally returned by the modal encoder
modal_hidden_size: int = 2048
text_hidden_size: int = 768
num_labels: int = 2
# This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]
modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()
)
text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(
type=TextEncoderTypes.transformer,
params=TransformerEncoder.Config(bert_model_name=II("bert_model_name")),
)
use_modal_start_token: bool = True
use_modal_end_token: bool = True
fused_feature_only: bool = False
output_dim: int = 768
def __init__(self, config: Union[DictConfig, Config], *args, **kwargs):
super().__init__(config)
def build(self):
if self.config.training_head_type == "pretraining":
self.model = MMBTForPreTraining(self.config)
else:
self.model = MMBTForClassification(self.config)
if self.config.freeze_complete_base or self.config.freeze_text:
for p in self.model.bert.mmbt.transformer.parameters():
p.requires_grad = False
if self.config.freeze_complete_base or self.config.freeze_modal:
for p in self.model.bert.mmbt.modal_encoder.parameters():
p.requires_grad = False
# Backward compatibility for code from older mmbt
@classmethod
def format_state_key(cls, key):
return (
key.replace("base.bert", "model.bert")
.replace("base.cls", "model.cls")
.replace("base.classifier", "model.classifier")
)
@classmethod
def from_pretrained(cls, model_name, *args, **kwargs):
model = super().from_pretrained(model_name, *args, **kwargs)
config = load_pretrained_model(model_name)["full_config"]
OmegaConf.set_struct(config, True)
if model_name == "mmbt.hateful_memes.images" or kwargs.get("interface"):
return MMBTGridHMInterface(model, config)
return model
@classmethod
def config_path(cls):
return "configs/models/mmbt/pretrain.yaml"
def forward(self, sample_list: Dict[str, Tensor]):
return self.model(sample_list)
def get_optimizer_parameters(self, config):
return get_optimizer_parameters_for_bert(self.model, config)
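# A minimal usage sketch (hedged: the zoo key below is the one referenced in
# from_pretrained above and is used here only as an example).
def _example_load_pretrained_mmbt():
    # Returns an MMBTGridHMInterface wrapper around the hateful memes
    # checkpoint, as handled by MMBT.from_pretrained above.
    return MMBT.from_pretrained("mmbt.hateful_memes.images")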
# Source: EXA-1-master / exa/models/mmf-main/mmf/models/mmbt.py
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from mmf.models.pythia import Pythia
from mmf.modules.layers import ClassifierLayer
@registry.register_model("butd")
class BUTD(Pythia):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/butd/defaults.yaml"
def build(self):
self._build_word_embedding()
self._init_feature_encoders("image")
self._init_feature_embeddings("image")
self._init_classifier()
self._init_extras()
def _build_word_embedding(self):
self.text_processor = registry.get(self._datasets[0] + "_text_processor")
self.vocab = self.text_processor.vocab
self.vocab_size = self.vocab.get_size()
self.word_embedding = self.vocab.get_embedding(
torch.nn.Embedding, embedding_dim=self.config.embedding_dim
)
self.text_embeddings_out_dim = self.config.embedding_dim
def _init_classifier(self):
self.classifier = ClassifierLayer(
self.config.classifier.type,
in_dim=self.config.classifier.params.feature_dim,
out_dim=self.vocab_size,
**self.config.classifier.params,
)
def get_optimizer_parameters(self, config):
params = [
{"params": self.word_embedding.parameters()},
{"params": self.image_feature_embeddings_list.parameters()},
{"params": self.classifier.parameters()},
{
"params": self.image_feature_encoders.parameters(),
"lr": (config.optimizer.params.lr * 0.1),
},
]
return params
def prepare_data(self, sample_list, batch_size):
# turn off teacher forcing during beam search
# (otherwise one cannot run beam search on val set)
self.teacher_forcing = self.config.inference.type != "beam_search" and hasattr(
sample_list, "text"
)
data = {}
if self.teacher_forcing:
caption_lengths, sort_ind = sample_list.caption_len.sort(
dim=0, descending=True
)
data["decode_lengths"] = (caption_lengths - 1).tolist()
sample_list.text = sample_list.text[sort_ind]
sample_list.answers = sample_list.answers[sort_ind]
sample_list.image_feature_0 = sample_list.image_feature_0[sort_ind]
data["texts"] = sample_list.text
timesteps = max(data["decode_lengths"])
sample_list.add_field("targets", sample_list.text[:, 1:])
else:
data["texts"] = sample_list.answers.new_full(
(batch_size, 1), self.vocab.SOS_INDEX, dtype=torch.long
)
timesteps = self.text_processor.max_length
sample_list.add_field("targets", sample_list.answers[:, 0, 1:])
return data, sample_list, timesteps
def init_hidden_state(self, features):
h = features.new_zeros(
(features.size(0), self.config.classifier.params.hidden_dim),
dtype=torch.float,
)
c = features.new_zeros(
(features.size(0), self.config.classifier.params.hidden_dim),
dtype=torch.float,
)
return h, c
def get_data_t(self, t, data, batch_size_t, prev_output):
if self.teacher_forcing:
# Modify batch_size for timestep t
batch_size_t = sum([l > t for l in data["decode_lengths"]])
elif prev_output is not None and self.config.inference.type == "greedy":
# Adding the t-1 output words to data["texts"] for greedy decoding
output_softmax = torch.log_softmax(prev_output, dim=1)
_, indices = torch.max(output_softmax, dim=1, keepdim=True)
data["texts"] = torch.cat(
(data["texts"], indices.view(batch_size_t, 1)), dim=1
)
# Slice data based on batch_size at timestep t
data["texts"] = data["texts"][:batch_size_t]
if "state" in data:
h1 = data["state"]["td_hidden"][0][:batch_size_t]
c1 = data["state"]["td_hidden"][1][:batch_size_t]
h2 = data["state"]["lm_hidden"][0][:batch_size_t]
c2 = data["state"]["lm_hidden"][1][:batch_size_t]
else:
h1, c1 = self.init_hidden_state(data["texts"])
h2, c2 = self.init_hidden_state(data["texts"])
data["state"] = {"td_hidden": (h1, c1), "lm_hidden": (h2, c2)}
registry.register(f"{h1.device}_lstm_state", data["state"])
return data, batch_size_t
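# Worked example (lengths assumed): with decode_lengths [5, 3, 2] (sorted
# descending), batch_size_t is 3 for t in {0, 1}, 2 at t == 2, and 1 for
# t in {3, 4}, so finished captions simply drop out of the batch.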
def forward(self, sample_list):
# Stores the output probabilities.
scores = sample_list.answers.new_ones(
(
sample_list.answers.size(0),
self.text_processor.max_length,
self.vocab_size,
),
dtype=torch.float,
)
if self.config["inference"]["type"] in ["beam_search", "nucleus_sampling"]:
decoder = registry.get_decoder_class(self.config["inference"]["type"])(
self.vocab, self.config
)
sample_list = decoder.init_batch(sample_list)
batch_size = sample_list.image_feature_0.size(0)
data, sample_list, timesteps = self.prepare_data(sample_list, batch_size)
output = None
batch_size_t = batch_size
for t in range(timesteps):
data, batch_size_t = self.get_data_t(t, data, batch_size_t, output)
if self.config.inference.type in ["beam_search", "nucleus_sampling"]:
pi_t = data["texts"]
else:
pi_t = data["texts"][:, t].unsqueeze(-1)
embedding = self.word_embedding(pi_t)
attention_feature, _ = self.process_feature_embedding(
"image", sample_list, embedding[:, 0, :], batch_size_t=batch_size_t
)
output = self.classifier(attention_feature)
# Compute decoding
if self.config.inference.type in ["beam_search", "nucleus_sampling"]:
finish, data, batch_size_t = decoder.decode(t, data, output)
if finish:
break
else:
scores[:batch_size_t, t] = output
model_output = {}
if self.config.inference.type in ["beam_search", "nucleus_sampling"]:
results = decoder.get_result()
results = torch.nn.functional.pad(
results,
(0, self.text_processor.max_length - results.size()[-1]),
"constant",
0,
)
model_output["captions"] = results
model_output["losses"] = {}
loss_key = "{}/{}".format(
sample_list.dataset_name, sample_list.dataset_type
)
# Add a dummy loss so that loss calculation is not required
model_output["losses"][loss_key + "/dummy_loss"] = torch.zeros(
batch_size, device=sample_list.answers.device
)
else:
model_output["scores"] = scores
return model_output
# Source: EXA-1-master / exa/models/mmf-main/mmf/models/butd.py
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import logging
import math
import torch
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.layers import ClassifierLayer
from mmf.utils.build import build_image_encoder
from omegaconf import OmegaConf
from torch import nn
try:
from transformers3.modeling_bert import (
BertConfig,
BertEmbeddings,
BertEncoder,
BertPreTrainedModel,
)
except ImportError:
from transformers.modeling_bert import (
BertConfig,
BertEmbeddings,
BertEncoder,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
@registry.register_model("m4c")
class M4C(BaseModel):
def __init__(self, config):
super().__init__(config)
self.mmt_config = BertConfig(**self.config.mmt)
self._datasets = registry.get("config").datasets.split(",")
@classmethod
def config_path(cls):
return "configs/models/m4c/defaults.yaml"
def build(self):
# modules requiring custom learning rates (usually for finetuning)
self.finetune_modules = []
# split model building into several components
self._build_txt_encoding()
self._build_obj_encoding()
self._build_ocr_encoding()
self._build_mmt()
self._build_output()
def _build_encoder_config(self):
return OmegaConf.create(
{
"type": "finetune_faster_rcnn_fpn_fc7",
"params": {
"in_dim": 2048,
"weights_file": "models/detectron.defaults/fc7_w.pkl",
"bias_file": "models/detectron.defaults/fc7_b.pkl",
"model_data_dir": self.config.model_data_dir,
},
}
)
def _build_txt_encoding(self):
TEXT_BERT_HIDDEN_SIZE = 768
self.text_bert_config = BertConfig(**self.config.text_bert)
if self.config.text_bert_init_from_bert_base:
self.text_bert = TextBert.from_pretrained(
"bert-base-uncased", config=self.text_bert_config
)
# Use a smaller learning rate on text bert when initializing
# from BERT_BASE
self.finetune_modules.append(
{"module": self.text_bert, "lr_scale": self.config.lr_scale_text_bert}
)
else:
logger.info("NOT initializing text_bert from BERT_BASE")
self.text_bert = TextBert(self.text_bert_config)
# if the text bert output dimension doesn't match the
# multimodal transformer (mmt) hidden dimension,
# add a linear projection layer between the two
if self.mmt_config.hidden_size != TEXT_BERT_HIDDEN_SIZE:
logger.info(
f"Projecting text_bert output to {self.mmt_config.hidden_size} dim"
)
self.text_bert_out_linear = nn.Linear(
TEXT_BERT_HIDDEN_SIZE, self.mmt_config.hidden_size
)
else:
self.text_bert_out_linear = nn.Identity()
def _build_obj_encoding(self):
# object appearance feature: Faster R-CNN
self.obj_faster_rcnn_fc7 = build_image_encoder(
self._build_encoder_config(), direct_features=True
)
# apply smaller lr to pretrained Faster R-CNN fc7
self.finetune_modules.append(
{"module": self.obj_faster_rcnn_fc7, "lr_scale": self.config.lr_scale_frcn}
)
self.linear_obj_feat_to_mmt_in = nn.Linear(
self.config.obj.mmt_in_dim, self.mmt_config.hidden_size
)
# object location feature: relative bounding box coordinates (4-dim)
self.linear_obj_bbox_to_mmt_in = nn.Linear(4, self.mmt_config.hidden_size)
self.obj_feat_layer_norm = nn.LayerNorm(self.mmt_config.hidden_size)
self.obj_bbox_layer_norm = nn.LayerNorm(self.mmt_config.hidden_size)
self.obj_drop = nn.Dropout(self.config.obj.dropout_prob)
def _build_ocr_encoding(self):
self.remove_ocr_fasttext = self.config.ocr.get("remove_ocr_fasttext", False)
self.remove_ocr_phoc = self.config.ocr.get("remove_ocr_phoc", False)
self.remove_ocr_frcn = self.config.ocr.get("remove_ocr_frcn", False)
self.remove_ocr_semantics = self.config.ocr.get("remove_ocr_semantics", False)
self.remove_ocr_bbox = self.config.ocr.get("remove_ocr_bbox", False)
# OCR appearance feature: Faster R-CNN
self.ocr_faster_rcnn_fc7 = build_image_encoder(
self._build_encoder_config(), direct_features=True
)
self.finetune_modules.append(
{"module": self.ocr_faster_rcnn_fc7, "lr_scale": self.config.lr_scale_frcn}
)
self.linear_ocr_feat_to_mmt_in = nn.Linear(
self.config.ocr.mmt_in_dim, self.mmt_config.hidden_size
)
# OCR location feature: relative bounding box coordinates (4-dim)
self.linear_ocr_bbox_to_mmt_in = nn.Linear(4, self.mmt_config.hidden_size)
self.ocr_feat_layer_norm = nn.LayerNorm(self.mmt_config.hidden_size)
self.ocr_bbox_layer_norm = nn.LayerNorm(self.mmt_config.hidden_size)
self.ocr_drop = nn.Dropout(self.config.ocr.dropout_prob)
def _build_mmt(self):
self.mmt = MMT(self.mmt_config)
# allow specifying a different/scaled lr for multimodal transformer
self.finetune_modules.append(
{"module": self.mmt, "lr_scale": self.config.lr_scale_mmt}
)
def _build_output(self):
# dynamic OCR-copying scores with pointer network
self.ocr_ptr_net = OcrPtrNet(**self.config.classifier.ocr_ptr_net)
# fixed answer vocabulary scores
num_choices = registry.get(self._datasets[0] + "_num_final_outputs")
# remove the OCR copying dimensions in LoRRA's classifier output
# (OCR copying will be handled separately)
num_choices -= self.config.classifier.ocr_max_num
self.classifier = ClassifierLayer(
self.config.classifier.type,
in_dim=self.mmt_config.hidden_size,
out_dim=num_choices,
**self.config.classifier.params,
)
self.answer_processor = registry.get(self._datasets[0] + "_answer_processor")
def forward(self, sample_list):
# fwd_results holds intermediate forward pass results
# TODO possibly replace it with another sample list
fwd_results = {}
self._forward_txt_encoding(sample_list, fwd_results)
self._forward_obj_encoding(sample_list, fwd_results)
self._forward_ocr_encoding(sample_list, fwd_results)
self._forward_mmt_and_output(sample_list, fwd_results)
# only keep scores in the forward pass results
results = {"scores": fwd_results["scores"]}
return results
def _forward_txt_encoding(self, sample_list, fwd_results):
fwd_results["txt_inds"] = sample_list.text
# binary mask of valid text (question words) vs padding
fwd_results["txt_mask"] = _get_mask(
sample_list.text_len, sample_list.text.size(1)
)
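# Hedged sketch of the length-to-mask helper (_get_mask is defined elsewhere
# in this file; this illustrative version is an assumption, not the actual
# implementation):
#
#   def _length_to_mask(nums, max_num):
#       # nums: (batch,) lengths -> (batch, max_num) float mask,
#       # 1.0 for valid positions and 0.0 for padding
#       arange = torch.arange(max_num, device=nums.device).unsqueeze(0)
#       return (arange < nums.unsqueeze(1)).float()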
def _forward_obj_encoding(self, sample_list, fwd_results):
# object appearance feature: Faster R-CNN fc7
obj_fc6 = sample_list.image_feature_0
obj_fc7 = self.obj_faster_rcnn_fc7(obj_fc6)
obj_fc7 = F.normalize(obj_fc7, dim=-1)
obj_feat = obj_fc7
obj_bbox = sample_list.obj_bbox_coordinates
obj_mmt_in = self.obj_feat_layer_norm(
self.linear_obj_feat_to_mmt_in(obj_feat)
) + self.obj_bbox_layer_norm(self.linear_obj_bbox_to_mmt_in(obj_bbox))
obj_mmt_in = self.obj_drop(obj_mmt_in)
fwd_results["obj_mmt_in"] = obj_mmt_in
# binary mask of valid object vs padding
obj_nums = sample_list.image_info_0.max_features
fwd_results["obj_mask"] = _get_mask(obj_nums, obj_mmt_in.size(1))
def _forward_ocr_encoding(self, sample_list, fwd_results):
# OCR FastText feature (300-dim)
ocr_fasttext = sample_list.context_feature_0
ocr_fasttext = F.normalize(ocr_fasttext, dim=-1)
assert ocr_fasttext.size(-1) == 300
# OCR PHOC feature (604-dim)
ocr_phoc = sample_list.context_feature_1
ocr_phoc = F.normalize(ocr_phoc, dim=-1)
assert ocr_phoc.size(-1) == 604
# OCR appearance feature: Faster R-CNN fc7
ocr_fc6 = sample_list.image_feature_1[:, : ocr_fasttext.size(1), :]
ocr_fc7 = self.ocr_faster_rcnn_fc7(ocr_fc6)
ocr_fc7 = F.normalize(ocr_fc7, dim=-1)
# OCR order vectors (legacy from LoRRA model; set to all zeros)
# TODO remove OCR order vectors; they are not needed
ocr_order_vectors = torch.zeros_like(sample_list.order_vectors)
if self.remove_ocr_fasttext:
ocr_fasttext = torch.zeros_like(ocr_fasttext)
if self.remove_ocr_phoc:
ocr_phoc = torch.zeros_like(ocr_phoc)
if self.remove_ocr_frcn:
ocr_fc7 = torch.zeros_like(ocr_fc7)
ocr_feat = torch.cat(
[ocr_fasttext, ocr_phoc, ocr_fc7, ocr_order_vectors], dim=-1
)
ocr_bbox = sample_list.ocr_bbox_coordinates
if self.remove_ocr_semantics:
ocr_feat = torch.zeros_like(ocr_feat)
if self.remove_ocr_bbox:
ocr_bbox = torch.zeros_like(ocr_bbox)
ocr_mmt_in = self.ocr_feat_layer_norm(
self.linear_ocr_feat_to_mmt_in(ocr_feat)
) + self.ocr_bbox_layer_norm(self.linear_ocr_bbox_to_mmt_in(ocr_bbox))
ocr_mmt_in = self.ocr_drop(ocr_mmt_in)
fwd_results["ocr_mmt_in"] = ocr_mmt_in
# binary mask of valid OCR vs padding
ocr_nums = sample_list.context_info_0.max_features
fwd_results["ocr_mask"] = _get_mask(ocr_nums, ocr_mmt_in.size(1))
def _forward_mmt(self, sample_list, fwd_results):
# first forward the text BERT layers
text_bert_out = self.text_bert(
txt_inds=fwd_results["txt_inds"], txt_mask=fwd_results["txt_mask"]
)
fwd_results["txt_emb"] = self.text_bert_out_linear(text_bert_out)
mmt_results = self.mmt(
txt_emb=fwd_results["txt_emb"],
txt_mask=fwd_results["txt_mask"],
obj_emb=fwd_results["obj_mmt_in"],
obj_mask=fwd_results["obj_mask"],
ocr_emb=fwd_results["ocr_mmt_in"],
ocr_mask=fwd_results["ocr_mask"],
fixed_ans_emb=self.classifier.module.weight,
prev_inds=fwd_results["prev_inds"],
)
fwd_results.update(mmt_results)
def _forward_output(self, sample_list, fwd_results):
mmt_dec_output = fwd_results["mmt_dec_output"]
mmt_ocr_output = fwd_results["mmt_ocr_output"]
ocr_mask = fwd_results["ocr_mask"]
fixed_scores = self.classifier(mmt_dec_output)
dynamic_ocr_scores = self.ocr_ptr_net(mmt_dec_output, mmt_ocr_output, ocr_mask)
scores = torch.cat([fixed_scores, dynamic_ocr_scores], dim=-1)
fwd_results["scores"] = scores
def _forward_mmt_and_output(self, sample_list, fwd_results):
if self.training:
fwd_results["prev_inds"] = sample_list.train_prev_inds.clone()
self._forward_mmt(sample_list, fwd_results)
self._forward_output(sample_list, fwd_results)
else:
dec_step_num = sample_list.train_prev_inds.size(1)
# fill prev_inds with BOS_IDX at index 0, and zeros elsewhere
fwd_results["prev_inds"] = torch.zeros_like(sample_list.train_prev_inds)
fwd_results["prev_inds"][:, 0] = self.answer_processor.BOS_IDX
# greedy decoding at test time
for _ in range(dec_step_num):
self._forward_mmt(sample_list, fwd_results)
self._forward_output(sample_list, fwd_results)
# find the highest scoring output (either a fixed vocab
# or an OCR), and add it to prev_inds for auto-regressive
# decoding
argmax_inds = fwd_results["scores"].argmax(dim=-1)
fwd_results["prev_inds"][:, 1:] = argmax_inds[:, :-1]
def get_optimizer_parameters(self, config):
optimizer_param_groups = []
base_lr = config.optimizer.params.lr
# collect all the parameters that need different/scaled lr
finetune_params_set = set()
for m in self.finetune_modules:
optimizer_param_groups.append(
{
"params": list(m["module"].parameters()),
"lr": base_lr * m["lr_scale"],
}
)
finetune_params_set.update(list(m["module"].parameters()))
# remaining_params are those parameters w/ default lr
remaining_params = [
p for p in self.parameters() if p not in finetune_params_set
]
# put the default lr parameters at the beginning
# so that the printed lr (of group 0) matches the default lr
optimizer_param_groups.insert(0, {"params": remaining_params})
return optimizer_param_groups
@classmethod
def update_registry_for_pretrained(cls, config, checkpoint, full_output):
from omegaconf import OmegaConf
# Hack datasets using OmegaConf
datasets = full_output["full_config"].datasets
dataset = datasets.split(",")[0]
config_mock = OmegaConf.create({"datasets": datasets})
registry.register("config", config_mock)
registry.register(
f"{dataset}_num_final_outputs",
# Add ocr_max_num back here, since _build_output subtracts it from num_choices
checkpoint["classifier.module.weight"].size(0)
+ config.classifier.ocr_max_num,
)
# Fix this later, when processor pipeline is available
answer_processor = OmegaConf.create({"BOS_IDX": 1})
registry.register(f"{dataset}_answer_processor", answer_processor)
class TextBert(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.init_weights()
def forward(self, txt_inds, txt_mask):
encoder_inputs = self.embeddings(txt_inds)
attention_mask = txt_mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
assert not extended_attention_mask.requires_grad
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
encoder_inputs, extended_attention_mask, head_mask=head_mask
)
seq_output = encoder_outputs[0]
return seq_output
class MMT(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.prev_pred_embeddings = PrevPredEmbeddings(config)
self.encoder = BertEncoder(config)
self.init_weights()
def forward(
self,
txt_emb,
txt_mask,
obj_emb,
obj_mask,
ocr_emb,
ocr_mask,
fixed_ans_emb,
prev_inds,
):
# build embeddings for predictions in previous decoding steps
# fixed_ans_emb is an embedding lookup table for each fixed vocabulary
dec_emb = self.prev_pred_embeddings(fixed_ans_emb, ocr_emb, prev_inds)
# a zero mask for the decoding steps, so that elements in the encoding steps
# cannot attend to the decoding steps; a triangular causal mask for the
# decoding steps is filled in later in extended_attention_mask
dec_mask = torch.zeros(
dec_emb.size(0), dec_emb.size(1), dtype=torch.float32, device=dec_emb.device
)
encoder_inputs = torch.cat([txt_emb, obj_emb, ocr_emb, dec_emb], dim=1)
attention_mask = torch.cat([txt_mask, obj_mask, ocr_mask, dec_mask], dim=1)
# offsets of each modality in the joint embedding space
txt_max_num = txt_mask.size(-1)
obj_max_num = obj_mask.size(-1)
ocr_max_num = ocr_mask.size(-1)
dec_max_num = dec_mask.size(-1)
txt_begin = 0
txt_end = txt_begin + txt_max_num
ocr_begin = txt_max_num + obj_max_num
ocr_end = ocr_begin + ocr_max_num
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, from_seq_length, to_seq_length]
# So we can broadcast to
# [batch_size, num_heads, from_seq_length, to_seq_length]
to_seq_length = attention_mask.size(1)
from_seq_length = to_seq_length
# generate the attention mask similar to prefix LM
# all elements can attend to the elements in encoding steps
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.repeat(
1, 1, from_seq_length, 1
)
# decoding step elements can attend to themselves in a causal manner
extended_attention_mask[:, :, -dec_max_num:, -dec_max_num:] = _get_causal_mask(
dec_max_num, encoder_inputs.device
)
# flip the mask, so that invalid attention pairs have -10000.
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
assert not extended_attention_mask.requires_grad
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
encoder_inputs, extended_attention_mask, head_mask=head_mask
)
mmt_seq_output = encoder_outputs[0]
mmt_txt_output = mmt_seq_output[:, txt_begin:txt_end]
mmt_ocr_output = mmt_seq_output[:, ocr_begin:ocr_end]
mmt_dec_output = mmt_seq_output[:, -dec_max_num:]
results = {
"mmt_seq_output": mmt_seq_output,
"mmt_txt_output": mmt_txt_output,
"mmt_ocr_output": mmt_ocr_output,
"mmt_dec_output": mmt_dec_output,
}
return results
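# Attention layout used above: the joint sequence fed to the encoder is
# [txt | obj | ocr | dec]. Every position may attend to the txt/obj/ocr
# (encoding) positions, while the dec (decoding) positions are masked out for
# everyone except the dec positions themselves, which attend to each other
# through the lower-triangular causal block, i.e. a prefix-LM style mask.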
class OcrPtrNet(nn.Module):
def __init__(self, hidden_size, query_key_size=None):
super().__init__()
if query_key_size is None:
query_key_size = hidden_size
self.hidden_size = hidden_size
self.query_key_size = query_key_size
self.query = nn.Linear(hidden_size, query_key_size)
self.key = nn.Linear(hidden_size, query_key_size)
def forward(self, query_inputs, key_inputs, attention_mask):
extended_attention_mask = (1.0 - attention_mask) * -10000.0
assert extended_attention_mask.dim() == 2
extended_attention_mask = extended_attention_mask.unsqueeze(1)
query_layer = self.query(query_inputs)
if query_layer.dim() == 2:
query_layer = query_layer.unsqueeze(1)
squeeze_result = True
else:
squeeze_result = False
key_layer = self.key(key_inputs)
scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
scores = scores / math.sqrt(self.query_key_size)
scores = scores + extended_attention_mask
if squeeze_result:
scores = scores.squeeze(1)
return scores
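# Shape sketch for OcrPtrNet.forward (illustrative): with query_inputs of shape
# [batch, num_dec, hidden], key_inputs of shape [batch, num_ocr, hidden] and
# attention_mask of shape [batch, num_ocr] (1 for valid OCR tokens, 0 for
# padding), the returned scores have shape [batch, num_dec, num_ocr]; padded
# OCR positions receive a large negative offset (-10000). A 2-D query of shape
# [batch, hidden] yields scores of shape [batch, num_ocr].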
class PrevPredEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
MAX_DEC_LENGTH = 100
MAX_TYPE_NUM = 5
hidden_size = config.hidden_size
ln_eps = config.layer_norm_eps
self.position_embeddings = nn.Embedding(MAX_DEC_LENGTH, hidden_size)
self.token_type_embeddings = nn.Embedding(MAX_TYPE_NUM, hidden_size)
self.ans_layer_norm = nn.LayerNorm(hidden_size, eps=ln_eps)
self.ocr_layer_norm = nn.LayerNorm(hidden_size, eps=ln_eps)
self.emb_layer_norm = nn.LayerNorm(hidden_size, eps=ln_eps)
self.emb_dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, ans_emb, ocr_emb, prev_inds):
assert prev_inds.dim() == 2 and prev_inds.dtype == torch.long
assert ans_emb.dim() == 2
batch_size = prev_inds.size(0)
seq_length = prev_inds.size(1)
ans_num = ans_emb.size(0)
# apply layer normalization to both answer embedding and OCR embedding
# before concatenation, so that they have the same scale
ans_emb = self.ans_layer_norm(ans_emb)
ocr_emb = self.ocr_layer_norm(ocr_emb)
assert ans_emb.size(-1) == ocr_emb.size(-1)
ans_emb = ans_emb.unsqueeze(0).expand(batch_size, -1, -1)
ans_ocr_emb_cat = torch.cat([ans_emb, ocr_emb], dim=1)
raw_dec_emb = _batch_gather(ans_ocr_emb_cat, prev_inds)
# Add position and type embedding for previous predictions
position_ids = torch.arange(seq_length, dtype=torch.long, device=ocr_emb.device)
position_ids = position_ids.unsqueeze(0).expand(batch_size, seq_length)
position_embeddings = self.position_embeddings(position_ids)
# Token type ids: 0 -- vocab; 1 -- OCR
token_type_ids = prev_inds.ge(ans_num).long()
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = position_embeddings + token_type_embeddings
embeddings = self.emb_layer_norm(embeddings)
embeddings = self.emb_dropout(embeddings)
dec_emb = raw_dec_emb + embeddings
return dec_emb
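# Index convention assumed by the gather above: prev_inds values in
# [0, ans_num) select rows of the fixed answer-vocabulary embedding, while
# values in [ans_num, ans_num + num_ocr) select the corresponding OCR token
# embeddings; the same threshold (prev_inds >= ans_num) drives the 0/1 token
# type embedding.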
def _get_mask(nums, max_num):
# non_pad_mask: b x lq, torch.float32, 0. on PAD
batch_size = nums.size(0)
arange = torch.arange(0, max_num).unsqueeze(0).expand(batch_size, -1)
non_pad_mask = arange.to(nums.device).lt(nums.unsqueeze(-1))
non_pad_mask = non_pad_mask.type(torch.float32)
return non_pad_mask
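# Example of _get_mask (illustrative values): for nums = tensor([2, 4]) and
# max_num = 4, the returned mask is
#   [[1., 1., 0., 0.],
#    [1., 1., 1., 1.]]
# i.e. 1. over the first nums[i] positions of each row and 0. over padding.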
@functools.lru_cache(maxsize=32)
def _get_causal_mask(seq_length, device):
# generate a lower triangular mask
mask = torch.zeros(seq_length, seq_length, device=device)
for i in range(seq_length):
for j in range(i + 1):
mask[i, j] = 1.0
return mask
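# _get_causal_mask(3, device) returns the lower-triangular matrix
#   [[1., 0., 0.],
#    [1., 1., 0.],
#    [1., 1., 1.]]
# which is equivalent to torch.tril(torch.ones(seq_length, seq_length)); the
# explicit loop stays cheap thanks to the lru_cache above.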
def _batch_gather(x, inds):
assert x.dim() == 3
batch_size = x.size(0)
length = x.size(1)
dim = x.size(2)
x_flat = x.view(batch_size * length, dim)
batch_offsets = torch.arange(batch_size, device=inds.device) * length
batch_offsets = batch_offsets.unsqueeze(-1)
assert batch_offsets.dim() == inds.dim()
inds_flat = batch_offsets + inds
results = F.embedding(inds_flat, x_flat)
return results
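# _batch_gather semantics: given x of shape [batch, length, dim] and integer
# inds of shape [batch, k] with values in [0, length), the result has shape
# [batch, k, dim] with result[b, j] == x[b, inds[b, j]]. The batch offsets turn
# per-example indices into indices over the flattened [batch * length, dim]
# view so a single F.embedding lookup can serve the whole batch.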
| EXA-1-master | exa/models/mmf-main/mmf/models/m4c.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import omegaconf
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.embeddings import (
ImageFeatureEmbedding,
MultiHeadImageFeatureEmbedding,
PreExtractedEmbedding,
TextEmbedding,
)
from mmf.modules.layers import ClassifierLayer, ModalCombineLayer
from mmf.utils.build import build_image_encoder
from torch import nn
@registry.register_model("pythia")
class Pythia(BaseModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self._global_config = registry.get("config")
self._datasets = self._global_config.datasets.split(",")
@classmethod
def config_path(cls):
return "configs/models/pythia/defaults.yaml"
@classmethod
def format_state_key(cls, key):
return key.replace("fa_history", "fa_context")
def build(self):
self._build_word_embedding()
self._init_text_embeddings("text")
self._init_feature_encoders("image")
self._init_feature_embeddings("image")
self._init_combine_layer("image", "text")
self._init_classifier(self._get_classifier_input_dim())
self._init_extras()
def _build_word_embedding(self):
assert len(self._datasets) > 0
text_processor = registry.get(self._datasets[0] + "_text_processor")
vocab = text_processor.vocab
self.word_embedding = vocab.get_embedding(torch.nn.Embedding, embedding_dim=300)
def _init_text_embeddings(self, attr="text"):
if "embeddings" not in attr:
attr += "_embeddings"
text_embeddings = []
text_embeddings_list_config = self.config[attr]
embeddings_out_dim = 0
for text_embedding in text_embeddings_list_config:
embedding_type = text_embedding.type
embedding_kwargs = copy.deepcopy(text_embedding.params)
self._update_text_embedding_args(embedding_kwargs)
embedding = TextEmbedding(embedding_type, **embedding_kwargs)
text_embeddings.append(embedding)
embeddings_out_dim += embedding.text_out_dim
setattr(self, attr + "_out_dim", embeddings_out_dim)
setattr(self, attr, nn.ModuleList(text_embeddings))
def _update_text_embedding_args(self, args):
# Add model_data_dir to kwargs
args.model_data_dir = self.config.model_data_dir
def _init_feature_encoders(self, attr):
feat_encoders = []
feat_encoders_list_config = self.config[attr + "_feature_encodings"]
feature_dim = self.config[attr + "_feature_dim"]
setattr(self, attr + "_feature_dim", feature_dim)
for feat_encoder in feat_encoders_list_config:
feat_encoder_config = copy.deepcopy(feat_encoder)
with omegaconf.open_dict(feat_encoder_config):
feat_encoder_config.params.model_data_dir = self.config.model_data_dir
feat_encoder_config.params.in_dim = feature_dim
feat_model = build_image_encoder(feat_encoder_config, direct_features=True)
feat_encoders.append(feat_model)
setattr(self, attr + "_feature_dim", feat_model.out_dim)
setattr(self, attr + "_feature_encoders", nn.ModuleList(feat_encoders))
def _init_feature_embeddings(self, attr):
feature_embeddings_list = []
num_feature_feat = len(getattr(self.config, f"{attr}_feature_encodings"))
self.feature_embeddings_out_dim = 0
for _ in range(num_feature_feat):
feature_embeddings = []
feature_attn_model_list = self.config[attr + "_feature_embeddings"]
for feature_attn_model_params in feature_attn_model_list:
feature_embedding = ImageFeatureEmbedding(
getattr(self, attr + "_feature_dim"),
self.text_embeddings_out_dim,
**feature_attn_model_params,
)
feature_embeddings.append(feature_embedding)
self.feature_embeddings_out_dim += feature_embedding.out_dim
feature_embeddings = nn.ModuleList(feature_embeddings)
feature_embeddings_list.append(feature_embeddings)
self.feature_embeddings_out_dim *= getattr(self, attr + "_feature_dim")
setattr(
self, attr + "_feature_embeddings_out_dim", self.feature_embeddings_out_dim
)
del self.feature_embeddings_out_dim
setattr(
self,
attr + "_feature_embeddings_list",
nn.ModuleList(feature_embeddings_list),
)
def _get_embeddings_attr(self, attr):
embedding_attr1 = attr
if hasattr(self, attr + "_embeddings_out_dim"):
embedding_attr1 = attr + "_embeddings_out_dim"
else:
embedding_attr1 = attr + "_feature_embeddings_out_dim"
return embedding_attr1
def _init_combine_layer(self, attr1, attr2):
config_attr = attr1 + "_" + attr2 + "_modal_combine"
multi_modal_combine_layer = ModalCombineLayer(
self.config[config_attr].type,
getattr(self, self._get_embeddings_attr(attr1)),
getattr(self, self._get_embeddings_attr(attr2)),
**self.config[config_attr].params,
)
setattr(
self,
attr1 + "_" + attr2 + "_multi_modal_combine_layer",
multi_modal_combine_layer,
)
def _init_classifier(self, combined_embedding_dim):
# TODO: Later support multihead
num_choices = registry.get(self._datasets[0] + "_num_final_outputs")
self.classifier = ClassifierLayer(
self.config.classifier.type,
in_dim=combined_embedding_dim,
out_dim=num_choices,
**self.config.classifier.params,
)
def _init_extras(self):
self.inter_model = None
def get_optimizer_parameters(self, config):
combine_layer = self.image_text_multi_modal_combine_layer
params = [
{"params": self.word_embedding.parameters()},
{"params": self.image_feature_embeddings_list.parameters()},
{"params": self.text_embeddings.parameters()},
{"params": combine_layer.parameters()},
{"params": self.classifier.parameters()},
{
"params": self.image_feature_encoders.parameters(),
"lr": (config.optimizer.params.lr * 0.1),
},
]
return params
def _get_classifier_input_dim(self):
return self.image_text_multi_modal_combine_layer.out_dim
def process_text_embedding(
self, sample_list, embedding_attr="text_embeddings", info=None
):
text_embeddings = []
# Get "text" attribute in case of "text_embeddings" case
# and "context" attribute in case of "context_embeddings"
texts = getattr(sample_list, embedding_attr.split("_")[0])
# Get embedding models
text_embedding_models = getattr(self, embedding_attr)
for text_embedding_model in text_embedding_models:
# TODO: Move this logic inside
if isinstance(text_embedding_model, PreExtractedEmbedding):
embedding = text_embedding_model(sample_list.question_id)
else:
embedding = text_embedding_model(texts)
text_embeddings.append(embedding)
text_embedding_total = torch.cat(text_embeddings, dim=1)
return text_embedding_total
def process_feature_embedding(
self, attr, sample_list, text_embedding_total, extra=None, batch_size_t=None
):
if extra is None:
extra = []
feature_embeddings = []
feature_attentions = []
features = []
batch_size_t = (
sample_list.get_batch_size() if batch_size_t is None else batch_size_t
)
# Convert list of keys to the actual values
extra = sample_list.get_fields(extra)
feature_idx = 0
# Get all of the features, which are in the form, "image_feature_0"
# "image_feature_1" ...
while True:
feature = getattr(sample_list, f"{attr}_feature_{feature_idx:d}", None)
if feature is None:
break
feature_idx += 1
feature = feature[:batch_size_t]
features.append(feature)
feature_encoders = getattr(self, attr + "_feature_encoders")
# Each feature should have a separate image feature encoder
assert len(features) == len(feature_encoders), (
"Number of feature encoders, {} are not equal "
"to number of features, {}.".format(len(feature_encoders), len(features))
)
# Now, iterate to get final attended image features
for i, feature in enumerate(features):
# Get info related to the current feature. info is generally stored
# under a key of the form "image_info_0" for the 0th feature
feature_info = getattr(sample_list, f"{attr}_info_{i:d}", {})
# For Pythia, we need max_features to mask attention
feature_dim = getattr(feature_info, "max_features", None)
if feature_dim is not None:
feature_dim = feature_dim[:batch_size_t]
# Attribute in which encoders are saved, for "image" it
# will be "image_feature_encoders", other example is
# "context_feature_encoders"
encoders_attr = attr + "_feature_encoders"
feature_encoder = getattr(self, encoders_attr)[i]
# Encode the features
encoded_feature = feature_encoder(feature)
# Get all of the feature embeddings
list_attr = attr + "_feature_embeddings_list"
feature_embedding_models = getattr(self, list_attr)[i]
# Forward through these embeddings one by one
for feature_embedding_model in feature_embedding_models:
inp = (encoded_feature, text_embedding_total, feature_dim, extra)
embedding, attention = feature_embedding_model(*inp)
feature_embeddings.append(embedding)
feature_attentions.append(attention.squeeze(-1))
# Concatenate all feature embeddings and return them along with the attentions
feature_embedding_total = torch.cat(feature_embeddings, dim=1)
return feature_embedding_total, feature_attentions
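# Output contract of the method above: for every feature stream (keys of the
# form "image_feature_0", "image_feature_1", ...) and every attention model
# attached to it, one attended embedding is produced; feature_embedding_total
# concatenates those embeddings along dim 1, and feature_attentions collects
# the corresponding attention maps in the same order.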
def combine_embeddings(self, *args):
feature_names = args[0]
feature_embeddings = args[1]
layer = "_".join(feature_names) + "_multi_modal_combine_layer"
return getattr(self, layer)(*feature_embeddings)
def calculate_logits(self, joint_embedding, **kwargs):
return self.classifier(joint_embedding)
def forward(self, sample_list):
sample_list.text = self.word_embedding(sample_list.text)
text_embedding_total = self.process_text_embedding(sample_list)
image_embedding_total, _ = self.process_feature_embedding(
"image", sample_list, text_embedding_total
)
if self.inter_model is not None:
image_embedding_total = self.inter_model(image_embedding_total)
joint_embedding = self.combine_embeddings(
["image", "text"], [image_embedding_total, text_embedding_total]
)
model_output = {"scores": self.calculate_logits(joint_embedding)}
return model_output
# TODO: Update
@registry.register_model("pythia_question_only")
class PythiaQuestionOnly(Pythia):
def __init__(self, config):
super().__init__(config)
def forward(self, sample_list):
text_embedding_total = self.process_text_embedding(sample_list)
text_embedding_total = text_embedding_total.new_zeros(
text_embedding_total.size()
)
fa_txt = self.image_text_multi_modal_combine_layer.module.fa_txt
dropout = self.image_text_multi_modal_combine_layer.module.dropout
joint_embedding = dropout(fa_txt(text_embedding_total))
linear_text = self.classifier.module.linear_text
f_o_text = self.classifier.module.f_o_text
scores = linear_text(f_o_text(joint_embedding))
model_output = {"scores": scores}
return model_output
# TODO: Update
@registry.register_model("pythia_image_only")
class PythiaImageOnly(Pythia):
def __init__(self, config):
super().__init__(config)
def forward(self, sample_list):
text_embedding_total = self.process_text_embedding(sample_list)
text_embedding_total = text_embedding_total.new_zeros(
text_embedding_total.size()
)
image_embedding_total, _ = self.process_feature_embedding(
"image", sample_list, text_embedding_total
)
if self.inter_model is not None:
image_embedding_total = self.inter_model(image_embedding_total)
fa_image = self.image_text_multi_modal_combine_layer.module.fa_image
dropout = self.image_text_multi_modal_combine_layer.module.dropout
joint_embedding = dropout(fa_image(image_embedding_total))
model_output = {"scores": self.calculate_logits(joint_embedding)}
return model_output
@registry.register_model("multihead")
class PythiaMultiHead(Pythia):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return None
def build(self):
self._build_word_embedding()
self._init_text_embeddings("text")
self._init_feature_encoders("image")
self._init_feature_projectors("image")
self._init_feature_embeddings("image")
self._init_combine_layer("image", "text")
self._init_classifier(self._get_classifier_input_dim())
self._init_extras()
def _init_feature_projectors(self, attr):
feature_projectors = []
feat_encoders_list_config = self.config[attr + "_feature_projections"]
feat_dim = getattr(self, attr + "_feature_dim")
for feat_encoder in feat_encoders_list_config:
feat_encoder_config = copy.deepcopy(feat_encoder)
feat_encoder_config.params.in_dim = feat_dim
feat_model = build_image_encoder(feat_encoder_config, direct_features=True)
feature_projectors.append(feat_model)
setattr(self, attr + "_feature_dim", feat_model.out_dim)
setattr(self, attr + "_feature_projectors", nn.ModuleList(feature_projectors))
def _init_feature_embeddings(self, attr):
feature_embeddings_list = []
num_feature_feat = len(getattr(self.config, f"{attr}_feature_encodings"))
self.feature_embeddings_out_dim = 0
for _ in range(num_feature_feat):
feature_embeddings = []
feature_attn_model_list = self.config[attr + "_feature_embeddings"]
for feature_attn_model_params in feature_attn_model_list:
feature_embedding = MultiHeadImageFeatureEmbedding(
getattr(self, attr + "_feature_dim"),
self.text_embeddings_out_dim,
**feature_attn_model_params,
)
feature_embeddings.append(feature_embedding)
self.feature_embeddings_out_dim += feature_embedding.out_dim
feature_embeddings = nn.ModuleList(feature_embeddings)
feature_embeddings_list.append(feature_embeddings)
setattr(
self, attr + "_feature_embeddings_out_dim", self.feature_embeddings_out_dim
)
del self.feature_embeddings_out_dim
setattr(
self,
attr + "_feature_embeddings_list",
nn.ModuleList(feature_embeddings_list),
)
def process_feature_embedding(
self, attr, sample_list, text_embedding_total, extra=None, batch_size_t=None
):
if extra is None:
extra = []
feature_embeddings = []
feature_attentions = []
features = []
batch_size_t = (
sample_list.get_batch_size() if batch_size_t is None else batch_size_t
)
# Convert list of keys to the actual values
extra = sample_list.get_fields(extra)
feature_idx = 0
# Get all of the features, which are in the form, "image_feature_0"
# "image_feature_1" ...
while True:
feature = getattr(sample_list, f"{attr}_feature_{feature_idx:d}", None)
if feature is None:
break
feature_idx += 1
feature = feature[:batch_size_t]
features.append(feature)
feature_encoders = getattr(self, attr + "_feature_encoders")
# Each feature should have a separate image feature encoder
assert len(features) == len(feature_encoders), (
"Number of feature encoders, {} are not equal "
"to number of features, {}.".format(len(feature_encoders), len(features))
)
# Now, iterate to get final attended image features
for i, feature in enumerate(features):
# Get info related to the current feature. info is generally stored
# under a key of the form "image_info_0" for the 0th feature
feature_info = getattr(sample_list, f"{attr}_info_{i:d}", {})
# For Pythia, we need max_features to mask attention
feature_dim = getattr(feature_info, "max_features", None)
if feature_dim is not None:
feature_dim = feature_dim[:batch_size_t]
# Attribute in which encoders are saved, for "image" it
# will be "image_feature_encoders", other example is
# "context_feature_encoders"
encoders_attr = attr + "_feature_encoders"
feature_encoder = getattr(self, encoders_attr)[i]
# Encode the features
encoded_feature = feature_encoder(feature)
projector_attr = attr + "_feature_projectors"
feature_projector = getattr(self, projector_attr)[i]
encoded_feature = feature_projector(encoded_feature)
# Get all of the feature embeddings
list_attr = attr + "_feature_embeddings_list"
feature_embedding_models = getattr(self, list_attr)[i]
# Forward through these embeddings one by one
for feature_embedding_model in feature_embedding_models:
inp = (encoded_feature, text_embedding_total, feature_dim, extra)
embedding, attention = feature_embedding_model(*inp)
feature_embeddings.append(embedding)
feature_attentions.append(attention.squeeze(-1))
# Concatenate all feature embeddings and return them along with the attentions
feature_embedding_total = torch.cat(feature_embeddings, dim=1)
return feature_embedding_total, feature_attentions
| EXA-1-master | exa/models/mmf-main/mmf/models/pythia.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/util/misc.py
from typing import List
import torch
from torch import Tensor
class NestedTensor:
"""
A data class to hold images of different sizes in a batch.
It contains `tensors`, which holds the images padded to the maximum size in the
batch, and `mask`, which is True over padded pixels and False over the actual
image region of each padded image.
"""
def __init__(self, tensors: Tensor, mask: Tensor):
self.tensors = tensors
self.mask = mask
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
cast_mask = self.mask.to(*args, **kwargs) if self.mask is not None else None
return type(self)(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
@classmethod
def from_tensor_list(cls, tensor_list: List[Tensor]):
"""
Convert a list of images in CHW format (`tensor_list`) to a NestedTensor by
padding them to the maximum image size in the batch.
"""
if tensor_list[0].ndim == 3:
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensor_list]))
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = (len(tensor_list),) + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
else:
raise Exception("tensor_list must contain images in CHW format")
return cls(tensor, mask)
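# Illustrative example: for two images of shapes (3, 20, 30) and (3, 25, 15),
# from_tensor_list pads both into a tensor of shape (2, 3, 25, 30) and builds a
# mask of shape (2, 25, 30) that is False over each image's real pixels and
# True over the zero padding.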
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/models/matcher.py
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
from typing import Dict, List
import torch
from mmf.utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
from scipy.optimize import linear_sum_assignment
from torch import nn, Tensor
class HungarianMatcher(nn.Module):
"""
This class computes an assignment between the targets and the predictions of the
network.
For efficiency reasons, the targets don't include the no_object. Because of this,
in general, there are more predictions than targets. In this case, we do a 1-to-1
matching of the best predictions, while the others are unmatched (and thus treated
as non-objects).
"""
def __init__(
self,
cost_class: float = 1,
cost_bbox: float = 1,
cost_giou: float = 1,
logsoftmax: bool = False,
):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the
matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box
coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box
in the matching cost
logsoftmax: Whether to normalize the classification logits with log-softmax
instead of softmax before computing the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
self.norm = nn.LogSoftmax(-1) if logsoftmax else nn.Softmax(-1)
assert (
cost_class != 0 or cost_bbox != 0 or cost_giou != 0
), "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs: Dict[str, Tensor], targets: List[Dict[str, Tensor]]):
"""Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes]
with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the
predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each
target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is
the number of ground-truth objects in the target) containing the
class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box
coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in
order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = self.norm(
outputs["pred_logits"].flatten(0, 1)
) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
# Compute the giou cost between boxes
cost_giou = -generalized_box_iou(
box_cxcywh_to_xyxy(out_bbox).float(), box_cxcywh_to_xyxy(tgt_bbox)
)
# Final cost matrix
C = (
self.cost_bbox * cost_bbox
+ self.cost_class * cost_class
+ self.cost_giou * cost_giou
)
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [
linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))
]
return [
(
torch.as_tensor(i, dtype=torch.int64),
torch.as_tensor(j, dtype=torch.int64),
)
for i, j in indices
]
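# Usage sketch (hypothetical shapes): with outputs["pred_logits"] of shape
# [2, 100, num_classes], outputs["pred_boxes"] of shape [2, 100, 4] and two
# target dicts holding 3 and 5 ground-truth boxes, the matcher returns a list
# of two (index_i, index_j) tuples of lengths 3 and 5, giving for each image
# which predictions were assigned to which targets.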
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/matcher.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/models/backbone.py
"""
Backbone modules.
"""
import math
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from mmf.models.unit.misc import NestedTensor
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.ops.misc import FrozenBatchNorm2d
class BackboneBase(nn.Module):
def __init__(
self,
backbone: nn.Module,
train_backbone: bool,
num_channels: int,
return_interm_layers: bool,
):
super().__init__()
for name, parameter in backbone.named_parameters():
if (
not train_backbone
or "layer2" not in name
and "layer3" not in name
and "layer4" not in name
):
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {"layer4": 0}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out = OrderedDict()
for name, x in xs.items():
mask = F.interpolate(
tensor_list.mask[None].float(), size=x.shape[-2:]
).bool()[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(
self,
name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=True,
norm_layer=FrozenBatchNorm2d,
)
num_channels = 512 if name in ("resnet18", "resnet34") else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone: nn.Module, position_embedding: nn.Module):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out = []
pos = []
for _, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(
self, num_pos_feats=64, temperature=10000, normalize=False, scale=None
):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
mask = tensor_list.mask
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
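# Output shape: for a NestedTensor whose tensors are [batch, C, H, W] and whose
# mask is [batch, H, W], the returned positional encoding has shape
# [batch, 2 * num_pos_feats, H, W] (sine/cosine of the normalized cumulative
# y positions followed by the x positions), which matches the encoder hidden
# size when num_pos_feats = encoder_hidden_dim // 2 as in
# build_unit_convnet_backbone below.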
def build_unit_convnet_backbone(args):
position_embedding = PositionEmbeddingSine(
args.encoder_hidden_dim // 2, normalize=True
)
train_backbone = args.lr_backbone > 0
return_interm_layers = False
backbone = Backbone(
args.backbone, train_backbone, return_interm_layers, args.dilation
)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/backbone.py |
# Copyright (c) Facebook, Inc. and its affiliates.
__all__ = ["UniT"]
from .unit import UniT
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/models/transformer.py
import copy
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.d_model_enc = args.encoder_hidden_dim
self.d_model_dec = args.decoder_hidden_dim
self.dropout = args.dropout
self.nhead = args.nheads
self.dim_feedforward = args.dim_feedforward
self.num_encoder_layers = args.enc_layers
self.num_decoder_layers = args.dec_layers
self.normalize_before = args.pre_norm
self.return_intermediate_dec = True
self.pass_pos_and_query = args.pass_pos_and_query
self.share_decoders = args.share_decoders
self.activation = "relu"
encoder_layer = TransformerEncoderLayer(
self.d_model_enc,
self.nhead,
self.dim_feedforward,
self.dropout,
self.activation,
self.normalize_before,
)
encoder_norm = nn.LayerNorm(self.d_model_enc) if self.normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, self.num_encoder_layers, encoder_norm
)
if self.d_model_dec != self.d_model_enc:
self.enc2dec_proj = nn.Linear(self.d_model_enc, self.d_model_dec)
self.pos_embed_proj = nn.Linear(self.d_model_enc, self.d_model_dec)
else:
self.enc2dec_proj = nn.Identity()
self.pos_embed_proj = nn.Identity()
if self.share_decoders:
decoder_layer = TransformerDecoderLayer(
self.d_model_dec,
self.nhead,
self.dim_feedforward,
self.dropout,
self.activation,
self.normalize_before,
)
decoder_norm = nn.LayerNorm(self.d_model_dec)
self.decoder = TransformerDecoder(
decoder_layer,
self.num_decoder_layers,
decoder_norm,
return_intermediate=self.return_intermediate_dec,
)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, *args, **kwargs):
raise NotImplementedError()
class UniTTransformer(Transformer):
def __init__(self, args):
super().__init__(args=args)
num_queries = self.args.num_queries
self.decoders = nn.ModuleDict()
for task in num_queries:
task_dict = nn.ModuleDict()
for dataset in num_queries[task]:
if self.share_decoders:
task_dict[dataset] = self.decoder
else:
task_dict[dataset] = self.build_decoder_layer(
d_model_dec=self.d_model_dec,
nhead=self.nhead,
dim_feedforward=self.dim_feedforward,
dropout=self.dropout,
activation=self.activation,
normalize_before=self.normalize_before,
num_decoder_layers=self.num_decoder_layers,
return_intermediate_dec=self.return_intermediate_dec,
)
self.decoders[task] = task_dict
# Optional task embedding prepended to the image encoder inputs
MAX_TASK_NUM = 256
if args.use_task_embedding_in_img_encoder:
self.task_embeddings_enc = nn.Embedding(MAX_TASK_NUM, self.d_model_enc)
# when the task embedding is prepended to the image encoder inputs, strip it
# from the encoder memory outputs to stay compatible with previous models
self.mem_out_begin_idx = 1 if args.use_task_embedding_in_img_encoder else 0
def build_decoder_layer(
self,
d_model_dec=512,
nhead=8,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
):
decoder_layer = TransformerDecoderLayer(
d_model_dec, nhead, dim_feedforward, dropout, activation, normalize_before
)
decoder_norm = nn.LayerNorm(d_model_dec)
return TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
def forward(
self,
img_src: Optional[Tensor] = None,
img_mask: Optional[Tensor] = None,
img_pos: Optional[Tensor] = None,
text_src: Optional[Tensor] = None,
text_mask: Optional[Tensor] = None,
text_pos: Optional[Tensor] = None,
query_embed: Optional[Tensor] = None,
task_type: Optional[str] = None,
dataset_name: Optional[str] = None,
task_idx: Optional[int] = None,
):
# flatten NxCxHxW to HWxNxC
memories = []
pos_embeds = []
masks = []
if img_src is not None:
bs, c, h, w = img_src.shape
img_src = img_src.flatten(2).permute(2, 0, 1)
img_pos = img_pos.flatten(2).permute(2, 0, 1)
img_mask = img_mask.flatten(1)
if text_src is None:
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
if self.pass_pos_and_query:
tgt = torch.zeros_like(query_embed)
else:
img_src, tgt, query_embed, img_pos = (
img_src + 0.1 * img_pos,
query_embed,
None,
None,
)
img_src, img_mask, img_pos = self._prefix_task_embedding_to_encoder_inputs(
img_src, img_mask, img_pos, task_idx
)
memory = self.encoder(img_src, src_key_padding_mask=img_mask, pos=img_pos)
if self.mem_out_begin_idx != 0:
img_src = img_src[self.mem_out_begin_idx :]
img_pos = img_pos[self.mem_out_begin_idx :]
img_mask = img_mask[:, self.mem_out_begin_idx :]
memory = memory[self.mem_out_begin_idx :]
if self.args.residual_in_encoder:
memory = img_src + memory
memory = self.enc2dec_proj(memory)
img_pos = self.pos_embed_proj(img_pos)
memories.append(memory)
pos_embeds.append(img_pos)
masks.append(img_mask)
if text_src is not None:
text_src = text_src.permute(1, 0, 2)
memories.append(text_src)
text_pos = text_pos.unsqueeze(1).repeat(1, text_src.size(1), 1)
pos_embeds.append(text_pos)
masks.append(text_mask != 1)
query_embed = query_embed.unsqueeze(1).repeat(1, text_src.size(1), 1)
if self.pass_pos_and_query:
tgt = torch.zeros_like(query_embed)
else:
raise NotImplementedError()
decoder = self.decoders[task_type][dataset_name]
memories = torch.cat(memories)
masks = torch.cat(masks, dim=-1)
pos_embeds = torch.cat(pos_embeds)
hs = decoder(
tgt,
memories,
memory_key_padding_mask=masks,
pos=pos_embeds,
query_pos=query_embed,
)
hs = hs.transpose(1, 2)
# hs is num_layer x batch_size x seq_length x hidden_dim
return hs, memories.permute(1, 2, 0)
def _prefix_task_embedding_to_encoder_inputs(
self, img_src, img_mask, img_pos, task_idx
):
if not self.args.use_task_embedding_in_img_encoder:
return img_src, img_mask, img_pos
bs = img_src.size(1)
task_embed = self.task_embeddings_enc.weight[task_idx]
task_embed = task_embed.unsqueeze(0).unsqueeze(0).repeat(1, bs, 1)
img_src = torch.cat([task_embed, img_src], dim=0)
# 0 for non-padding in img_mask
img_mask_pad = torch.zeros_like(img_mask[:, :1])
img_mask = torch.cat([img_mask_pad, img_mask], dim=1)
img_pos_pad = torch.zeros_like(img_pos[:1])
img_pos = torch.cat([img_pos_pad, img_pos], dim=0)
return img_src, img_mask, img_pos
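# Effect of the prefix above: the task embedding is prepended as one extra
# "token", so img_src grows from (S, batch, C) to (S + 1, batch, C), img_mask
# from (batch, S) to (batch, S + 1) with a 0 (non-padding) entry for the new
# slot, and img_pos gains an all-zero positional vector for it; the caller
# later strips this slot from the encoder memory via mem_out_begin_idx.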
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
output = src
for layer in self.layers:
output = layer(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
pos=pos,
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(
q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(
q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
return self.forward_post(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from copy import deepcopy
from typing import Dict
import torch
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.models.unit.unit_base_model import (
AttributeHead,
build_detection_loss,
MLP,
UniTBaseModel,
)
from mmf.modules.encoders import TransformerEncoder
from mmf.utils.distributed import byte_tensor_to_object
from torch import nn, Tensor
try:
from transformers3.modeling_bert import BertPredictionHeadTransform
except ImportError:
from transformers.modeling_bert import BertPredictionHeadTransform
logger = logging.getLogger(__name__)
@registry.register_model("unit")
class UniT(BaseModel):
def __init__(self, config):
super().__init__(config)
self.config = config
@classmethod
def config_path(cls):
return "configs/models/unit/defaults.yaml"
# Backward compatibility for checkpoints from older versions, where the
# base model attribute was named detr_model
@classmethod
def format_state_key(cls, key):
return key.replace("detr_model.", "unit_base_model.")
def build(self):
# build the base model (based on DETR)
self.unit_base_model = UniTBaseModel(self.config.base_args)
def keep_only_backbone_params(model_state_dict):
keys = list(model_state_dict.keys())
for k in keys:
if "backbone" not in k:
model_state_dict.pop(k)
ckpt_path = self.config.base_ckpt_path
if ckpt_path != "":
logger.info(f"initializing base model (UniT) from {ckpt_path}")
if ckpt_path.startswith("https"):
base_checkpoint = torch.hub.load_state_dict_from_url(
ckpt_path, check_hash=True
)
else:
base_checkpoint = torch.load(ckpt_path)
if self.config.base_ckpt_load_backbone_only:
keep_only_backbone_params(base_checkpoint["model"])
self.unit_base_model.load_state_dict(
base_checkpoint["model"], strict=False
)
else:
self.unit_base_model.load_state_dict(
base_checkpoint["model"], strict=True
)
# build the text encoder (BERT)
self.bert_model = TransformerEncoder(self.config.base_args.bert_config)
detr_hidden_dim = self.config.base_args.decoder_hidden_dim
bert_config = deepcopy(self.bert_model.config)
self.bert_projection = nn.Linear(bert_config.hidden_size, detr_hidden_dim)
self.bert_pos_projection = nn.Linear(bert_config.hidden_size, detr_hidden_dim)
self.classifiers = nn.ModuleDict()
self.task_embeddings_lang = nn.Identity()
if self.config.base_args.use_task_embedding_in_lang_encoder:
self.task_embeddings_lang = nn.Embedding(
self.config.max_task_num, bert_config.hidden_size
)
bert_config.hidden_size = detr_hidden_dim
# build the task-specific output heads
self.class_embeds = nn.ModuleDict()
self.bbox_embeds = nn.ModuleDict()
self.det_losses = nn.ModuleDict()
for dataset_name in self.config.base_args.num_queries.get("detection", []):
num_cls = self.config.heads["detection"][dataset_name]["num_classes"]
self.class_embeds[dataset_name] = nn.Linear(detr_hidden_dim, num_cls + 1)
self.bbox_embeds[dataset_name] = MLP(detr_hidden_dim, detr_hidden_dim, 4, 3)
attr_head = None
if self.config.heads["detection"][dataset_name]["use_attr"]:
attr_head = AttributeHead(
num_cls, self.config.base_args.attribute_class_num, detr_hidden_dim
)
self.det_losses[dataset_name] = build_detection_loss(
self.config.base_args, num_cls, attr_head
)
vl_classifiers = nn.ModuleDict()
for dataset_name in self.config.base_args.num_queries.get("vl", []):
vl_classifiers[dataset_name] = nn.Sequential(
BertPredictionHeadTransform(bert_config),
nn.Linear(
bert_config.hidden_size,
self.config.heads["vl"][dataset_name]["num_labels"],
),
)
self.classifiers["vl"] = vl_classifiers
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
glue_classifiers = nn.ModuleDict()
for dataset_name in self.config.base_args.num_queries.get("glue", []):
glue_classifiers[dataset_name] = nn.Sequential(
BertPredictionHeadTransform(bert_config),
nn.Linear(
bert_config.hidden_size,
self.config.heads["glue"][dataset_name]["num_labels"],
),
)
self.classifiers["glue"] = glue_classifiers
self.loss_calculation_fn = {}
self.loss_calculation_fn["detection"] = self.detection_loss_calculation
self.loss_calculation_fn["vl"] = self.classifier_loss_calculation
self.loss_calculation_fn["glue"] = self.classifier_loss_calculation
self.losses_dict = {}
self.losses_dict["vl"] = {
name: self.get_loss_fn(self.config.heads["vl"][name]["loss_type"])
for name in self.config.heads["vl"]
}
self.losses_dict["glue"] = {
name: self.get_loss_fn(self.config.heads["glue"][name]["loss_type"])
for name in self.config.heads["glue"]
}
def forward_bert_with_task_idx(self, sample_list):
bert = self.bert_model.module
input_ids = sample_list.input_ids
attention_mask = sample_list.input_mask
token_type_ids = sample_list.segment_ids
device = input_ids.device
position_ids = torch.arange(input_ids.size(1), dtype=torch.long, device=device)
input_shape = input_ids.size()
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = bert.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
)
start_idx = 0
if self.config.base_args.use_task_embedding_in_lang_encoder:
bs = input_ids.size(0)
task_idx = self.get_task_idx(sample_list.dataset_name)
task_embed = self.task_embeddings_lang.weight[task_idx]
task_embed = task_embed.unsqueeze(0).unsqueeze(0).repeat(bs, 1, 1)
embedding_output = torch.cat([task_embed, embedding_output], dim=1)
task_attention_mask = embedding_output.new_ones((bs, 1))
attention_mask = torch.cat([task_attention_mask, attention_mask], dim=1)
start_idx = 1
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None for _ in range(bert.config.num_hidden_layers)]
encoder_outputs = bert.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
)
sequence_output = encoder_outputs[0][:, start_idx:, :]
pos_embeddings = self.bert_model.embeddings.position_embeddings(position_ids)
return sequence_output, pos_embeddings
def forward(self, sample_list):
detr_outputs = {}
task_type = self.get_task_type(sample_list.dataset_name)
text_src = None
text_mask = None
text_pos = None
img_src = None
if task_type == "vl" or task_type == "glue":
if task_type == "vl":
img_src = sample_list.image
text_src, pos_embeddings = self.forward_bert_with_task_idx(sample_list)
# 768 -> 256
text_src = self.bert_projection(text_src)
text_pos = self.bert_pos_projection(pos_embeddings)
text_mask = sample_list.input_mask
if self.config.keep_only_bert_cls[task_type][sample_list.dataset_name]:
# take only the [CLS] token's hidden state
text_src = text_src[:, 0:1, :]
text_pos = text_pos[0:1, :]
text_mask = text_mask[:, 0:1]
elif task_type == "detection":
img_src = sample_list.image
detr_outputs = self.unit_base_model(
img_src=img_src,
text_src=text_src,
text_mask=text_mask,
text_pos=text_pos,
task_type=task_type,
dataset_name=sample_list.dataset_name,
task_idx=self.get_task_idx(sample_list.dataset_name),
)
output = self.loss_calculation_fn[task_type](detr_outputs, sample_list)
return output
def detection_loss_calculation(self, detr_outputs: Dict[str, Tensor], sample_list):
hs = detr_outputs["hidden_states"]
outputs_class = self.class_embeds[sample_list.dataset_name](hs)
outputs_coord = self.bbox_embeds[sample_list.dataset_name](hs).sigmoid()
detr_outputs.update(
{
"pred_logits": outputs_class[-1],
"pred_boxes": outputs_coord[-1],
"hs_for_attr": hs[-1],
}
)
# skip loss computation on test set (which usually doesn't contain labels)
if sample_list.dataset_type != "test":
if self.config.base_args.aux_loss:
detr_outputs["aux_outputs"] = [
{"pred_logits": a, "pred_boxes": b, "hs_for_attr": c}
for a, b, c in zip(outputs_class[:-1], outputs_coord[:-1], hs[:-1])
]
criterion = self.det_losses[sample_list.dataset_name]
targets = [byte_tensor_to_object(t) for t in sample_list.targets_enc]
targets = [{k: v.to(hs.device) for k, v in t.items()} for t in targets]
sample_list.targets = targets
loss_dict = criterion(detr_outputs, sample_list.targets)
weight_dict = criterion.weight_dict
loss_prefix = f"{sample_list.dataset_type}/{sample_list.dataset_name}/"
losses = {
(loss_prefix + f"{k}"): loss_dict[k]
* weight_dict[k]
* self.config.detection_loss_weight
for k in loss_dict.keys()
if k in weight_dict
}
detr_outputs["losses"] = losses
if (
self.config.heads["detection"][sample_list.dataset_name]["use_attr"]
and self.config.predict_attributes
):
hs_for_attr = detr_outputs["hs_for_attr"]
top_obj_class = detr_outputs["pred_logits"][..., :-1].argmax(dim=-1)
attr_head = self.det_losses[sample_list.dataset_name].attribute_head
detr_outputs["attr_logits"] = attr_head(hs_for_attr, top_obj_class)
return detr_outputs
def classifier_loss_calculation(self, detr_outputs: Dict[str, Tensor], sample_list):
task_type = self.get_task_type(sample_list.dataset_name)
hs = detr_outputs["hidden_states"]
if not self.config.loss_on_all_hs:
hs = detr_outputs["hidden_states"][-1:]
num_queries = self.config.base_args.num_queries[task_type][
sample_list.dataset_name
]
assert hs[0].size(1) == num_queries
losses = {}
scores = None
detr_outputs = {}
num_labels = self.config.heads[task_type][sample_list.dataset_name][
"num_labels"
]
for idx, current_hs in enumerate(hs):
pooled_output = current_hs[:, -num_queries, :]
pooled_output = self.dropout(pooled_output)
logits = self.classifiers[task_type][sample_list.dataset_name](
pooled_output
)
reshaped_logits = logits.contiguous().view(-1, num_labels)
scores = reshaped_logits
# skip loss computation on test set (which usually doesn't contain labels)
if sample_list.dataset_type != "test":
loss_prefix = f"{sample_list.dataset_type}/{sample_list.dataset_name}/"
loss = self.losses_dict[task_type][sample_list.dataset_name](
scores, sample_list.targets
)
if sample_list.dataset_name == "vqa2":
loss *= sample_list.targets.size(1)
losses[loss_prefix + f"loss_{idx}"] = loss
detr_outputs["scores"] = scores
detr_outputs["losses"] = losses
return detr_outputs
def get_optimizer_parameters(self, config):
detr_params = [
{
"params": [
p
for n, p in self.unit_base_model.named_parameters()
if "backbone" not in n and p.requires_grad
]
},
{
"params": [
p
for n, p in self.unit_base_model.named_parameters()
if "backbone" in n and p.requires_grad
],
"lr": self.config.base_args.lr_backbone,
},
]
vqa_params = [
{"params": self.bert_model.parameters()},
{"params": self.bert_projection.parameters()},
{"params": self.task_embeddings_lang.parameters()},
{"params": self.bert_pos_projection.parameters()},
{"params": self.classifiers.parameters()},
{"params": self.class_embeds.parameters()},
{"params": self.bbox_embeds.parameters()},
{"params": self.det_losses.parameters()},
]
return vqa_params + detr_params
def get_task_idx(self, dataset_name):
task_type = self.get_task_type(dataset_name)
assert task_type in self.config.heads
return self.config.heads[task_type][dataset_name]["task_idx"]
def get_task_type(self, dataset_name):
task_type = "detection"
if dataset_name in self.config.heads["vl"]:
task_type = "vl"
elif dataset_name in self.config.heads["glue"]:
task_type = "glue"
return task_type
def get_loss_fn(self, loss_type):
if loss_type == "binary_cross_entropy_with_logits":
return nn.functional.binary_cross_entropy_with_logits
elif loss_type == "cross_entropy":
return nn.functional.cross_entropy
else:
raise Exception(f"Unknown loss type: {loss_type}")
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/unit.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Mostly copy-pasted from
# https://github.com/facebookresearch/detr/blob/master/models/detr.py
from typing import Optional
import torch
import torch.nn.functional as F
from mmf.models.unit.backbone import build_unit_convnet_backbone
from mmf.models.unit.matcher import HungarianMatcher
from mmf.models.unit.misc import NestedTensor
from mmf.models.unit.transformer import UniTTransformer
from mmf.utils import box_ops
from mmf.utils.distributed import get_world_size, is_dist_initialized
from torch import nn, Tensor
class UniTBaseModel(nn.Module):
def __init__(self, args):
super().__init__()
self.num_queries = args.num_queries
self.backbone = build_unit_convnet_backbone(args)
self.transformer = UniTTransformer(args)
encoder_hidden_dim = self.transformer.d_model_enc
decoder_hidden_dim = self.transformer.d_model_dec
self.query_embeds = nn.ModuleDict()
for task_type in self.num_queries:
task_dict = nn.ModuleDict()
for dataset in self.num_queries[task_type]:
task_dict[dataset] = nn.Embedding(
self.num_queries[task_type][dataset], decoder_hidden_dim
)
self.query_embeds[task_type] = task_dict
self.input_proj = nn.Conv2d(
self.backbone.num_channels, encoder_hidden_dim, kernel_size=1
)
def forward(
self,
img_src: Tensor,
text_src: Optional[Tensor] = None,
text_mask: Optional[Tensor] = None,
text_pos: Optional[Tensor] = None,
output_hidden_states_only: bool = False,
task_type: str = "detection",
dataset_name: str = "detection_coco",
task_idx: Optional[int] = None,
):
img_mask = None
img_pos = [None]
if img_src is not None:
if not isinstance(img_src, NestedTensor):
img_src = NestedTensor.from_tensor_list(img_src)
features, img_pos = self.backbone(img_src)
img_src, img_mask = features[-1].decompose()
img_src = self.input_proj(img_src)
query_embed = self.query_embeds[task_type][dataset_name]
hs, _ = self.transformer(
img_src=img_src,
img_mask=img_mask,
img_pos=img_pos[-1],
text_src=text_src,
text_mask=text_mask,
text_pos=text_pos,
query_embed=query_embed.weight,
task_type=task_type,
dataset_name=dataset_name,
task_idx=task_idx,
)
if hs is not None:
assert hs.size(2) == self.num_queries[task_type][dataset_name]
return {"hidden_states": hs}
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x: Tensor):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
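# Hedged usage sketch for the MLP above, mirroring how it serves as a DETR-style
# box head elsewhere in this repo. The shapes (256-dim hidden, 100 queries, 6
# decoder layers) are assumptions for the example, not values read from any config.
def _mlp_box_head_example():
    bbox_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    hs = torch.randn(6, 2, 100, 256)     # (num_decoder_layers, bs, num_queries, hidden)
    boxes = bbox_head(hs).sigmoid()      # (6, 2, 100, 4), normalized box coordinates
    return boxes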
class AttributeHead(nn.Module):
def __init__(self, object_class_num, attribute_class_num, representation_size):
super().__init__()
# copy-pasted from
# https://github.com/ronghanghu/vqa-maskrcnn-benchmark-m4c/blob/0fbccee2dfed10d652bcf014f9f8bfafd8478f51/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py#L52-L61 # NoQA
self.cls_embed = nn.Embedding(object_class_num + 1, 256)
self.attr_linear1 = nn.Linear(representation_size + 256, 512)
self.attr_linear2 = nn.Linear(512, attribute_class_num)
nn.init.normal_(self.cls_embed.weight, std=0.01)
nn.init.normal_(self.attr_linear1.weight, std=0.01)
nn.init.normal_(self.attr_linear2.weight, std=0.01)
nn.init.constant_(self.attr_linear1.bias, 0)
nn.init.constant_(self.attr_linear2.bias, 0)
def forward(self, hidden_states: Tensor, labels: Tensor):
# copy-pasted from
# https://github.com/ronghanghu/vqa-maskrcnn-benchmark-m4c/blob/0fbccee2dfed10d652bcf014f9f8bfafd8478f51/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py#L76-L96 # NoQA
# get embeddings of indices using gt cls labels
cls_embed_out = self.cls_embed(labels)
# concat with fc7 feats
concat_attr = torch.cat([hidden_states, cls_embed_out], dim=-1)
# pass through attr head layers
fc_attr = self.attr_linear1(concat_attr)
attr_score = F.relu(self.attr_linear2(fc_attr))
return attr_score
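# Hedged shape sketch for AttributeHead: class labels are embedded, concatenated
# with the per-box hidden states, and mapped to attribute logits. The sizes below
# (1600 classes, 400 attributes, 256-dim features) are assumptions for the sketch.
def _attribute_head_example():
    head = AttributeHead(
        object_class_num=1600, attribute_class_num=400, representation_size=256
    )
    hidden_states = torch.randn(2, 100, 256)     # (bs, num_queries, representation_size)
    labels = torch.randint(0, 1600, (2, 100))    # predicted or ground-truth object classes
    attr_logits = head(hidden_states, labels)    # (2, 100, 400)
    return attr_logits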
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
    1) we compute a Hungarian assignment between the ground-truth boxes and the outputs
of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class
and box)
"""
def __init__(
self,
num_classes,
matcher,
weight_dict,
eos_coef,
losses,
attribute_head=None,
attribute_class_num=None,
max_attribute_num=None,
):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object
category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values
their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of
available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
self.attribute_head = attribute_head
self.attribute_class_num = attribute_class_num
self.max_attribute_num = max_attribute_num
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim
[nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"]
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat(
[t["labels"][J] for t, (_, J) in zip(targets, indices)]
)
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device,
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(
src_logits.transpose(1, 2), target_classes, self.empty_weight
)
losses = {"loss_ce": loss_ce}
if self.attribute_head is not None and "attributes" in targets[0]:
attribute_logits = self.attribute_head(
outputs["hs_for_attr"], target_classes
)
target_attributes_o = torch.cat(
[t["attributes"][J] for t, (_, J) in zip(targets, indices)]
)
target_attributes = -torch.ones(
*src_logits.shape[:2], 16, dtype=torch.int64, device=src_logits.device
)
target_attributes[idx] = target_attributes_o
losses["loss_attr"] = self._attribute_loss(
attribute_logits, target_attributes
)
return losses
def loss_labels_balanced(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim
[nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"]
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat(
[t["labels"][J] for t, (_, J) in zip(targets, indices)]
)
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device,
)
target_classes[idx] = target_classes_o
sl = src_logits.flatten(0, 1)
tc = target_classes.flatten(0, 1)
pos = tc != self.num_classes
loss_pos = F.cross_entropy(sl[pos], tc[pos], reduction="none").sum() / num_boxes
loss_neg = F.cross_entropy(sl[~pos], tc[~pos], reduction="none").sum() / (
sl.shape[0] - num_boxes
)
loss_ce = (1 - self.eos_coef) * loss_pos + self.eos_coef * loss_neg
losses = {"loss_ce": loss_ce}
if self.attribute_head is not None:
raise NotImplementedError()
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
"""Compute the cardinality error, ie the absolute error in the number of
predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't
propagate gradients
"""
pred_logits = outputs["pred_logits"]
device = pred_logits.device
tgt_lengths = torch.as_tensor(
[len(v["labels"]) for v in targets], device=device
)
# Count the number of predictions that are NOT "no-object" (which is the last
# class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {"cardinality_error": card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and
the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim
[nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, w, h),
normalized by the image size.
"""
assert "pred_boxes" in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs["pred_boxes"][idx]
target_boxes = torch.cat(
[t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0
)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
losses = {}
losses["loss_bbox"] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(
box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes).float(),
box_ops.box_cxcywh_to_xyxy(target_boxes),
)
)
losses["loss_giou"] = loss_giou.sum() / num_boxes
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat(
[torch.full_like(src, i) for i, (src, _) in enumerate(indices)]
)
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat(
[torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]
)
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
"labels": self.loss_labels,
"labels_balanced": self.loss_labels_balanced,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for
the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see
each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for
# normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor(
[num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device
)
if is_dist_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each
# intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
if loss in ("labels", "labels_balanced"):
# Logging is enabled only for the last layer
kwargs = {"log": False}
l_dict = self.get_loss(
loss, aux_outputs, targets, indices, num_boxes, **kwargs
)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def _attribute_loss(self, attribute_logits, attributes):
_N, _B, _C = attribute_logits.size()
assert _C == self.attribute_class_num
attribute_logits = attribute_logits.view(_N * _B, _C)
assert attributes.size(0) == _N
assert attributes.size(1) == _B
assert attributes.size(2) == self.max_attribute_num
attributes = attributes.view(_N * _B, self.max_attribute_num)
# https://github.com/ronghanghu/vqa-maskrcnn-benchmark-m4c/blob/0fbccee2dfed10d652bcf014f9f8bfafd8478f51/maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py#L185-L222 # NoQA
n_boxes = attribute_logits.shape[0]
# N_BOXES, N_ATTR -> N_BOXES, 1, N_ATTR
attribute_logits = attribute_logits.unsqueeze(1)
# N_BOXES, 1, N_ATTR -> N_BOXES, MAX_ATTR_PER_INST, N_ATTR
# -> N_BOXES * MAX_ATTR_PER_INST, N_ATTR
attribute_logits = (
attribute_logits.expand(n_boxes, 16, self.attribute_class_num)
.contiguous()
.view(-1, self.attribute_class_num)
)
# Normalize each box loss by # of valid GT attributes (ie attr != -1)
# Repeat number of valid attributes per box along the rows and take transpose
inv_per_box_weights = (
(attributes >= 0).sum(dim=1).repeat(16, 1).transpose(0, 1).flatten()
)
per_box_weights = inv_per_box_weights.float().reciprocal()
per_box_weights[per_box_weights > 1] = 0.0
attributes = attributes.view(-1)
attribute_loss = 0.5 * F.cross_entropy(
attribute_logits, attributes, reduction="none", ignore_index=-1
)
attribute_loss = (attribute_loss * per_box_weights).view(n_boxes, -1).sum(dim=1)
        # Find number of boxes with at least one valid attribute
n_valid_boxes = len(attribute_loss.nonzero())
if n_valid_boxes > 0:
attribute_loss = (attribute_loss / n_valid_boxes).sum()
else:
attribute_loss = (attribute_loss * 0.0).sum()
return attribute_loss
def build_detection_loss(args, num_classes, attribute_head):
matcher = HungarianMatcher(
cost_class=args.set_cost_class,
cost_bbox=args.set_cost_bbox,
cost_giou=args.set_cost_giou,
logsoftmax=args.use_bcl,
)
weight_dict = {"loss_ce": 1, "loss_bbox": args.bbox_loss_coef}
weight_dict["loss_giou"] = args.giou_loss_coef
weight_dict["loss_attr"] = args.attr_loss_coef
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = []
if args.use_bcl:
losses.append("labels_balanced")
else:
losses.append("labels")
losses.extend(["boxes", "cardinality"])
criterion = SetCriterion(
num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=args.eos_coef,
losses=losses,
attribute_head=attribute_head,
attribute_class_num=args.attribute_class_num,
max_attribute_num=args.max_attribute_num,
)
return criterion
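# Hedged illustration of the weight_dict assembled above when auxiliary losses are
# enabled: each per-layer loss name gets an "_{i}" suffix matching SetCriterion's
# aux outputs. The coefficients and layer count are assumptions, not repo defaults.
def _aux_weight_dict_example():
    weight_dict = {"loss_ce": 1, "loss_bbox": 5, "loss_giou": 2, "loss_attr": 1}
    dec_layers = 6
    aux_weight_dict = {}
    for i in range(dec_layers - 1):
        aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
    weight_dict.update(aux_weight_dict)
    return weight_dict  # contains "loss_ce_0" ... "loss_ce_4" in addition to "loss_ce"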
| EXA-1-master | exa/models/mmf-main/mmf/models/unit/unit_base_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import mmf.models.transformers.backends # noqa
from mmf.models.transformers.base import ( # noqa
BaseTransformer,
BaseTransformerBackend,
BaseTransformerBackendConfig,
BaseTransformerHead,
BaseTransformerInput,
BaseTransformerModalityConfig,
)
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Type
from mmf.common.registry import registry
from mmf.models import BaseModel
from mmf.modules.encoders import IdentityEncoder
from mmf.utils.modeling import get_bert_configured_parameters
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
logger = logging.getLogger(__name__)
class BaseTransformerInput(NamedTuple):
input_ids: Dict[str, Tensor] # dict of input ids for all modalities
position_ids: Dict[str, Tensor] # dict of position ids for all modalities
segment_ids: Dict[str, Tensor] # dict of segment/token type ids for all modalities
masks: Dict[str, Tensor] # dict of masks for all modalities
@dataclass
class BaseTransformerModalityConfig:
type: str = MISSING # type of modality, text, image, video, audio etc
key: str = MISSING # name of modality
    # segment id to be used for modality. Each modality should have a different segment id
segment_id: int = MISSING
embedding_dim: int = MISSING # input dimension for modality embedding
position_dim: int = MISSING # input dimension for position embedding
# eps for layer norm, default is base transformer layer_norm_eps
layer_norm_eps: float = 1e-12
# dropout probability, default is base transformer hidden_dropout_prob
hidden_dropout_prob: float = 0.1
# Encoder to be used to encode this particular modality
# This is actually: Union[EncoderFactory.Config, Encoder.Config]
# NOTE: Waiting on https://github.com/omry/omegaconf/issues/144
encoder: Any = IdentityEncoder.Config()
# when type is text, whether to consume raw text or intermediate representations
    # from a frozen text encoder. This can potentially also be used by other modalities.
consume_raw: bool = True
@dataclass
class BaseTransformerBackendConfig:
# Type of the backend, e.g. huggingface
type: str = MISSING
# Whether to freeze the backend parameters
freeze: bool = False
# Parameters for the backend
params: Dict[str, Any] = field(default_factory=lambda: {})
class BaseTransformer(BaseModel):
# NOTE: Please define the values for the config parameters
# in your inherited class
@dataclass
class Config(BaseModel.Config):
# registry key of the model
model: str = MISSING
# name of transformer base model
transformer_base: str = MISSING
# training head type used for initializing head
training_head_type: str = MISSING
# backend of the transformer
backend: BaseTransformerBackendConfig = MISSING
# list of modalities for the model input
modalities: List[BaseTransformerModalityConfig] = MISSING
# std dev of the normal distribution to initialize layers
initializer_range: float = MISSING
# mean of the normal distribution to initialize layers
initializer_mean: float = MISSING
# mean of normal noise for token embeddings
token_noise_std: float = MISSING
# stddev of normal noise for token embeddings
token_noise_mean: float = MISSING
# layer norm weight initialization
layer_norm_weight_fill: float = MISSING
# random initialize whole network
random_initialize: bool = MISSING
# freeze the base transformer
freeze_transformer: bool = MISSING
# finetune lr multiplier for base transformer
finetune_lr_multiplier: float = MISSING
def __init__(self, config: Config):
"""Initialize the config which is the model configuration and transformer_config
which is the config for the `transformer` base model.
"""
super().__init__(config)
self.config = config
def build(self):
"""Build the different parts of the multimodal transformer model and
initializes weights.
"""
self.build_backend()
self.build_encoders()
self.build_heads()
self.build_losses()
self.init_weights()
def get_optimizer_parameters(self, config):
lr = config.optimizer.params.lr
param_list = []
parameters = []
head_configs = self.config.get("heads", [])
for name, module in self.named_children():
# Heads can have different learning rates. This is handled here
if name == "heads":
                # Parameters in the head which have a separate learning
                # rate are added as a separate param group
for head_config, head in zip(head_configs, self.heads):
parameters, param_list = self.set_lr_for_parameters(
config=head_config,
module_name="{} head".format(head_config.get("type", "MLP")),
base_lr=lr,
module=head,
parameters=parameters,
param_list=param_list,
)
elif name == "encoders":
for key in module:
for modality in self.config.modalities:
if key == modality.key:
modality_config = modality
parameters, param_list = self.set_lr_for_parameters(
config=modality_config,
module_name=f"{key} encoder",
base_lr=lr,
module=module[key],
parameters=parameters,
param_list=param_list,
)
else:
# For other modules in trunk, add to same param group
param_list += list(module.named_parameters())
parameters += get_bert_configured_parameters(param_list)
return parameters
def set_lr_for_parameters(
self, config, module_name, base_lr, module, parameters, param_list
):
lr_multiplier = config.get("lr_multiplier", 1.0)
if lr_multiplier != 1.0:
logger.info(
f"Setting learning rate of {module_name} to be "
f"{base_lr} * {lr_multiplier}."
) # noqa
parameters += get_bert_configured_parameters(
module, base_lr * lr_multiplier
)
else:
# Parameters for the modules with same learning rate as
# trunk, add to same param group
param_list += list(module.named_parameters())
return parameters, param_list
def build_encoders(self):
"""Build any encoders for different input modalities. Encoders are used while
        preprocessing a sample. We use the visual_encoder by default for raw image input.
Example ::
# For image
self.image_encoder = ImageEncoder(self.config)
"""
return
def build_backend(self):
"""Build the transformer backend. Use the `BaseTransformerBackend` base class
to inherit from when building a new backend. All the layers in the transformer
backend model will be available (encoder, embeddings etc.) for use. Adjust
your derived class based on the transformer backend you want to use.
"""
backend_config = self.config.get("backend", {})
backend_type = backend_config.get("type", "huggingface")
backend_class = registry.get_transformer_backend_class(backend_type)
self.backend = backend_class(self.config)
if backend_config.get("freeze", False):
for param in self.backend.parameters():
param.requires_grad = False
def build_heads(self):
"""Build the different heads for the model. It can be either the pretraining
head or the classifier heads.
"""
self.heads = nn.ModuleList()
head_configs = self.config.get("heads", [])
for head_config in head_configs:
head_type = head_config.get("type", "mlp")
head_class = registry.get_transformer_head_class(head_type)
self.heads.append(head_class(head_config))
def build_losses(self):
"""Initialize the losses for pretraining. For example MLM, MIM etc.
Example ::
self.mlm_loss = CrossEntropyLoss(ignore_index=-1)
"""
return
def _init_weights(self, module: Type[nn.Module]):
"""Initialize the weights for different layers."""
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(
mean=self.config.initializer_mean, std=self.config.initializer_range
)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(self.config.layer_norm_weight_fill)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def tie_weights(self):
"""Tie the weights between the input embeddings and the output embeddings
if required.
"""
return
def init_weights(self):
if self.config.random_initialize is False:
if self.config.transformer_base is None:
# No pretrained model, init weights
self.apply(self._init_weights)
# Tie weights if required
self.tie_weights()
def preprocess_sample(
self, sample_list: Dict[str, Any]
) -> Dict[str, Dict[str, Tensor]]:
"""Preprocess the sample_list and returns input ids, position ids, segment or
token type ids and masks for different modalities.
Returns:
Dict[str, Dict[str, Tensor]]: containing input_ids, position_ids,
segment_ids, masks
"""
return
def forward(self, sample_list: Dict[str, Any]) -> Dict[str, Tensor]:
r"""Forward pass of the model. The input sample_list can be preprocessed using
the preprocess_sample method which expects to return a
Dict[str, Dict[str, Tensor]] object. It contains different properties of the
input modalities and the masks. These can be used to generate embeddings for
each modality and also create attention mask.
Flow of how the forward pass can be implemented using various modules in
BaseTransformer:
preprocess_sample ||
| ||
generate embeddings ||
| ||
generate attention masks || MODEL
| ||
transformer encoder pass || FLOW
| ||
different head pass || DIRECTION
| ||
postprocess_output ||
| ||
Dict[str, Tensor] output \/
Returns:
Dict[str, Tensor]: Dict containing scores or losses
"""
return
def postprocess_output(self, output: List[Tensor]) -> Dict[str, Tensor]:
"""Postprocessing the output from the transformer head, for pretraining
        it's the output of the pretrain head and for classification it's the output
        of the classification head. Calculate losses on pretraining output or
model output scores.
Returns:
Dict[str, Tensor]: Dict containing scores or losses
"""
return output
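# Hedged sketch (not part of MMF): the forward flow documented above, written out
# as a free function against the base-class API. The sample_list layout and the
# keys returned by preprocess_sample are assumptions for illustration only.
def _base_transformer_flow_sketch(model: BaseTransformer, sample_list: Dict[str, Any]):
    processed = model.preprocess_sample(sample_list)  # ids / positions / segments / masks
    sequence_output, encoded_layers = model.backend(
        processed["input_ids"],
        processed["position_ids"],
        processed["segment_ids"],
        list(processed["masks"].values()),
    )
    head_outputs = [
        head(sequence_output, encoded_layers, processed) for head in model.heads
    ]
    return model.postprocess_output(head_outputs)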
class BaseTransformerBackend(nn.Module, ABC):
def __init__(self, config: BaseTransformer.Config, *args, **kwargs):
super().__init__()
self.config = config
self.build_transformer_config()
self.build_transformer_base()
self.build_embeddings()
@abstractmethod
def build_transformer_config(self):
"""Build the transformer base model config.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def build_transformer_base(self):
"""Build the transformer base model.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def build_embeddings(self):
"""Build the multimodal embeddings using the transformer base
embeddings.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def get_config(self):
"""Return the transformer configuration. This can be the config built
in `build_transformer_config` or the model config passed to init.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def generate_embeddings(
self,
tokens_ids: Dict[str, Tensor],
position_ids: Dict[str, Tensor],
segment_ids: Dict[str, Tensor],
attention_mask: Tensor,
) -> Tensor:
"""Generate the multimodal embeddings.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def generate_attention_mask(self, masks: List[Tensor]) -> Tensor:
"""Generate attention mask.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def generate_encoded_layers(self, embedding, attention_mask) -> List[Tensor]:
"""Generate the output from transformer layers. Return the encoded layers.
Warning: Empty shell for code to be implemented in other class.
"""
def forward(
self,
tokens_ids: Dict[str, Tensor],
position_ids: Dict[str, Tensor],
segment_ids: Dict[str, Tensor],
masks: List[Tensor],
) -> Tuple[Tensor, List[Tensor]]:
# Attention mask
attention_mask = self.generate_attention_mask(masks)
# Multimodal Embeddings
embedding = self.generate_embeddings(
tokens_ids, position_ids, segment_ids, attention_mask
)
# Encoder
encoded_layers = self.generate_encoded_layers(embedding, attention_mask)
# Output Tuple(sequence output, all encoded layers)
return encoded_layers[-1], encoded_layers
class BaseTransformerHead(nn.Module, ABC):
@dataclass
class Config:
type: str = MISSING
# Whether to freeze the head parameters
freeze: bool = False
# LR multiplier for the head, (head_lr = base_lr * lr_multiplier)
lr_multiplier: float = 1.0
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = OmegaConf.create({**asdict(self.Config()), **config})
@classmethod
def from_params(cls, **kwargs):
config = OmegaConf.structured(cls.Config(**kwargs))
return cls(config)
def tie_weights(self, module: Optional[nn.Module] = None):
pass
@abstractmethod
def forward(
self,
sequence_output: Tensor,
encoded_layers: Optional[List[Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, Tensor]]] = None,
) -> Dict[str, Tensor]:
"""Forward for the head module.
Warning: Empty shell for code to be implemented in other class.
"""
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.utils.env import import_files
import_files(__file__, "mmf.models.transformers.backends")
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/backends/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from copy import deepcopy
from typing import Any, Dict, List, Type
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformer, BaseTransformerBackend
from mmf.modules.hf_layers import BertModelJit, replace_with_jit
from omegaconf import OmegaConf
from torch import nn, Tensor
try:
from transformers3 import AutoConfig, AutoModel
except ImportError:
from transformers import AutoConfig, AutoModel
class HuggingfaceEmbeddings(nn.Module):
"""Embedding class that can take any number of image or text modalities, each can
have their input id, position id and segment id. We generate embeddings of
    dimension config.hidden_size for each, add the three embeddings per modality to
    form a modality-specific embedding, and then concatenate the modality-specific
    embeddings into a joint embedding.
"""
def __init__(
self,
model_config: BaseTransformer.Config,
transformer_config: Dict[str, Any],
transformer: Type[nn.Module],
*args,
**kwargs,
):
super().__init__()
self.model_config = model_config
self.transformer_config = transformer_config
self.token_embeddings = nn.ModuleList()
self.pos_embeddings = nn.ModuleList()
self.layer_norms = nn.ModuleList()
self.dropouts = nn.ModuleList()
self.modality_keys: List = []
# Build layers for each modality and initialize
self.build_layers()
self.init_weights(transformer)
assert (
len(self.token_embeddings)
== len(self.pos_embeddings)
== len(self.layer_norms)
== len(self.dropouts)
== len(self.model_config.modalities)
)
def build_layers(self):
for modality in self.model_config.modalities:
self.modality_keys.append(modality.key)
layer_norm_eps = modality.get(
"layer_norm_eps", self.transformer_config.layer_norm_eps
)
position_dim = modality.get(
"position_dim", self.transformer_config.max_position_embeddings
)
hidden_dropout_prob = modality.get(
"hidden_dropout_prob", self.transformer_config.hidden_dropout_prob
)
if modality.type == "text" and modality.get("consume_raw", True):
self.token_embeddings.append(
nn.Embedding(
self.transformer_config.vocab_size,
self.transformer_config.hidden_size,
padding_idx=self.transformer_config.pad_token_id,
)
)
else:
self.token_embeddings.append(
nn.Sequential(
nn.Linear(
modality.embedding_dim, self.transformer_config.hidden_size
),
torch.nn.LayerNorm(
self.transformer_config.hidden_size, eps=layer_norm_eps
),
)
)
self.pos_embeddings.append(
nn.Embedding(position_dim, self.transformer_config.hidden_size)
)
self.layer_norms.append(
torch.nn.LayerNorm(
self.transformer_config.hidden_size, eps=layer_norm_eps
)
)
self.dropouts.append(nn.Dropout(hidden_dropout_prob))
self.token_type_embeddings = nn.Embedding(
len(self.model_config.modalities), self.transformer_config.hidden_size
)
def init_weights(self, transformer: Type[nn.Module]):
for idx, modality in enumerate(self.model_config.modalities):
if modality.type == "text":
self.token_embeddings[idx] = transformer.embeddings.word_embeddings
self.layer_norms[idx] = transformer.embeddings.LayerNorm
self.pos_embeddings[idx].weight = nn.Parameter(
deepcopy(transformer.embeddings.position_embeddings.weight.data),
requires_grad=True,
)
# Token Type or Segment Embeddings
if hasattr(transformer.embeddings, "token_type_embeddings"):
token_vocab_size = self.transformer_config.type_vocab_size
self.token_type_embeddings.weight.data[:token_vocab_size].copy_(
transformer.embeddings.token_type_embeddings.weight
)
for idx in range(token_vocab_size, len(self.model_config.modalities)):
self.token_type_embeddings.weight.data[idx].copy_(
transformer.embeddings.token_type_embeddings.weight.data.mean(dim=0)
)
# Add random normal noise
self.token_type_embeddings.weight.data[idx] += torch.normal(
self.model_config.token_noise_mean,
self.model_config.token_noise_std,
size=self.token_type_embeddings.weight.data[idx].size(),
)
def forward(
self,
tokens_ids: Dict[str, Tensor],
position_ids: Dict[str, Tensor],
segment_ids: Dict[str, Tensor],
) -> Tensor:
list_embeddings = []
for idx, (token_emb, pos_emb, layer_norm, dropout) in enumerate(
zip(
self.token_embeddings,
self.pos_embeddings,
self.layer_norms,
self.dropouts,
)
):
modality_name = self.modality_keys[idx]
total_embedding = token_emb(tokens_ids[modality_name])
if modality_name in position_ids:
total_embedding += pos_emb(position_ids[modality_name])
if modality_name in segment_ids:
total_embedding += self.token_type_embeddings(
segment_ids[modality_name]
)
list_embeddings.append(dropout(layer_norm(total_embedding)))
return torch.cat(list_embeddings, dim=1)
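# Hedged illustration: after the per-modality sum of token + position + segment
# embeddings above, modalities are concatenated along the sequence dimension. The
# lengths below (text=128, image=49) and hidden size 768 are assumptions.
def _joint_embedding_shape_example():
    text_emb = torch.randn(2, 128, 768)    # (bs, text_len, hidden_size)
    image_emb = torch.randn(2, 49, 768)    # (bs, num_regions, hidden_size)
    joint = torch.cat([text_emb, image_emb], dim=1)
    return joint.shape                     # torch.Size([2, 177, 768])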
@registry.register_transformer_backend("huggingface")
class HuggingfaceBackend(BaseTransformerBackend):
"""Transformer backend wih Huggingface transformer models"""
def __init__(self, config: BaseTransformer.Config, *args, **kwargs):
super().__init__(config)
# Replace transformer layers with scriptable JIT layers
replace_with_jit()
def build_transformer_config(self):
"""Build the transformer base model config."""
self.transformer_config = AutoConfig.from_pretrained(
self.config.transformer_base, **OmegaConf.to_container(self.config)
)
def build_transformer_base(self):
"""Build the transformer base model."""
hf_params = {"config": self.transformer_config}
# For BERT models, initialize using Jit version
if self.config.transformer_base.startswith("bert-"):
self.transformer = BertModelJit.from_pretrained(
self.config.transformer_base, **hf_params
)
else:
self.transformer = AutoModel.from_pretrained(
self.config.transformer_base, **hf_params
)
def build_embeddings(self):
"""Build the multimodal embeddings using the transformer base
embeddings.
"""
self.embeddings = HuggingfaceEmbeddings(
self.config, self.transformer_config, self.transformer
)
def get_config(self):
"""Return the transformer configuration."""
return self.transformer_config
def generate_embeddings(
self,
tokens_ids: Dict[str, Tensor],
position_ids: Dict[str, Tensor],
segment_ids: Dict[str, Tensor],
attention_mask: Tensor,
) -> Tensor:
"""Generate multimodal embeddings."""
return self.embeddings(
tokens_ids=tokens_ids, position_ids=position_ids, segment_ids=segment_ids
)
def generate_attention_mask(self, masks: List[Tensor]) -> Tensor:
"""Generate attention mask."""
attention_mask = torch.cat(masks, dim=-1)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def generate_encoded_layers(self, embedding, attention_mask) -> List[Tensor]:
"""Generate the output from transformer layers. For huggingface models the
output encoded layers is a Tuple(last layer output, all layers). So the
order is reversed to match the output order of other backends.
"""
if torch.jit.is_scripting():
encoded_layers = self.transformer.encoder(embedding, attention_mask)
else:
encoded_layers = self.transformer.encoder(
embedding, attention_mask, [None] * len(self.transformer.encoder.layer)
)
return encoded_layers[-1], encoded_layers[0]
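# Hedged illustration (not used by MMF): how the additive attention mask produced
# by generate_attention_mask above behaves on toy per-modality masks. The shapes
# and padding pattern are assumptions for the sketch.
def _attention_mask_example():
    text_mask = torch.tensor([[1, 1, 0]])     # (bs=1, text_len=3), 0 marks padding
    image_mask = torch.ones(1, 2)             # (bs=1, num_regions=2)
    attention_mask = torch.cat([text_mask, image_mask], dim=-1).float()
    extended = (1.0 - attention_mask.unsqueeze(1).unsqueeze(2)) * -10000.0
    # Padded positions get -10000 added to attention scores, real tokens get 0.
    return extended                           # shape (1, 1, 1, 5)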
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/backends/huggingface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Type
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformerHead
from mmf.modules.losses import RefinerContrastiveLoss, RefinerMSLoss
from torch import nn
try:
from transformers3.modeling_bert import BertOnlyMLMHead
except ImportError:
from transformers.modeling_bert import BertOnlyMLMHead
class MLP(nn.Module):
def __init__(
self,
input_dim: int,
mlp_dims: List[int],
dropout: float = 0.3,
nonlinearity: Type[nn.Module] = nn.ReLU,
normalization: Optional[Type[nn.Module]] = nn.LayerNorm,
):
super().__init__()
self.output_dim = mlp_dims[-1]
projection_prev_dim = input_dim
projection_modulelist = []
for mlp_dim in mlp_dims:
projection_modulelist.append(nn.Linear(projection_prev_dim, mlp_dim))
projection_modulelist.append(nonlinearity())
if normalization is not None:
projection_modulelist.append(normalization(mlp_dim))
if dropout != 0:
projection_modulelist.append(nn.Dropout(dropout))
projection_prev_dim = mlp_dim
self.projection = nn.Sequential(*projection_modulelist)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
input_arguments:
@x: torch.FloatTensor
"""
x = self.projection(x)
return x
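# Hedged usage sketch for the MLP above as it is used by the Refiner head below:
# a hidden_size -> hidden_size projection with ReLU, LayerNorm, and dropout. The
# batch size and hidden size are assumptions for the example.
def _refiner_decoder_example():
    decoder = MLP(input_dim=768, mlp_dims=[768], dropout=0.1)
    fused_embedding = torch.randn(4, 768)     # pooled multimodal embedding
    return decoder(fused_embedding)           # (4, 768) reconstruction of one modality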
@registry.register_transformer_head("refiner")
class Refiner(BaseTransformerHead):
@dataclass
class Config(BaseTransformerHead.Config):
type: str = "refiner"
vocab_size: int = 30522
hidden_size: int = 768
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-5
hidden_act: str = "gelu"
ignore_index: int = -1
loss_name: str = "refiner_ss_loss"
loss_type: str = "mse"
refiner_target_pooler: str = "average_k_from_last"
refiner_target_layer_depth: int = 1
label_key: Optional[str] = None
modalities: List[str] = field(default_factory=lambda: ["text", "image"])
weights: List[float] = field(default_factory=lambda: [0.1, 0.1])
tol: float = 0.000001
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.cls = BertOnlyMLMHead(self.config)
loss_dict = dict(
mse=torch.nn.MSELoss(),
cosine=torch.nn.CosineSimilarity(dim=1),
contrastive=RefinerContrastiveLoss(),
ms=RefinerMSLoss(),
)
self.refiner_loss = loss_dict.get(self.config.loss_type)
self.refiner_decoder = {}
self.weights = {}
for i, modality in enumerate(self.config.modalities):
self.refiner_decoder[modality] = MLP(
input_dim=self.config.hidden_size,
mlp_dims=[self.config.hidden_size],
dropout=self.config.hidden_dropout_prob,
nonlinearity=torch.nn.ReLU,
normalization=torch.nn.LayerNorm,
)
self.weights[modality] = self.config.weights[i]
self.modalities = self.config.modalities
self.tol = self.config.tol
self.refiner_target_pooler = self.config.refiner_target_pooler
self.refiner_target_layer_depth = self.config.refiner_target_layer_depth
self.loss_name = self.config.loss_name
pool_class = registry.get_pool_class(self.refiner_target_pooler)
if pool_class is None:
raise ValueError(
f"No pooler {self.refiner_target_pooler} is\
registered to registry"
)
self.pooler = pool_class(self.refiner_target_layer_depth)
def forward(
self,
sequence_output: torch.Tensor,
encoded_layers: Optional[List[torch.Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, torch.Tensor]]] = None,
):
start_token = {}
end_token = {}
prev_end_token = 0
masks = []
for modality in self.modalities:
masks.append(processed_sample_list["masks"][modality])
sz = processed_sample_list["masks"][modality].size()
start_token[modality] = prev_end_token
end_token[modality] = start_token[modality] + sz[1] - 1
prev_end_token = end_token[modality] + 1
pad_mask = torch.cat(masks, dim=1)
processed_sample_list["refiner_outputs"] = {}
processed_sample_list["refiner_outputs"]["fused_embedding"] = self.pooler(
encoded_layers, pad_mask
)
processed_sample_list["refiner_targets"] = {}
for modality in self.modalities:
modality_encodings = []
tk_start = start_token[modality]
tk_end = end_token[modality]
for enc_layers in encoded_layers:
modality_encodings.append(enc_layers[:, tk_start : tk_end + 1, :])
modality_mask_encodings = pad_mask[:, tk_start : tk_end + 1]
processed_sample_list["refiner_targets"][modality] = self.pooler(
modality_encodings, modality_mask_encodings
)
output_dict = {}
prediction = self.cls(sequence_output)
output_dict["logits"] = prediction
output_dict["losses"] = {}
fused_embedding = processed_sample_list["refiner_outputs"]["fused_embedding"]
refiner_reconstruct = {}
for modality in processed_sample_list["refiner_targets"].keys():
local_device = fused_embedding.device
self.refiner_decoder[modality].to(local_device)
refiner_reconstruct[modality] = self.refiner_decoder[modality](
fused_embedding
)
if isinstance(self.refiner_loss, torch.nn.CosineSimilarity):
loss = self.weights[modality] * (
1.0
- torch.mean(
self.refiner_loss(
processed_sample_list["refiner_targets"][modality],
refiner_reconstruct[modality],
)
)
)
elif isinstance(self.refiner_loss, RefinerContrastiveLoss) or isinstance(
self.refiner_loss, RefinerMSLoss
):
modality_targets = {}
modality_targets["targets"] = processed_sample_list["refiner_targets"][
modality
]
refiner_modal_outputs = {}
refiner_modal_outputs["scores"] = refiner_reconstruct[modality]
loss = self.refiner_loss(modality_targets, refiner_modal_outputs)
else:
loss = self.weights[modality] * self.refiner_loss(
processed_sample_list["refiner_targets"][modality],
refiner_reconstruct[modality],
)
if "current_loss" not in locals():
current_loss = loss
else:
current_loss += loss
output_dict["losses"][self.loss_name] = current_loss
output_dict["fused_embedding"] = fused_embedding
return output_dict
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/refiner.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformerHead
try:
from transformers3.modeling_bert import BertOnlyNSPHead, BertPooler
except ImportError:
from transformers.modeling_bert import BertOnlyNSPHead, BertPooler
LABEL_KEY = "itm_labels"
@registry.register_transformer_head("itm")
class ITM(BaseTransformerHead):
@dataclass
class Config(BaseTransformerHead.Config):
type: str = "itm"
hidden_size: int = 768
loss_name: str = "itm_loss"
ignore_index: int = -1
itm_label_key: str = "is_correct"
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
# Head modules
self.pooler = BertPooler(self.config)
self.cls = BertOnlyNSPHead(self.config)
# Loss
self.ce_loss = torch.nn.CrossEntropyLoss(ignore_index=self.config.ignore_index)
def forward(
self,
sequence_output: torch.Tensor,
encoded_layers: Optional[List[torch.Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, torch.Tensor]]] = None,
):
assert (
processed_sample_list is not None
), "ITM head requires 'processed_sample_list' argument"
output_dict = {}
if self.config.itm_label_key in processed_sample_list:
next_sentence_labels = processed_sample_list[self.config.itm_label_key]
else:
assert (
LABEL_KEY in processed_sample_list
and processed_sample_list[LABEL_KEY] is not None
), (
f"ITM pretraining requires {LABEL_KEY} to be in sample "
+ "list with value not None."
)
next_sentence_labels = processed_sample_list[LABEL_KEY][
self.config.itm_label_key
]
pooled_output = self.pooler(sequence_output)
seq_relationship_score = self.cls(pooled_output)
itm_loss = self.ce_loss(
seq_relationship_score.contiguous().view(-1, 2),
next_sentence_labels.contiguous().view(-1),
)
output_dict["losses"] = {}
output_dict["losses"][self.config.loss_name] = itm_loss
return output_dict
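# Hedged sketch of the two-way ITM objective computed above, on toy tensors. The
# label convention (which index means "aligned") is dataset/config dependent and is
# an assumption here, not asserted by this head.
def _itm_loss_example():
    seq_relationship_score = torch.randn(4, 2)          # (bs, 2) logits from BertOnlyNSPHead
    next_sentence_labels = torch.tensor([0, 1, 1, 0])   # per-sample binary ITM targets
    loss = torch.nn.functional.cross_entropy(
        seq_relationship_score.view(-1, 2), next_sentence_labels.view(-1)
    )
    return loss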
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/itm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Dict, Optional
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformerHead
from mmf.models.transformers.heads.mlp import MLP
LABEL_KEY = "three_way_constrastive_labels"
@registry.register_transformer_head("contrastive_three_way")
class ThreeWayContrastive(BaseTransformerHead):
"""Three way contrastive loss used for VinVL pretraining.
Described here https://arxiv.org/pdf/2101.00529
    A thin wrapper around MLP for 3-way classification; effectively ITM with three
    labels: 0 for a matched pair, and 1 and 2 for a corrupted caption or image.
"""
@dataclass
class Config(BaseTransformerHead.Config):
type: str = "three_way_contrastive"
hidden_size: int = 768
loss_name: str = "three_way_contrastive_loss"
ignore_index: int = -1
constrastive_label_key: str = "contrastive_labels"
num_layers: int = 0
num_labels: int = 3
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-6
hidden_act: str = "gelu"
pooler_name: str = "bert_pooler"
in_dim: Optional[int] = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
# Head modules
self.contrast_head = MLP(config=self.config)
# Loss
self.ce_loss = torch.nn.CrossEntropyLoss(ignore_index=self.config.ignore_index)
def forward(
self,
sequence_output: torch.Tensor,
processed_sample_list: Dict[str, Dict[str, torch.Tensor]],
):
output_dict = {}
if self.config.constrastive_label_key in processed_sample_list:
next_sentence_labels = processed_sample_list[
self.config.constrastive_label_key
]
else:
assert (
LABEL_KEY in processed_sample_list
and processed_sample_list[LABEL_KEY] is not None
), (
f"Constrastive three way pretraining requires {LABEL_KEY} to "
+ "be in sample list with value not None."
)
next_sentence_labels = processed_sample_list[LABEL_KEY][
self.config.constrastive_label_key
]
scores = self.contrast_head(sequence_output)["scores"]
constrastive_loss = self.ce_loss(
scores.contiguous().view(-1, 3),
next_sentence_labels.contiguous().view(-1),
)
output_dict["losses"] = {}
output_dict["losses"][self.config.loss_name] = constrastive_loss
return output_dict
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/contrastive.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformerHead
from mmf.modules.losses import LogitBinaryCrossEntropy, MSLoss
from .mlp import MLP
from .refiner import Refiner
logger = logging.getLogger(__name__)
class MLPWithLoss(BaseTransformerHead):
class Config(BaseTransformerHead.Config):
config: MLP.Config
loss_name: str = "classification_loss"
loss: str = "cross_entropy"
max_sample_size: int = 10000
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.loss_name = self.config.loss_name
if self.config.loss == "cross_entropy":
self.loss_fn = nn.CrossEntropyLoss()
elif self.config.loss == "logit_bce":
self.loss_fn = LogitBinaryCrossEntropy()
self.init_output_dict = {}
self.init_output_dict["losses"] = {}
self.mlp_base = MLP(config)
def forward(
self,
sequence_output: torch.Tensor,
encoded_layers: Optional[List[torch.Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, torch.Tensor]]] = None,
):
output_dict = self.mlp_base(
sequence_output, encoded_layers, processed_sample_list
)
scores = output_dict["scores"]
score_max = min(len(scores) - 1, self.config.max_sample_size)
if isinstance(self.loss_fn, nn.CrossEntropyLoss):
if "losses" not in output_dict.keys():
output_dict["losses"] = {}
output_dict["losses"][self.loss_name] = self.loss_fn(
scores[:score_max],
processed_sample_list["target_key"]["targets"][:score_max],
)
elif isinstance(self.loss_fn, LogitBinaryCrossEntropy):
scores_subset = {}
scores_subset["scores"] = scores[:score_max]
targets_subset = {}
targets_subset["targets"] = processed_sample_list["target_key"]["targets"]
targets_subset["targets"] = targets_subset["targets"][:score_max]
if "losses" not in output_dict.keys():
output_dict["losses"] = {}
output_dict["losses"][self.loss_name] = self.loss_fn(
targets_subset, scores_subset
)
return output_dict
@registry.register_transformer_head("refiner_classifier")
class RefinerClassifier(BaseTransformerHead):
@dataclass
class Config(BaseTransformerHead.Config):
type: str = "refiner_classifier"
refiner_config: Optional[Refiner.Config] = None
mlp_loss_config: Optional[MLPWithLoss.Config] = None
msloss_weight: float = 0.1
use_msloss: bool = False
embedding_key: str = "fused_embedding"
num_labels: int = 2
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.refiner_head = Refiner(self.config.refiner_config)
self.mlp_loss_head = MLPWithLoss(self.config.mlp_loss_config)
self.max_sample_size = self.config.mlp_loss_config.max_sample_size
self.msloss_weight = self.config.msloss_weight
if self.config.num_labels > 2:
self.is_multilabel = True
else:
self.is_multilabel = False
if self.config.use_msloss:
self.msloss = MSLoss(is_multilabel=self.is_multilabel)
else:
self.msloss = None
self.emb_f = self.config.embedding_key
def forward(
self,
sequence_output: torch.Tensor,
encoded_layers: Optional[List[torch.Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, torch.Tensor]]] = None,
):
output_dict_refiner = self.refiner_head(
sequence_output, encoded_layers, processed_sample_list
)
output_dict = self.mlp_loss_head(
sequence_output, encoded_layers, processed_sample_list
)
for key in output_dict_refiner["losses"].keys():
if key not in output_dict["losses"].keys():
output_dict["losses"][key] = output_dict_refiner["losses"][key]
for key in output_dict_refiner.keys():
if key not in output_dict.keys():
output_dict[key] = output_dict_refiner[key]
scores = output_dict["scores"]
score_max = min(len(scores) - 1, self.max_sample_size)
if isinstance(self.msloss, MSLoss):
emb_f = self.emb_f
targets_list = {}
targets_list["targets"] = processed_sample_list["target_key"]["targets"][
:score_max
]
subset_score_list = {}
subset_score_list["scores"] = output_dict["scores"][:score_max]
subset_score_list[emb_f] = output_dict[emb_f][:score_max]
output_dict["losses"]["ms_loss"] = self.msloss_weight * self.msloss(
targets_list, subset_score_list
)
return output_dict
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/refnet_classifier.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import warnings
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformerHead
try:
from transformers3.modeling_bert import BertOnlyMLMHead
except ImportError:
from transformers.modeling_bert import BertOnlyMLMHead
LABEL_KEY = "mlm_labels"
COMBINED_LABEL_KEY = "combined_labels"
@registry.register_transformer_head("mlm")
class MLM(BaseTransformerHead):
@dataclass
class Config(BaseTransformerHead.Config):
type: str = "mlm"
vocab_size: int = 30522
hidden_size: int = 768
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-5
hidden_act: str = "gelu"
ignore_index: int = -1
loss_name: str = "masked_lm_loss"
label_key: Optional[str] = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
# Head modules
self.cls = BertOnlyMLMHead(self.config)
self.vocab_size = self.config.vocab_size
# Loss
self.ce_loss = torch.nn.CrossEntropyLoss(ignore_index=self.config.ignore_index)
def tie_weights(self, module: Optional[torch.nn.Module] = None):
self.cls.predictions.decoder.weight = module.weight
def forward(
self,
sequence_output: torch.Tensor,
encoded_layers: Optional[List[torch.Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, torch.Tensor]]] = None,
):
assert (
processed_sample_list is not None
), "MLM head requires 'processed_sample_list' argument"
output_dict = {}
if self.config.label_key is not None:
assert self.config.label_key in processed_sample_list, (
f"Didn't find label key {self.config.label_key} in "
+ "SampleList required by MLM"
)
masked_labels = processed_sample_list[self.config.label_key]
else:
assert (
LABEL_KEY in processed_sample_list
and processed_sample_list[LABEL_KEY] is not None
), (
f"MLM pretraining requires {LABEL_KEY} to be in sample "
+ "list with value not None."
)
assert (
COMBINED_LABEL_KEY in processed_sample_list[LABEL_KEY]
), f"labels for all modalities must be concatenated in {COMBINED_LABEL_KEY}"
masked_labels = processed_sample_list[LABEL_KEY][COMBINED_LABEL_KEY]
masked_tokens = masked_labels.ne(self.config.ignore_index)
masked_labels = masked_labels[masked_tokens]
sequence_output = sequence_output[masked_tokens, :]
prediction = self.cls(sequence_output)
output_dict["logits"] = prediction
masked_lm_loss = self.ce_loss(
prediction.contiguous().view(-1, self.vocab_size),
masked_labels.contiguous().view(-1),
)
# When masked_labels are all ignore_index then masked_lm_loss is NaN,
# so we replace NaN with 0.
if torch.isnan(masked_lm_loss):
warnings.warn("NaN detected in masked_lm_loss. Replacing it with 0.")
masked_lm_loss = torch.nan_to_num(masked_lm_loss, nan=0.0)
output_dict["losses"] = {}
output_dict["losses"][self.config.loss_name] = masked_lm_loss
return output_dict
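# A minimal usage sketch for the MLM head (shapes below are illustrative, and it
# assumes the default Config can be passed straight to the constructor, as the MMF
# unit tests do). Masked positions carry their true token id in the label tensor,
# while every other position holds ignore_index (-1) and is skipped by the loss.
def _mlm_head_usage_sketch():
    head = MLM(MLM.Config())
    bs, seq_len, hidden_size = 2, 8, 768
    sequence_output = torch.rand(bs, seq_len, hidden_size)
    labels = torch.full((bs, seq_len), -1, dtype=torch.long)
    labels[:, 3] = 150  # pretend token id 150 was masked out at position 3
    processed_sample_list = {LABEL_KEY: {COMBINED_LABEL_KEY: labels}}
    output = head(sequence_output, processed_sample_list=processed_sample_list)
    # output["losses"]["masked_lm_loss"] is a scalar; output["logits"] has shape
    # (num_masked_positions, vocab_size), i.e. (2, 30522) here.
    return output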
@registry.register_transformer_head("mlm_multi")
class MLMForMultiHeads(BaseTransformerHead):
def __init__(self, config):
super().__init__(config)
self.mlm_head = MLM(config)
def forward(self, _, processed_sample_list):
mlm_outputs = self.mlm_head(
processed_sample_list["hs_masked_for_mlm"],
processed_sample_list=processed_sample_list,
)
return mlm_outputs
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/mlm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.utils.env import import_files
import_files(__file__, "mmf.models.transformers.heads")
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/ChenRocks/UNITER/
# and adapted for MMF.
from typing import Dict
import torch
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.models.transformers.heads.utils import compute_masked_hidden
from torch import nn, Tensor
@registry.register_transformer_head("mrfr")
class MRFR(nn.Module):
"""
Masked Region Feature Regression transformer head,
From uniter paper https://arxiv.org/pdf/1909.11740.pdf
For an example usage take a look at the unit test.
"""
def __init__(
self,
img_embedding_weight: nn.Parameter,
hidden_size: int = 768,
loss_name: str = "mrfr_loss",
mrfr_target_key: str = "mrfr_region_target",
mrfr_mask_key: str = "mrfr_region_mask",
img_dim: int = 2048,
eps: float = 1e-12,
*args,
**kwargs,
):
super().__init__()
self.loss_name = loss_name
self.mrfr_target_key = mrfr_target_key
self.mrfr_mask_key = mrfr_mask_key
# Head modules
assert img_embedding_weight is not None and tuple(
img_embedding_weight.shape
) == (hidden_size, img_dim), (
"MRFR head requires 'img_embedding_weight' with shape "
+ f"({hidden_size}, {img_dim})."
)
self.linear_proj_weight = img_embedding_weight
self.linear_proj_bias = nn.Parameter(torch.zeros(img_dim))
self.feat_regress = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.GELU(),
nn.LayerNorm(hidden_size, eps=eps),
)
def forward(
self,
sequence_output: Tensor,
processed_sample_list: Dict[str, Dict[str, Tensor]],
) -> Dict[str, Dict[str, Tensor]]:
output_dict = {}
assert (
self.mrfr_target_key in processed_sample_list
and processed_sample_list[self.mrfr_target_key] is not None
), (
f"MRFR pretraining requires {self.mrfr_target_key} to be in sample "
+ "list with value not None."
)
# (bs*num_feat, img_dim) Look at unit test for example usage!
feat_targets = processed_sample_list[self.mrfr_target_key]
assert (
self.mrfr_mask_key in processed_sample_list
and processed_sample_list[self.mrfr_mask_key] is not None
), (
f"MRFR pretraining requires {self.mrfr_mask_key} to be in sample "
+ "list with value not None."
)
# (bs, num_feat)
image_region_masks = processed_sample_list[self.mrfr_mask_key]
masked_output = compute_masked_hidden(sequence_output, image_region_masks)
hidden_states = self.feat_regress(masked_output)
prediction_feat = F.linear(
hidden_states, self.linear_proj_weight.t(), self.linear_proj_bias
)
mrfr_loss = F.mse_loss(prediction_feat, feat_targets, reduction="mean")
output_dict["losses"] = {}
output_dict["losses"][self.loss_name] = mrfr_loss
return output_dict
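# A minimal usage sketch (in the spirit of the unit test referenced above; all
# shapes are illustrative). The head shares `img_embedding_weight` with the image
# embedding layer, so the weight must have shape (hidden_size, img_dim).
def _mrfr_head_usage_sketch():
    hidden_size, img_dim = 768, 2048
    bs, num_feat = 2, 5
    img_embedding_weight = nn.Parameter(torch.rand(hidden_size, img_dim))
    head = MRFR(img_embedding_weight, hidden_size=hidden_size, img_dim=img_dim)
    sequence_output = torch.rand(bs, num_feat, hidden_size)
    region_mask = torch.zeros(bs, num_feat, dtype=torch.bool)
    region_mask[:, 0] = True  # pretend the first region of every sample was masked
    num_masked = int(region_mask.sum())
    processed_sample_list = {
        "mrfr_region_target": torch.rand(num_masked, img_dim),
        "mrfr_region_mask": region_mask,
    }
    output = head(sequence_output, processed_sample_list)
    # output["losses"]["mrfr_loss"] is the MSE between the regressed features of the
    # masked regions, shape (num_masked, img_dim), and the provided targets.
    return output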
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/mrfr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from mmf.common.registry import registry
from mmf.models.transformers.base import BaseTransformerHead
from mmf.modules import layers
from omegaconf import OmegaConf, open_dict
from torch import nn
try:
from transformers3.modeling_bert import BertPooler, BertPredictionHeadTransform
except ImportError:
from transformers.modeling_bert import BertPooler, BertPredictionHeadTransform
@registry.register_transformer_head("multilayer_mlp")
@registry.register_transformer_head("mlp")
class MLP(BaseTransformerHead):
@dataclass
class Config(BaseTransformerHead.Config):
type: str = "mlp"
num_labels: int = 2
hidden_size: int = 768
hidden_dropout_prob: float = 0.1
layer_norm_eps: float = 1e-6
hidden_act: str = "gelu"
pooler_name: str = "bert_pooler"
num_layers: int = 1
in_dim: Optional[int] = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.num_labels = self.config.num_labels
self.hidden_size = self.config.hidden_size
self.in_dim = self.config.in_dim = (
self.hidden_size if self.config.in_dim is None else self.config.in_dim
)
# Head modules
# get_pooler expects hidden_size to be input dim size
pooler_config = OmegaConf.create(dict(self.config, hidden_size=self.in_dim))
pooler_cls = self.get_pooler(self.config.pooler_name)
self.pooler = pooler_cls(pooler_config)
num_layers = config.get("num_layers", 1)
assert num_layers >= 0
layers = []
prediction_head_config = copy.deepcopy(self.config)
for _ in range(num_layers):
layers.append(nn.Dropout(self.config.hidden_dropout_prob))
layers.append(PredictionHeadTransformWithInDim(prediction_head_config))
with open_dict(prediction_head_config):
prediction_head_config.in_dim = prediction_head_config.hidden_size
self.classifier = nn.Sequential(
*layers, nn.Linear(self.hidden_size, self.num_labels)
)
def forward(
self,
sequence_output: torch.Tensor,
encoded_layers: Optional[List[torch.Tensor]] = None,
processed_sample_list: Optional[Dict[str, Dict[str, torch.Tensor]]] = None,
):
assert (
sequence_output.size()[-1] == self.in_dim
), "Mismatch between MLP head hidden_size and sequence_output last dim."
output_dict = {}
pooled_output = self.pooler(sequence_output)
prediction = self.classifier(pooled_output)
output_dict["scores"] = prediction.view(-1, self.num_labels)
return output_dict
def get_pooler(self, pooler_name):
if pooler_name == "bert_pooler":
return BertPooler
elif pooler_name == "identity":
return nn.Identity
elif hasattr(layers, pooler_name):
return getattr(layers, pooler_name)
else:
raise NotImplementedError(f"{pooler_name} is not implemented.")
class PredictionHeadTransformWithInDim(BertPredictionHeadTransform):
def __init__(self, config):
super().__init__(config)
self.dense = nn.Linear(config.in_dim, config.hidden_size)
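# A minimal usage sketch for the MLP classification head. It assumes the head is
# built from an OmegaConf-wrapped Config (as happens when heads are built from yaml
# configs); the label count below is illustrative.
def _mlp_head_usage_sketch():
    head = MLP(OmegaConf.structured(MLP.Config(num_labels=4)))
    sequence_output = torch.rand(2, 16, 768)  # (bs, seq_len, hidden_size)
    output = head(sequence_output)
    # The pooled CLS token goes through dropout, the prediction-head transform and a
    # final linear layer, so output["scores"] has shape (bs, num_labels) == (2, 4).
    return output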
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/mlp.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import collections.abc
from typing import Dict, List, Optional, Union
from mmf.common.registry import registry
from torch import nn, Tensor
def build_heads_dict(head_configs: Union[Dict, List], tasks: List, losses: Dict):
"""
HeadsDict static constructor.
This function either,
returns a list of heads if head_configs is a list,
returns a dict of task: [ head1, head2, ... ] if head_configs is a dict
loss_names are a list or dict describing the loss module used for each head
loss_names has the same shape as heads
head_names is a list or dict containing head name strings
head_names is used to describe bad heads in exceptions
"""
def head_from_config(config):
head_type = config.get("type", "mlp")
head_class = registry.get_transformer_head_class(head_type)
return head_class(config)
if isinstance(head_configs, collections.abc.Sequence):
heads = nn.ModuleList(
[head_from_config(head_conf) for head_conf in head_configs]
)
head_loss_names = [head_conf.get("loss") for head_conf in head_configs]
head_names = [head_conf.get("type", "mlp") for head_conf in head_configs]
if isinstance(head_configs, collections.abc.Mapping):
heads = nn.ModuleDict()
head_names = {} # used to describe head in exceptions
head_loss_names = {}
for task in tasks:
head_config = head_configs.get(task)
if head_config is None:
raise ValueError(
f"No head defined for {task}. Dataset task {task} "
+ "requires a head to return dict with 'losses'"
)
head_config_list = (
head_config
if isinstance(head_config, collections.abc.Sequence)
else [head_config]
)
heads[task] = nn.ModuleList(
[head_from_config(head_conf) for head_conf in head_config_list]
)
head_loss_names[task] = [
head_conf.get("loss") for head_conf in head_config_list
]
head_names[task] = [
head_conf.get("type", "mlp") for head_conf in head_config_list
]
return HeadsDict(heads, head_names, losses, head_loss_names)
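# The two accepted shapes for `head_configs`, sketched as plain data. The loss names
# ("cross_entropy", "logit_bce") and task names ("vqa2", "masked_coco") below are
# hypothetical registry keys; each entry is whatever config its registered head expects.
_EXAMPLE_LIST_HEAD_CONFIGS = [
    # list form: one shared set of heads, used for every task
    {"type": "mlp", "num_labels": 2, "loss": "cross_entropy"},
]
_EXAMPLE_DICT_HEAD_CONFIGS = {
    # dict form: task name -> one config or a list of configs for that task
    "vqa2": [{"type": "mlp", "num_labels": 3129, "loss": "logit_bce"}],
    "masked_coco": {"type": "mlm"},  # a single config is wrapped into a list
}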
class HeadsDict(nn.Module):
"""
HeadsDict class manages the construction and forward pass for
multiple possible heads for multi-task learning.
Construction from list or dict configs is supported,
take a look at `build_heads_dict(head_configs, tasks, losses)`.
"""
def __init__(
self,
heads: Union[nn.ModuleDict, nn.ModuleList],
head_names: Union[Dict, List],
losses: Dict,
head_loss_names: Union[Dict, List],
):
super().__init__()
self.heads = heads
self.head_names = head_names
self.losses = losses
self.head_loss_names = head_loss_names
def forward(
self, task: Optional[str], sequence: Tensor, sample_list: Dict[str, Tensor]
) -> Dict[str, Tensor]:
"""
For a given task, compute the forward for each head
associated with the task, compute the losses for
each head, and sum the losses and scores
"""
if isinstance(self.heads, nn.ModuleList):
heads_modules_list = self.heads
# list of losses, head_losses[i] is the loss name for outputs_list[i]
head_losses = self.head_loss_names
head_names = self.head_names
else:
heads_modules_list = self.heads[task]
head_losses = self.head_loss_names[task]
head_names = self.head_names[task]
# list of dict( head outputs )
outputs_list = [
head(sequence, processed_sample_list=sample_list)
for head in heads_modules_list
]
assert len(head_losses) == len(outputs_list)
# list of dict( losses, scores )
processed_outputs_list = [
self._process_head_output(outputs, loss_name, head_name, sample_list)
for outputs, loss_name, head_name in zip(
outputs_list, head_losses, head_names
)
]
def reduce_losses(accum_result, loss_dict):
for loss_key, loss_val in loss_dict.items():
if loss_key in accum_result:
accum_result[loss_key] += loss_val
else:
accum_result[loss_key] = loss_val
loss_result = {}
for output in processed_outputs_list:
reduce_losses(loss_result, output["losses"])
results = {
"losses": loss_result,
"scores": sum(
[output.get("scores", 0) for output in processed_outputs_list]
),
}
return results
def _process_head_output(
self,
outputs: Union[Dict, Tensor],
loss_name: str,
head_name: str,
sample_list: Dict[str, Tensor],
) -> Dict[str, Tensor]:
if isinstance(outputs, collections.abc.MutableMapping) and "losses" in outputs:
return outputs
if isinstance(outputs, collections.abc.MutableMapping) and "scores" in outputs:
logits = outputs["scores"]
else:
logits = outputs
logits = logits.contiguous().view(-1, logits.size(-1))
if loss_name is None:
raise ValueError(
f"Transformer head {head_name} must either \
define a 'loss' in its config or return \
a dict that contains key 'losses'."
)
output = self.losses[loss_name](sample_list, {"scores": logits})
return {"losses": output, "scores": logits}
def compute_masked_hidden(hidden: Tensor, mask: Tensor) -> Tensor:
"""Get only the masked region.
hidden: tensor, dim (bs, num_feat, feat_dim)
mask: bool tensor, dim (bs, num_feat)
Returns a tensor of dim (bs * num_feat_unmasked, feat_dim),
    containing the feature vectors from hidden at positions where the mask is True.
"""
mask = mask.unsqueeze(-1).expand_as(hidden)
hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1))
return hidden_masked
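# A small worked example of compute_masked_hidden: with a (2, 3, 4) hidden tensor and
# a (2, 3) boolean mask selecting three positions, the result has shape (3, 4).
def _compute_masked_hidden_example():
    import torch
    hidden = torch.arange(24, dtype=torch.float).view(2, 3, 4)
    mask = torch.tensor([[True, False, True], [False, True, False]])
    out = compute_masked_hidden(hidden, mask)
    assert out.shape == (3, 4)  # rows 0, 2 and 4 of the flattened (6, 4) input
    return out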
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/ChenRocks/UNITER/
# and adapted for MMF.
from typing import Dict
import torch
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.models.transformers.heads.utils import compute_masked_hidden
from torch import nn, Tensor
@registry.register_transformer_head("mrc")
class MRC(nn.Module):
def __init__(
self,
hidden_size: int = 768,
loss_name: str = "mrc_loss",
ignore_index: int = -1,
mrc_label_key: str = "region_class",
mrc_mask_key: str = "image_region_mask",
label_dim: int = 1601,
eps: float = 1e-12,
use_kl: bool = True,
*args,
**kwargs,
):
super().__init__()
self.loss_name = loss_name
self.ignore_index = ignore_index
self.mrc_label_key = mrc_label_key
self.mrc_mask_key = mrc_mask_key
self.use_kl = use_kl
# Head modules
self.region_classifier = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.GELU(),
nn.LayerNorm(hidden_size, eps=eps),
nn.Linear(hidden_size, label_dim),
)
def forward(
self,
sequence_output: Tensor,
processed_sample_list: Dict[str, Dict[str, Tensor]],
) -> Dict[str, Dict[str, Tensor]]:
output_dict = {}
assert (
self.mrc_label_key in processed_sample_list
and processed_sample_list[self.mrc_label_key] is not None
), (
f"MRC pretraining requires {self.mrc_label_key} to be in sample "
+ "list with value not None."
)
# (bs*num_feat, label_dim) Look at unit test for example usage!
region_labels = processed_sample_list[self.mrc_label_key]
assert (
self.mrc_mask_key in processed_sample_list
and processed_sample_list[self.mrc_mask_key] is not None
), (
f"MRC pretraining requires {self.mrc_mask_key} to be in sample "
+ "list with value not None."
)
# (bs, num_feat)
image_region_masks = processed_sample_list[self.mrc_mask_key]
masked_output = compute_masked_hidden(sequence_output, image_region_masks)
prediction_soft_label = self.region_classifier(masked_output)
if self.use_kl:
prediction_soft_label = F.log_softmax(prediction_soft_label, dim=-1)
mrc_loss = F.kl_div(
prediction_soft_label, region_labels, reduction="batchmean"
)
else:
# background class should not be the target
label_targets = torch.max(region_labels[:, 1:], dim=-1)[1] + 1
mrc_loss = F.cross_entropy(
prediction_soft_label,
label_targets,
ignore_index=self.ignore_index,
reduction="mean",
)
output_dict["losses"] = {}
output_dict["losses"][self.loss_name] = mrc_loss
return output_dict
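# A minimal usage sketch (shapes illustrative). With use_kl=True (the default) the
# targets are soft distributions over label_dim detector classes for each masked
# region, and the loss is a KL divergence against the head's log-probabilities.
def _mrc_head_usage_sketch():
    bs, num_feat, hidden_size, label_dim = 2, 5, 768, 1601
    head = MRC(hidden_size=hidden_size, label_dim=label_dim)
    sequence_output = torch.rand(bs, num_feat, hidden_size)
    region_mask = torch.zeros(bs, num_feat, dtype=torch.bool)
    region_mask[:, -1] = True  # pretend the last region of every sample was masked
    num_masked = int(region_mask.sum())
    soft_labels = torch.softmax(torch.rand(num_masked, label_dim), dim=-1)
    processed_sample_list = {
        "region_class": soft_labels,
        "image_region_mask": region_mask,
    }
    output = head(sequence_output, processed_sample_list)
    # output["losses"]["mrc_loss"] is a scalar KL divergence (batchmean reduction).
    return output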
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/mrc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/ChenRocks/UNITER/
# and adapted for MMF.
from typing import Dict
from mmf.common.registry import registry
from mmf.modules.ot import optimal_transport_dist
from torch import nn, Tensor
@registry.register_transformer_head("wra")
class WRA(nn.Module):
"""
Word Region Alignment from UNITER.
Optimal Transport (OT) distance between text and image
features is used to optimize for WRA.
OT transport plan (T) is approximated through IPOT.
"""
def __init__(
self,
loss_name: str = "wra_loss",
ot_inputs_key: str = "wra_info",
wra_label_key: str = "is_correct",
*args,
**kwargs,
):
super().__init__()
self.loss_name = loss_name
self.ot_inputs_key = ot_inputs_key
self.wra_label_key = wra_label_key
def forward(
self,
sequence_output: Tensor,
processed_sample_list: Dict[str, Dict[str, Tensor]],
) -> Dict[str, Dict[str, Tensor]]:
output_dict = {}
assert (
self.ot_inputs_key in processed_sample_list
and processed_sample_list[self.ot_inputs_key] is not None
), (
f"WRA pretraining requires {self.ot_inputs_key} to be in sample "
+ "list with value not None."
)
ot_inputs = processed_sample_list[self.ot_inputs_key]
assert (
ot_inputs.get("txt_pad") is not None
and ot_inputs.get("img_pad") is not None
), (
"WRA pretraining requires 'txt_pad', and 'img_pad' to be in "
+ f"'processed_sample_list[{self.ot_inputs_key}]' with"
+ " values not None."
)
assert processed_sample_list.get(self.wra_label_key) is not None, (
f"WRA pretraining requires {self.wra_label_key} to be in sample "
+ "list with value not None."
)
ctx_emb = sequence_output
tl = processed_sample_list["input_ids"].size(1)
il = processed_sample_list["image_feat"].size(1)
txt_emb = ctx_emb[:, :tl, :]
img_emb = ctx_emb[:, tl : tl + il, :]
txt_pad = ot_inputs["txt_pad"].bool()
img_pad = ot_inputs["img_pad"].bool()
itm_labels = processed_sample_list[self.wra_label_key]
# NOTE: run in fp32 for stability
ot_dist = optimal_transport_dist(
txt_emb.float(), img_emb.float(), txt_pad, img_pad
).to(txt_emb)
ot_pos = ot_dist.masked_select(itm_labels == 1)
ot_neg = ot_dist.masked_select(itm_labels == 0)
ot_loss = (ot_pos.sum() - ot_neg.sum()) / (ot_pos.size(0) + ot_neg.size(0))
output_dict["losses"] = {}
output_dict["losses"][self.loss_name] = ot_loss
return output_dict
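# A minimal usage sketch (shapes illustrative). `txt_pad`/`img_pad` flag padded
# positions, and `is_correct` flags whether each text/image pair is aligned (1) or a
# sampled negative (0); the OT distance is minimized for aligned pairs and maximized
# for negatives.
def _wra_head_usage_sketch():
    import torch
    bs, tl, il, hidden_size = 2, 4, 3, 768
    head = WRA()
    sequence_output = torch.rand(bs, tl + il, hidden_size)
    processed_sample_list = {
        "input_ids": torch.zeros(bs, tl, dtype=torch.long),
        "image_feat": torch.rand(bs, il, 2048),
        "wra_info": {
            "txt_pad": torch.zeros(bs, tl, dtype=torch.long),
            "img_pad": torch.zeros(bs, il, dtype=torch.long),
        },
        "is_correct": torch.tensor([1, 0]),
    }
    output = head(sequence_output, processed_sample_list)
    # output["losses"]["wra_loss"] combines the IPOT-approximated OT distances of the
    # positive and negative pairs into a single scalar.
    return output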
| EXA-1-master | exa/models/mmf-main/mmf/models/transformers/heads/wra.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import mmf.models.albef.vit # noqa
| EXA-1-master | exa/models/mmf-main/mmf/models/albef/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/rwightman/pytorch-image-models
# which was cleaned up and adapted for MMF.
import collections.abc
import math
import warnings
from dataclasses import dataclass
from functools import partial
from itertools import repeat
import omegaconf
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.modules.encoders import Encoder
@registry.register_encoder("albef_vit_encoder")
class AlbefVitEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "albef_vit_encoder"
pretrained: bool = False
out_dim: int = 768
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config.get("params", {})
pretrained = config.get("pretrained", False)
pretrained_path = config.get("pretrained_path", None)
self.vit = VisionTransformer(self.config)
if pretrained:
state_dict = torch.load(pretrained_path)
self.vit.load_state_dict(state_dict)
self.vit.eval()
def forward(self, x: torch.Tensor):
x = self.vit(x)
return x
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization
layers.
Papers:
DropBlock: A regularization method for convolutional networks
(https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/
resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
def drop_block_2d(
x,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False,
):
"""DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on
a few training runs with success, but needs further validation and possibly
optimization for lower runtime impact.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = (
gamma_scale
* drop_prob
* total_size
/ clipped_block_size**2
/ ((W - block_size + 1) * (H - block_size + 1))
)
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(
torch.arange(W).to(x.device), torch.arange(H).to(x.device)
)
valid_block = (
(w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)
) & ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2,
)
if with_noise:
normal_noise = (
torch.randn((1, C, H, W), dtype=x.dtype, device=x.device)
if batchwise
else torch.randn_like(x)
)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (
block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)
).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False,
):
"""DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    DropBlock with an experimental gaussian noise option. Simplified from the above without
concern for valid block mask at edges.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = (
gamma_scale
* drop_prob
* total_size
/ clipped_block_size**2
/ ((W - block_size + 1) * (H - block_size + 1))
)
if batchwise:
# one mask for whole batch, quite a bit faster
block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
else:
# mask per batch element
block_mask = torch.rand_like(x) < gamma
block_mask = F.max_pool2d(
block_mask.to(x.dtype),
kernel_size=clipped_block_size,
stride=1,
padding=clipped_block_size // 2,
)
if with_noise:
normal_noise = (
torch.randn((1, C, H, W), dtype=x.dtype, device=x.device)
if batchwise
else torch.randn_like(x)
)
if inplace:
x.mul_(1.0 - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1.0 - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (
block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)
).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
"""DropBlock. See https://arxiv.org/pdf/1810.12890.pdf"""
def __init__(
self,
drop_prob=0.1,
block_size=7,
gamma_scale=1.0,
with_noise=False,
inplace=False,
batchwise=False,
fast=True,
):
super().__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x,
self.drop_prob,
self.block_size,
self.gamma_scale,
self.with_noise,
self.inplace,
self.batchwise,
)
else:
return drop_block_2d(
x,
self.drop_prob,
self.block_size,
self.gamma_scale,
self.with_noise,
self.inplace,
self.batchwise,
)
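# A short usage sketch: unlike elementwise dropout, DropBlock zeroes out contiguous
# spatial blocks of a convolutional feature map during training and rescales the
# surviving activations.
def _drop_block_usage_sketch():
    drop_block = DropBlock2d(drop_prob=0.1, block_size=3)
    drop_block.train()
    features = torch.rand(2, 8, 16, 16)  # (B, C, H, W)
    return drop_block(features)  # same shape, with 3x3 blocks zeroed out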
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual
blocks). This is the same as the DropConnect impl I created for EfficientNet, etc
networks, however, the original name is misleading as 'Drop Connect' is a
different form of dropout in a separate paper... See discussion:
https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ...
I've opted for changing the layer and argument names to 'drop path' rather than mix
DropConnect as a layer name and use 'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample
(when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super().__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
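# A short worked example: with drop_prob=0.5 and training=True, each sample in the
# batch is either zeroed entirely or scaled by 1 / keep_prob = 2, so the expected
# value of the output matches the input.
def _drop_path_example():
    x = torch.ones(4, 3)
    out = drop_path(x, drop_prob=0.5, training=True)
    # every row of `out` is either all zeros or all 2.0
    return out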
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in more releases - RW
# Method based on
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""
# type: (Tensor, float, float, float, float) -> Tensor
Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class PatchEmbed(nn.Module):
"""2D Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
norm_layer=None,
flatten=True,
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], (
f"Input image size ({H}*{W}) doesn't match model "
f"({self.img_size[0]}*{self.img_size[1]})."
)
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
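# A short shape check: with the defaults (224x224 input, 16x16 patches, 768-dim
# embeddings) an image batch becomes a sequence of 14 * 14 = 196 patch tokens.
def _patch_embed_example():
    patch_embed = PatchEmbed()
    images = torch.rand(2, 3, 224, 224)
    tokens = patch_embed(images)
    assert tokens.shape == (2, 196, 768)
    return tokens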
class Mlp(nn.Module):
"""MLP as used in Vision Transformer, MLP-Mixer and related networks"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
"""Attention Layer as used in Vision Transformer."""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version,
# can set manually to be compat with prev weights
self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_gradients = None
self.attention_map = None
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def forward(self, x, register_hook=False):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
if register_hook:
self.save_attention_map(attn)
attn.register_hook(self.save_attn_gradients)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
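# A short sketch of the attention-inspection hooks: calling forward with
# register_hook=True stores the attention map and, after a backward pass, its
# gradients, which attribution code (e.g. Grad-CAM style visualizations) can read back.
def _attention_hook_sketch():
    attn_layer = Attention(dim=64, num_heads=4)
    tokens = torch.rand(1, 10, 64, requires_grad=True)
    out = attn_layer(tokens, register_hook=True)
    out.sum().backward()
    attention_map = attn_layer.get_attention_map()  # (1, num_heads, 10, 10)
    attention_grads = attn_layer.get_attn_gradients()  # same shape, saved by the hook
    return attention_map, attention_grads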
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth,
# we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
def forward(self, x, register_hook=False):
x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class VisionTransformer(nn.Module):
"""Vision Transformer
A PyTorch impl of :
`An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
https://arxiv.org/abs/2010.11929
"""
def __init__(self, config: omegaconf.DictConfig):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer
(pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
            norm_layer (nn.Module): normalization layer
"""
super().__init__()
self.img_size = config.get("img_size", 224)
self.patch_size = config.get("patch_size", 16)
self.in_chans = config.get("in_chans", 3)
self.num_classes = config.get("num_classes", 1000)
self.embed_dim = config.get("embed_dim", 768)
self.depth = config.get("depth", 12)
self.num_heads = config.get("num_heads", 12)
self.mlp_ratio = config.get("mlp_ratio", 4.0)
self.qkv_bias = config.get("qkv_bias", True)
self.qk_scale = config.get("qk_scale", None)
self.representation_size = config.get("representation_size", None)
self.drop_rate = config.get("drop_rate", 0.0)
self.attn_drop_rate = config.get("attn_drop_rate", 0.0)
self.drop_path_rate = config.get("drop_path_rate", 0.0)
self.norm_layer = config.get("norm_layer", None)
self.num_features = (
self.embed_dim
) # num_features for consistency with other models
norm_layer = self.norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=self.img_size,
patch_size=self.patch_size,
in_chans=self.in_chans,
embed_dim=self.embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, self.embed_dim))
self.pos_drop = nn.Dropout(p=self.drop_rate)
dpr = [
x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=self.embed_dim,
num_heads=self.num_heads,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
qk_scale=self.qk_scale,
drop=self.drop_rate,
attn_drop=self.attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)
for i in range(self.depth)
]
)
self.norm = norm_layer(self.embed_dim)
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward(self, images: torch.Tensor, register_blk=-1):
B = images.shape[0]
x = self.patch_embed(images)
cls_tokens = self.cls_token.expand(
B, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed[:, : x.size(1), :]
x = self.pos_drop(x)
for i, blk in enumerate(self.blocks):
x = blk(x, register_blk == i)
x = self.norm(x)
return x
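# A minimal construction sketch: the config keys below are the ones read in __init__
# (anything omitted falls back to the defaults listed there), and the output is one
# CLS token followed by one token per image patch.
def _vision_transformer_sketch():
    config = omegaconf.OmegaConf.create(
        {"img_size": 224, "patch_size": 16, "embed_dim": 768, "depth": 12}
    )
    vit = VisionTransformer(config)
    images = torch.rand(1, 3, 224, 224)
    features = vit(images)
    assert features.shape == (1, 1 + (224 // 16) ** 2, 768)  # (1, 197, 768)
    return features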
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
# interpolate position embedding
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = visual_encoder.patch_embed.num_patches
num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches**0.5)
if orig_size != new_size:
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(
-1, orig_size, orig_size, embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
print(
"reshape position embedding from %d to %d" % (orig_size**2, new_size**2)
)
return new_pos_embed
else:
return pos_embed_checkpoint
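# A short sketch of adapting a checkpoint trained at a different resolution: the
# 14x14 position grid of a 224px model is bicubically resized to the 24x24 grid of a
# 384px model, while the extra (CLS) token embedding is passed through untouched.
# Depth is kept at 1 since only the position embeddings matter here.
def _interpolate_pos_embed_sketch():
    src = VisionTransformer(omegaconf.OmegaConf.create({"img_size": 224, "depth": 1}))
    dst = VisionTransformer(omegaconf.OmegaConf.create({"img_size": 384, "depth": 1}))
    new_pos_embed = interpolate_pos_embed(src.pos_embed, dst)
    assert new_pos_embed.shape == dst.pos_embed.shape  # (1, 1 + 24 * 24, 768)
    return new_pos_embed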
| EXA-1-master | exa/models/mmf-main/mmf/models/albef/vit.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/models/interfaces/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
from pathlib import Path
from typing import Type, Union
import torch
import torchvision.datasets.folder as tv_helpers
from mmf.common.sample import Sample, SampleList
from mmf.models.base_model import BaseModel
from mmf.utils.build import build_processors
from mmf.utils.download import download
from mmf.utils.general import get_current_device
from omegaconf import DictConfig
from PIL import Image
from torch import nn
MMBT_GRID_HM_CONFIG_PATH = Path("projects/hateful_memes/configs/mmbt/defaults.yaml")
ImageType = Union[Type[Image.Image], str]
PathType = Union[Type[Path], str]
BaseModelType = Type[BaseModel]
class MMBTGridHMInterface(nn.Module):
"""Interface for MMBT Grid for Hateful Memes."""
def __init__(self, model: BaseModelType, config: DictConfig):
super().__init__()
self.model = model
self.config = config
self.init_processors()
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def init_processors(self):
config = self.config.dataset_config.hateful_memes
extra_params = {"data_dir": config.data_dir}
self.processor_dict = build_processors(config.processors, **extra_params)
def classify(self, image: ImageType, text: str):
"""Classifies a given image and text in it into Hateful/Non-Hateful.
Image can be a url or a local path or you can directly pass a PIL.Image.Image
object. Text needs to be a sentence containing all text in the image.
>>> from mmf.models.mmbt import MMBT
>>> model = MMBT.from_pretrained("mmbt.hateful_memes.images")
>>> model.classify("some_url", "some_text")
{"label": 0, "confidence": 0.56}
Args:
image (ImageType): Image to be classified
text (str): Text in the image
Returns:
            dict: "label" (1 for hateful, 0 for non-hateful) and its "confidence"
"""
if isinstance(image, str):
if image.startswith("http"):
temp_file = tempfile.NamedTemporaryFile()
download(image, *os.path.split(temp_file.name), disable_tqdm=True)
image = tv_helpers.default_loader(temp_file.name)
temp_file.close()
else:
image = tv_helpers.default_loader(image)
text = self.processor_dict["text_processor"]({"text": text})
image = self.processor_dict["image_processor"](image)
sample = Sample()
sample.text = text["text"]
if "input_ids" in text:
sample.update(text)
sample.image = image
sample_list = SampleList([sample])
sample_list = sample_list.to(get_current_device())
output = self.model(sample_list)
scores = nn.functional.softmax(output["scores"], dim=1)
confidence, label = torch.max(scores, dim=1)
return {"label": label.item(), "confidence": confidence.item()}
| EXA-1-master | exa/models/mmf-main/mmf/models/interfaces/mmbt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import warnings
import omegaconf
import torch
from mmf.common.registry import registry
from mmf.datasets.multi_datamodule import MultiDataModule
from mmf.modules.metrics import Metrics
from mmf.trainers.base_trainer import BaseTrainer
from mmf.trainers.callbacks.checkpoint import CheckpointCallback
from mmf.trainers.callbacks.early_stopping import EarlyStoppingCallback
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from mmf.trainers.core.callback_hook import TrainerCallbackHookMixin
from mmf.trainers.core.device import TrainerDeviceMixin
from mmf.trainers.core.evaluation_loop import TrainerEvaluationLoopMixin
from mmf.trainers.core.profiling import TrainerProfilingMixin
from mmf.trainers.core.training_loop import TrainerTrainingLoopMixin
from mmf.utils.build import build_model, build_optimizer
from mmf.utils.general import print_model_parameters
from omegaconf import DictConfig, OmegaConf
from packaging import version
logger = logging.getLogger(__name__)
@registry.register_trainer("mmf")
class MMFTrainer(
TrainerCallbackHookMixin,
TrainerTrainingLoopMixin,
TrainerDeviceMixin,
TrainerEvaluationLoopMixin,
TrainerProfilingMixin,
BaseTrainer,
):
def __init__(self, config: DictConfig):
super().__init__(config)
def load(self):
super().load()
self.load_fp16_scaler()
# Callbacks
self.on_init_start()
        # Parallelize model
self.parallelize_model()
# Callbacks
self.on_init_end()
def configure_callbacks(self):
self.checkpoint_callback = CheckpointCallback(self.config, self)
self.early_stop_callback = EarlyStoppingCallback(self.config, self)
self.logistics_callback = LogisticsCallback(self.config, self)
self.lr_scheduler_callback = LRSchedulerCallback(self.config, self)
# Reset callbacks as they are class variables and would be shared between
# multiple interactive shell calls to `run`
self.callbacks = []
# Add callbacks for execution during events
self.callbacks.append(self.lr_scheduler_callback)
# checkpoint_callback needs to be called after lr_scheduler_callback so that
# lr_scheduler_callback._scheduler.step() happens before saving checkpoints
# (otherwise the saved last_epoch in scheduler would be wrong)
self.callbacks.append(self.checkpoint_callback)
self.callbacks.append(self.logistics_callback)
# Add all customized callbacks defined by users
for callback in self.config.training.get("callbacks", []):
callback_type = callback.type
callback_param = callback.params
callback_cls = registry.get_callback_class(callback_type)
self.callbacks.append(callback_cls(self.config, self, **callback_param))
def load_datasets(self):
logger.info("Loading datasets")
self.dataset_loader = MultiDataModule(self.config)
self.train_loader = self.dataset_loader.train_dataloader()
self.val_loader = self.dataset_loader.val_dataloader()
self.test_loader = self.dataset_loader.test_dataloader()
def load_model(self):
logger.info("Loading model")
if self.config.model in self.config.model_config:
attributes = self.config.model_config[self.config.model]
else:
warnings.warn(
f"Model {self.config.model}'s config not present. "
+ "Continuing with empty config"
)
attributes = OmegaConf.create()
# Easy way to point to config for other model
if isinstance(attributes, str):
attributes = self.config.model_config[attributes]
with omegaconf.open_dict(attributes):
attributes.model = self.config.model
self.model = build_model(attributes)
self.model = self.model.to(self.device)
def load_optimizer(self):
logger.info("Loading optimizer")
self.optimizer = build_optimizer(self.model, self.config)
def load_metrics(self) -> None:
logger.info("Loading metrics")
metrics = self.config.evaluation.get("metrics", [])
self.metrics = Metrics(metrics)
self.metrics_params = self.metrics.required_params
def load_fp16_scaler(self):
if self.training_config.fp16:
assert version.parse(torch.__version__) >= version.parse(
"1.6"
), f"Using fp16 requires torch version >- 1.6, found: {torch.__version__}"
assert self.device != torch.device("cpu"), "fp16 cannot be used on cpu"
set_torch_grad_scaler = True
if self.training_config.fp16 and self.distributed:
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
from fairscale.optim.oss import OSS
if isinstance(self.optimizer, OSS):
self.scaler = ShardedGradScaler()
set_torch_grad_scaler = False
logger.info("Using FairScale ShardedGradScaler")
except ImportError:
logger.info("Using Pytorch AMP GradScaler")
if set_torch_grad_scaler:
self.scaler = torch.cuda.amp.GradScaler(enabled=self.training_config.fp16)
def train(self):
logger.info("===== Model =====")
logger.info(self.model)
print_model_parameters(self.model)
if "train" in self.run_type:
self.on_train_start()
self.training_loop()
self.on_train_end()
self.inference()
self.finalize()
def inference(self):
dataset_type = []
if "val" in self.run_type:
dataset_type.append("val")
if any(rt in self.run_type for rt in ["inference", "test", "predict"]):
dataset_type.append("test")
for dataset in dataset_type:
if self.config.evaluation.predict:
self.on_prediction_start()
self.prediction_loop(dataset)
self.on_prediction_end()
else:
self.on_test_start()
logger.info(f"Starting inference on {dataset} set")
report, meter = self.evaluation_loop(dataset, use_tqdm=True)
self.on_test_end(report=report, meter=meter)
def finalize(self):
self.dataset_loader.teardown()
self.teardown()
| EXA-1-master | exa/models/mmf-main/mmf/trainers/mmf_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import ABC, abstractmethod
from mmf.common.registry import registry
from mmf.utils.logger import log_class_usage
from omegaconf import DictConfig
@registry.register_trainer("base")
class BaseTrainer(ABC):
def __init__(self, config: DictConfig):
self.config = config
self.training_config = self.config.training
log_class_usage("Trainer", self.__class__)
def load(self):
# Set run type
self.run_type = self.config.get("run_type", "train")
# Print configuration
configuration = registry.get("configuration", no_warning=True)
if configuration:
configuration.pretty_print()
# Configure device and cudnn deterministic
self.configure_device()
self.configure_seed()
# Load dataset, model, optimizer and metrics
self.load_datasets()
self.load_model()
self.load_optimizer()
self.load_metrics()
# Initialize Callbacks
self.configure_callbacks()
@abstractmethod
def configure_device(self):
"""Warning: this is just empty shell for code implemented in other class.
Configure and set device properties here.
"""
@abstractmethod
def configure_seed(self):
"""Configure seed and related changes like torch deterministic etc shere.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def configure_callbacks(self):
"""Configure callbacks and add callbacks be executed during
different events during training, validation or test.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def load_datasets(self):
"""Loads datasets and dataloaders.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def load_model(self):
"""Load the model.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def load_optimizer(self):
"""Load optimizers.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def load_metrics(self):
"""Load metrics for evaluation.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def train(self):
"""Runs full training and optimization.
Warning: Empty shell for code to be implemented in other class.
"""
@abstractmethod
def inference(self):
"""Runs inference and validation, generate predictions.
Warning: Empty shell for code to be implemented in other class.
"""
| EXA-1-master | exa/models/mmf-main/mmf/trainers/base_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
import os
from typing import Any, Dict, List, Optional
import omegaconf
from mmf.common.registry import registry
from mmf.datasets.lightning_multi_datamodule import LightningMultiDataModule
from mmf.modules.metrics import Metrics
from mmf.trainers.base_trainer import BaseTrainer
from mmf.trainers.lightning_core.loop_callback import LightningLoopCallback
from mmf.trainers.lightning_core.loop_callback_with_torchmetrics import (
LightningTorchMetricsCallback,
)
from mmf.trainers.lightning_core.torchmetric import LightningTorchMetrics
from mmf.utils.build import build_lightning_model
from mmf.utils.checkpoint import (
get_ckpt_from_path,
get_ckpt_path_from_folder,
get_config_from_folder_or_ckpt,
)
from mmf.utils.checkpoint_updater import is_model_only_checkpoint
from mmf.utils.configuration import get_mmf_env
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_max_updates, print_model_parameters
from mmf.utils.logger import setup_output_folder, TensorboardLogger
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
logger = logging.getLogger(__name__)
@registry.register_trainer("lightning")
class LightningTrainer(BaseTrainer):
def __init__(self, config: DictConfig):
super().__init__(config)
self.trainer = None
self.trainer_config = self.config.trainer.params
self.metrics_config = self.config.evaluation.get("metrics", [])
self.torchmetrics_config = self.config.evaluation.get("torchmetrics", [])
self.data_module = None
self.resume_from_checkpoint = None
self.torchmetrics: Optional[LightningTorchMetrics] = None
def load(self):
super().load()
self._calculate_max_updates()
self._load_loggers()
self._load_trainer()
def _load_trainer(self):
lightning_params = self.trainer_config
with omegaconf.open_dict(lightning_params):
lightning_params.pop("max_steps")
lightning_params.pop("max_epochs")
lightning_params.pop("resume_from_checkpoint")
lightning_params_dict = OmegaConf.to_container(lightning_params, resolve=True)
self.trainer = Trainer(
callbacks=self.callbacks,
max_steps=self._max_updates,
resume_from_checkpoint=self.resume_from_checkpoint,
default_root_dir=get_mmf_env(key="log_dir"),
**lightning_params_dict,
)
def configure_device(self) -> None:
pass
def configure_seed(self) -> None:
seed = self.config.training.seed
seed_everything(seed)
def _load_loggers(self) -> None:
self.tb_writer = None
if self.training_config.tensorboard:
# TODO: @sash PL logger upgrade
log_dir = setup_output_folder(folder_only=True)
env_tb_logdir = get_mmf_env(key="tensorboard_logdir")
if env_tb_logdir:
log_dir = env_tb_logdir
self.tb_writer = TensorboardLogger(log_dir)
def load_datasets(self) -> None:
logger.info("Loading datasets")
data_module = LightningMultiDataModule(self.config)
self.data_module = data_module
self.train_loader = data_module.train_dataloader()
self.val_loader = data_module.val_dataloader()
self.test_loader = data_module.test_dataloader()
def load_model(self) -> None:
logger.info("Loading models")
checkpoint_data = self.get_checkpoint_data()
checkpoint_path = checkpoint_data["checkpoint_path"]
ckpt = checkpoint_data["ckpt"]
is_zoo = checkpoint_data["is_zoo"]
config = checkpoint_data["config"]
model_checkpoint_path = None
if checkpoint_path is not None:
assert ckpt, "checkpoint should have been loaded when path is available"
if is_model_only_checkpoint(ckpt):
# it is model only checkpoint, then we load it here
model_checkpoint_path = checkpoint_path
else:
# it is a trainer checkpoint, we pass it as a trainer param
self.resume_from_checkpoint = checkpoint_path
attributes = self.get_model_config(is_zoo, config)
self.model = build_lightning_model(attributes, model_checkpoint_path)
if len(self.torchmetrics_config) > 0:
logger.warning(
"skip self.model.build_meters since torchmetrics are provided"
)
else:
self.model.build_meters(self.run_type)
def get_model_config(
self, is_zoo: bool = False, config: Dict[str, Any] = None
) -> Dict[str, Any]:
ckpt_config = self.config.checkpoint
if is_zoo and ckpt_config.zoo_config_override and config:
self.config.model_config = config.model_config
attributes = self.config.model_config[self.config.model]
if isinstance(attributes, str):
attributes = self.config.model_config[attributes]
with omegaconf.open_dict(attributes):
attributes.model = self.config.model
return attributes
def get_checkpoint_data(self) -> Dict[str, Any]:
"""This function gets checkpoint file path on disk from
        config.trainer.params.resume_from_checkpoint. However, if it is not specified,
it gets checkpoint path from config.checkpoint. If config.resume is specified
it gets the latest checkpoint from the config's save directory (alternatively it
gets the best checkpoint if config.resume_best is True). If config.resume is not
specified, then it gets config.resume_file or the checkpoint file from
config.resume_zoo (in that order).
Returns:
Dict[str, Any]: a dict containing the following keys,
`checkpoint_path` (str) local file path for the checkpoint;
`ckpt` (Dict[str, Any])
`is_zoo` (Bool) whether or not the checkpoint is specified through a
zoo identifier
`config` (Dict[str, Any]]) the config that is stored together with this
checkpoint
"""
# get ckpt file path from config.trainer.params.resume_from_checkpoint
path = self.config.trainer.params.get("resume_from_checkpoint", None)
if path is not None:
is_zoo = self.is_zoo_path(path)
ckpt_filepath = path
if is_zoo:
folder = download_pretrained_model(path)
ckpt_filepath = get_ckpt_path_from_folder(folder)
ckpt = get_ckpt_from_path(ckpt_filepath)
config = get_config_from_folder_or_ckpt(folder, ckpt)
else:
ckpt = get_ckpt_from_path(ckpt_filepath)
config = None
return {
"ckpt": ckpt,
"checkpoint_path": ckpt_filepath,
"is_zoo": is_zoo,
"config": config,
}
is_zoo = False
config = None
ckpt = None
# get ckpt file path from config.checkpoint
ckpt_config = self.config.checkpoint
suffix = "best.ckpt" if ckpt_config.resume_best else "current.ckpt"
path = os.path.join(get_mmf_env(key="save_dir"), suffix)
ckpt_filepath = None
resume_from_specified_path = (
ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None
) and (not ckpt_config.resume or not PathManager.exists(path))
if resume_from_specified_path:
if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):
ckpt_filepath = ckpt_config.resume_file
elif ckpt_config.resume_zoo is not None:
is_zoo = True
folder = download_pretrained_model(ckpt_config.resume_zoo)
ckpt_filepath = get_ckpt_path_from_folder(folder)
ckpt = get_ckpt_from_path(ckpt_filepath)
config = get_config_from_folder_or_ckpt(folder, ckpt)
else:
raise RuntimeError(f"{ckpt_config.resume_file} doesn't exist")
if ckpt_config.resume and PathManager.exists(path):
ckpt_filepath = path
if ckpt_filepath is not None:
ckpt = get_ckpt_from_path(ckpt_filepath)
return {
"ckpt": ckpt,
"checkpoint_path": ckpt_filepath,
"is_zoo": is_zoo,
"config": config,
}
def is_zoo_path(self, path) -> bool:
from mmf.utils.configuration import get_mmf_env, load_yaml
model_zoo = load_yaml(get_mmf_env(key="model_zoo"))
OmegaConf.set_struct(model_zoo, True)
OmegaConf.set_readonly(model_zoo, True)
try:
model_config = OmegaConf.select(model_zoo, path)
return model_config is not None
except omegaconf.errors.OmegaConfBaseException:
return False
def load_optimizer(self) -> None:
logger.info("Loading optimizer: noop for lightning")
def load_metrics(self) -> None:
logger.info("Loading metrics")
# torchmetrics
if len(self.torchmetrics_config) > 0:
self.torchmetrics = LightningTorchMetrics(self.torchmetrics_config)
logger.warning(
"torchmetrics will be used, regular mmf metrics will be ignored"
)
else:
# moved metrics into the model object
self.model.metrics = Metrics(self.metrics_config)
def monitor_criteria(self):
monitor_criteria = self.training_config.early_stop.get("criteria", None)
assert (
monitor_criteria
), "monitor criteria is required when early stop is specified."
if "val" not in monitor_criteria:
monitor_criteria = f"val/{monitor_criteria}"
mode = (
"min" if self.training_config.early_stop.get("minimize", False) else "max"
)
return monitor_criteria, mode
def configure_callbacks(self) -> None:
if self.torchmetrics is not None:
self.callbacks = [LightningTorchMetricsCallback(self)]
else:
self.callbacks = [LightningLoopCallback(self)]
self.callbacks += self.configure_checkpoint_callbacks()
if self.training_config.get(
"early_stop", None
) and self.training_config.early_stop.get("enabled", False):
self.callbacks += self.configure_monitor_callbacks()
self.callbacks += self.configure_earlystop_callback()
def configure_earlystop_callback(self) -> List[ModelCheckpoint]:
return []
def configure_checkpoint_callbacks(self) -> List[ModelCheckpoint]:
train_callback = ModelCheckpoint(
monitor=None,
every_n_train_steps=self.config.training.checkpoint_interval,
dirpath=get_mmf_env(key="save_dir"),
filename="models/model_{step}",
save_top_k=-1,
save_last=True,
verbose=True,
)
train_callback.CHECKPOINT_NAME_LAST = "current"
return [train_callback]
def configure_monitor_callbacks(self) -> List[ModelCheckpoint]:
criteria, mode = self.monitor_criteria()
monitor_callback = ModelCheckpoint(
monitor=criteria,
dirpath=get_mmf_env(key="save_dir"),
filename="best",
mode=mode,
save_top_k=1,
save_last=False,
verbose=True,
)
return [monitor_callback]
def train(self) -> None:
logger.info("===== Model =====")
logger.info(self.model)
print_model_parameters(self.model)
logger.info("Starting training...")
if "train" not in self.run_type:
self.inference()
return
self.trainer.fit(self.model, self.data_module)
self.run_last_validation_after_train()
# TODO: Look for a better way to hook this
self.data_module.teardown()
def run_last_validation_after_train(self) -> None:
        # Don't run if current iteration is divisible by
# val check interval as it will just be a repeat
if (
"val" in self.run_type
and self.trainer.global_step % self.trainer_config.val_check_interval != 0
):
logger.info("Stepping into final validation check")
self.trainer.validate(self.model, self.val_loader)
def inference(self) -> None:
logger.info("Starting inference...")
# TODO: @sash coming soon
pass
def _calculate_max_updates(self) -> None:
self._max_updates = self.trainer_config.max_steps
self._max_epochs = self.trainer_config.max_epochs
if self._max_updates is None and self._max_epochs is None:
raise ValueError("Neither max_updates nor max_epochs is specified.")
self._max_updates, max_epochs = get_max_updates(
self._max_updates,
self._max_epochs,
self.train_loader,
self.trainer_config.accumulate_grad_batches,
)
if max_epochs and max_epochs != math.inf:
self._max_epochs = math.ceil(max_epochs)
return self._max_updates
| EXA-1-master | exa/models/mmf-main/mmf/trainers/lightning_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
__all__ = ["BaseTrainer"]
from .base_trainer import BaseTrainer
| EXA-1-master | exa/models/mmf-main/mmf/trainers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from abc import ABC
from typing import Any, Dict, Tuple, Type
import torch
import tqdm
from mmf.common.meter import Meter
from mmf.common.report import Report
from mmf.common.sample import to_device
from mmf.utils.distributed import gather_tensor, is_main, is_xla
logger = logging.getLogger(__name__)
class TrainerEvaluationLoopMixin(ABC):
def evaluation_loop(
self, dataset_type: str, use_tqdm: bool = False, single_batch: bool = False
) -> Tuple[Dict[str, Any], Type[Meter]]:
meter = Meter()
reporter = self.dataset_loader.get_test_reporter(dataset_type)
use_cpu = self.config.evaluation.get("use_cpu", False)
loaded_batches = 0
skipped_batches = 0
with torch.no_grad():
self.model.eval()
disable_tqdm = not use_tqdm or not is_main()
while reporter.next_dataset(flush_report=False):
dataloader = reporter.get_dataloader()
combined_report = None
if self._can_use_tqdm(dataloader):
dataloader = tqdm.tqdm(dataloader, disable=disable_tqdm)
for batch in dataloader:
loaded_batches += 1
prepared_batch = reporter.prepare_batch(batch)
prepared_batch = to_device(prepared_batch, self.device)
if not validate_batch_sizes(prepared_batch.get_batch_size()):
logger.info("Skip batch due to uneven batch sizes.")
skipped_batches += 1
continue
model_output = self.model(prepared_batch)
report = Report(prepared_batch, model_output)
report = report.detach()
meter.update_from_report(report)
moved_report = report
# Move to CPU for metrics calculation later if needed
# Explicitly use `non_blocking=False` as this can cause
# race conditions in next accumulate
if use_cpu:
moved_report = report.copy().to("cpu", non_blocking=False)
# accumulate necessary params for metric calculation
if combined_report is None:
# make a copy of report since `reporter.add_to_report` will
# change some of the report keys later
combined_report = moved_report.copy()
else:
combined_report.accumulate_tensor_fields_and_loss(
moved_report, self.metrics.required_params
)
combined_report.batch_size += moved_report.batch_size
# Each node generates a separate copy of predict JSON from the
# report, which will be used to evaluate dataset-level metrics
# (such as mAP in object detection or CIDEr in image captioning)
# Since `reporter.add_to_report` changes report keys,
# (e.g scores) do this after
# `combined_report.accumulate_tensor_fields_and_loss`
if "__prediction_report__" in self.metrics.required_params:
# Still need to use original report here on GPU/TPU since
# it will be gathered
reporter.add_to_report(report, self.model)
if single_batch is True:
break
logger.info(f"Finished evaluation inference. Loaded {loaded_batches}")
logger.info(f" -- skipped {skipped_batches} batches.")
reporter.postprocess_dataset_report()
assert (
combined_report is not None
), "Please check if your validation set is empty!"
# add prediction_report is used for set-level metrics
combined_report.prediction_report = reporter.report
combined_report.metrics = self.metrics(combined_report, combined_report)
# Since update_meter will reduce the metrics over GPUs, we need to
# move them back to GPU but we will only move metrics and losses
# which are needed by update_meter to avoid OOM
# Furthermore, do it in a non_blocking way to avoid any issues
# in device to host or host to device transfer
if use_cpu:
combined_report = combined_report.to(
self.device, fields=["metrics", "losses"], non_blocking=False
)
meter.update_from_report(combined_report, should_update_loss=False)
# enable train mode again
self.model.train()
return combined_report, meter
def prediction_loop(self, dataset_type: str) -> None:
reporter = self.dataset_loader.get_test_reporter(dataset_type)
skipped_batches = 0
loaded_batches = 0
with torch.no_grad():
self.model.eval()
logger.info(f"Starting {dataset_type} inference predictions")
while reporter.next_dataset():
dataloader = reporter.get_dataloader()
if self._can_use_tqdm(dataloader):
dataloader = tqdm.tqdm(dataloader)
for batch in dataloader:
prepared_batch = reporter.prepare_batch(batch)
prepared_batch = to_device(prepared_batch, self.device)
loaded_batches += 1
if not validate_batch_sizes(prepared_batch.get_batch_size()):
logger.info("Skip batch due to unequal batch sizes.")
skipped_batches += 1
continue
with torch.cuda.amp.autocast(enabled=self.training_config.fp16):
model_output = self.model(prepared_batch)
report = Report(prepared_batch, model_output)
reporter.add_to_report(report, self.model)
report.detach()
reporter.postprocess_dataset_report()
logger.info(f"Finished predicting. Loaded {loaded_batches}")
logger.info(f" -- skipped {skipped_batches} batches.")
self.model.train()
def _can_use_tqdm(self, dataloader: torch.utils.data.DataLoader):
"""
Checks whether tqdm can be gracefully used with a dataloader:
1) the dataloader should have `__len__` defined
2) calling len(dataloader) should not throw errors.
"""
use_tqdm = hasattr(dataloader, "__len__")
try:
_ = len(dataloader)
except (AttributeError, TypeError, NotImplementedError):
use_tqdm = False
return use_tqdm
def validate_batch_sizes(my_batch_size: int) -> bool:
"""
Validates all workers got the same batch size.
"""
# skip batch size validation on XLA (as there's too much overhead
# and data loader automatically drops the last batch in XLA mode)
if is_xla():
return True
batch_size_tensor = torch.IntTensor([my_batch_size])
if torch.cuda.is_available():
batch_size_tensor = batch_size_tensor.cuda()
all_batch_sizes = gather_tensor(batch_size_tensor)
for j, oth_batch_size in enumerate(all_batch_sizes.data):
if oth_batch_size != my_batch_size:
logger.error(f"Node {j} batch {oth_batch_size} != {my_batch_size}")
return False
return True
| EXA-1-master | exa/models/mmf-main/mmf/trainers/core/evaluation_loop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import warnings
from abc import ABC
import torch
from mmf.common.registry import registry
from mmf.utils.distributed import (
broadcast_xla_master_model_param,
get_world_size,
is_xla,
)
from omegaconf import open_dict
logger = logging.getLogger(__name__)
class TrainerDeviceMixin(ABC):
def configure_seed(self) -> None:
seed = self.config.training.seed
if seed is None:
return
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = self.config.training.cudnn_benchmark
# TODO: Review self.device assignment and then override
def configure_device(self) -> None:
if self.config.training.get("device", "cuda") == "xla":
import torch_xla.core.xla_model as xm
self.device = xm.xla_device()
self.distributed = True
self.local_rank = xm.get_local_ordinal()
is_xla = True
else:
is_xla = False
if "device_id" not in self.config:
warnings.warn(
"No 'device_id' in 'config', setting to -1. "
"This can cause issues later in training. Ensure that "
"distributed setup is properly initialized."
)
self.local_rank = -1
else:
self.local_rank = self.config.device_id
self.device = self.local_rank
self.distributed = False
# Will be updated later based on distributed setup
registry.register("global_device", self.device)
if self.config.distributed.init_method is not None:
self.distributed = True
self.device = torch.device("cuda", self.local_rank)
torch.cuda.set_device(self.local_rank)
elif torch.cuda.is_available():
self.device = torch.device("cuda")
torch.cuda.set_device(0)
elif not is_xla:
self.device = torch.device("cpu")
if "rank" not in self.config.distributed:
if torch.distributed.is_available() and torch.distributed.is_initialized():
global_rank = torch.distributed.get_rank()
else:
global_rank = -1
with open_dict(self.config.distributed):
self.config.distributed.rank = global_rank
registry.register("global_device", self.config.distributed.rank)
def parallelize_model(self) -> None:
registry.register("data_parallel", False)
registry.register("distributed", False)
if (
"cuda" in str(self.device)
and torch.cuda.device_count() > 1
and not self.distributed
):
registry.register("data_parallel", True)
self.model = torch.nn.DataParallel(self.model)
if "cuda" in str(self.device) and self.distributed:
registry.register("distributed", True)
set_torch_ddp = True
try:
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim.oss import OSS
if isinstance(self.optimizer, OSS):
self.model = ShardedDataParallel(self.model, self.optimizer)
set_torch_ddp = False
logger.info("Using FairScale ShardedDataParallel")
except ImportError:
logger.info("Using PyTorch DistributedDataParallel")
warnings.warn(
"You can enable ZeRO and Sharded DDP, by installing fairscale "
+ "and setting optimizer.enable_state_sharding=True."
)
if set_torch_ddp:
self.model = torch.nn.parallel.DistributedDataParallel(
self.model,
device_ids=[self.local_rank],
output_device=self.local_rank,
find_unused_parameters=self.config.training.find_unused_parameters,
)
if is_xla() and get_world_size() > 1:
broadcast_xla_master_model_param(self.model)
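# Illustrative sketch, not in the original file: opting into the FairScale
# sharded path taken in parallelize_model above. Assumes fairscale is
# installed; the flag name comes from the warning emitted when the import
# fails.
#
#   optimizer:
#     enable_state_sharding: true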
| EXA-1-master | exa/models/mmf-main/mmf/trainers/core/device.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import ABC
from typing import List
from mmf.trainers.callbacks.base import Callback
class TrainerCallbackHookMixin(ABC):
callbacks: List[Callback] = []
def teardown(self, **kwargs) -> None:
"""Called to teardown the callback at end of training"""
for callback in self.callbacks:
callback.teardown(**kwargs)
def on_init_start(self, **kwargs) -> None:
"""Called when the trainer initialization begins."""
for callback in self.callbacks:
callback.on_init_start(**kwargs)
def on_init_end(self, **kwargs) -> None:
"""Called when the trainer initialization ends."""
for callback in self.callbacks:
callback.on_init_end(**kwargs)
def on_train_start(self, **kwargs) -> None:
"""Called when training begins."""
for callback in self.callbacks:
callback.on_train_start(**kwargs)
def on_train_end(self, **kwargs) -> None:
"""Called when training ends."""
for callback in self.callbacks:
callback.on_train_end(**kwargs)
def on_batch_start(self, **kwargs) -> None:
"""Called when a forward pass begins."""
for callback in self.callbacks:
callback.on_batch_start(**kwargs)
def on_batch_end(self, **kwargs) -> None:
"""Called when a forward pass ends."""
for callback in self.callbacks:
callback.on_batch_end(**kwargs)
def on_update_start(self, **kwargs) -> None:
"""Called when the training update begins."""
for callback in self.callbacks:
callback.on_update_start(**kwargs)
def on_update_end(self, **kwargs) -> None:
"""Called when the training update ends."""
for callback in self.callbacks:
callback.on_update_end(**kwargs)
def on_validation_start(self, **kwargs) -> None:
"""Called when the validation loop begins."""
for callback in self.callbacks:
callback.on_validation_start(**kwargs)
def on_validation_end(self, **kwargs) -> None:
"""Called when the validation loop ends."""
for callback in self.callbacks:
callback.on_validation_end(**kwargs)
def on_validation_batch_start(self, **kwargs) -> None:
"""Called when the validation batch begins."""
for callback in self.callbacks:
callback.on_validation_batch_start(**kwargs)
def on_validation_batch_end(self, **kwargs) -> None:
"""Called when the validation batch ends."""
for callback in self.callbacks:
callback.on_validation_batch_end(**kwargs)
def on_test_start(self, **kwargs) -> None:
"""Called when the test begins."""
for callback in self.callbacks:
callback.on_test_start(**kwargs)
def on_test_end(self, **kwargs) -> None:
"""Called when the test ends."""
for callback in self.callbacks:
callback.on_test_end(**kwargs)
def on_test_batch_start(self, **kwargs) -> None:
"""Called when the test batch begins."""
for callback in self.callbacks:
callback.on_test_batch_start(**kwargs)
def on_test_batch_end(self, **kwargs) -> None:
"""Called when the test batch ends."""
for callback in self.callbacks:
callback.on_test_batch_end(**kwargs)
def on_prediction_start(self, **kwargs) -> None:
"""Called when the prediction begins."""
for callback in self.callbacks:
callback.on_prediction_start(**kwargs)
def on_prediction_end(self, **kwargs) -> None:
"""Called when the prediction ends."""
for callback in self.callbacks:
callback.on_prediction_end(**kwargs)
| EXA-1-master | exa/models/mmf-main/mmf/trainers/core/callback_hook.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/trainers/core/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import gc
import logging
from abc import ABC
from typing import Any, Dict
import torch
from mmf.common.meter import Meter
from mmf.common.registry import registry
from mmf.common.report import Report
from mmf.common.sample import to_device
from mmf.utils.distributed import is_xla
from mmf.utils.general import clip_gradients, extract_loss, get_max_updates
from torch import Tensor
logger = logging.getLogger(__name__)
class TrainerTrainingLoopMixin(ABC):
current_epoch: int = 0
current_iteration: int = 0
num_updates: int = 0
meter: Meter = Meter()
def training_loop(self) -> None:
self.max_updates = self._calculate_max_updates()
torch.autograd.set_detect_anomaly(self.training_config.detect_anomaly)
logger.info("Starting training...")
self.model.train()
self.run_training_epoch()
self.after_training_loop()
def after_training_loop(self) -> None:
logger.info("Stepping into final validation check")
# Only do when run_type has train as it shouldn't happen on validation and
# inference runs. Inference will take care of this anyway. Also, don't run
# if current iteration is divisible by snapshot interval as it will just
# be a repeat
if (
"train" in self.run_type
and "val" in self.run_type
and self.num_updates % self.training_config.evaluation_interval != 0
):
# Create a new meter for this case
report, meter = self.evaluation_loop("val")
# Validation end callbacks
self.on_validation_end(report=report, meter=meter)
def run_training_epoch(self) -> None:
should_break = False
while self.num_updates < self.max_updates and not should_break:
self.current_epoch += 1
registry.register("current_epoch", self.current_epoch)
# Seed the sampler in case if it is distributed
self.dataset_loader.seed_sampler("train", self.current_epoch)
# For iterable datasets we cannot determine length of dataset properly.
# For those cases we set num_remaining_batches to be the (number of
# updates remaining x update_frequency)
num_remaining_batches = (
(
(self.max_updates - self.num_updates)
* self.training_config.update_frequency
)
if isinstance(
self.train_loader.current_dataset, torch.utils.data.IterableDataset
)
else len(self.train_loader)
)
should_start_update = True
for idx, batch in enumerate(self.train_loader):
if should_start_update:
combined_report = None
self._start_update()
num_batches_for_this_update = min(
self.training_config.update_frequency, num_remaining_batches
)
should_start_update = False
self.current_iteration += 1
# batch execution starts here
self.on_batch_start()
self.profile("Batch load time")
report = self.run_training_batch(batch, num_batches_for_this_update)
report = report.detach()
# accumulate necessary params (including loss) for metric calculation
if combined_report is None:
combined_report = report
else:
combined_report.accumulate_tensor_fields_and_loss(
report, self.metrics.required_params
)
combined_report.batch_size += report.batch_size
# batch execution ends here
self.on_batch_end(report=combined_report, meter=self.meter)
# check if an update has finished or if it is the last one; if not, continue
if (
(idx + 1) % self.training_config.update_frequency
and num_remaining_batches != num_batches_for_this_update
):
continue
self._finish_update()
should_start_update = True
should_log = False
if self.num_updates % self.logistics_callback.log_interval == 0:
should_log = True
# Calculate metrics every log interval for debugging
if self.training_config.evaluate_metrics:
combined_report.metrics = self.metrics(
combined_report, combined_report
)
self.meter.update_from_report(combined_report)
self.on_update_end(
report=combined_report, meter=self.meter, should_log=should_log
)
num_remaining_batches -= num_batches_for_this_update
# Check if training should be stopped
should_break = False
if self.num_updates % self.training_config.evaluation_interval == 0:
# Validation begin callbacks
self.on_validation_start()
logger.info("Evaluation time. Running on full validation set...")
# Validation and Early stopping
# Create a new meter for this case
report, meter = self.evaluation_loop("val")
# Validation end callbacks
stop = self.early_stop_callback.on_validation_end(
report=report, meter=meter
)
self.on_validation_end(report=report, meter=meter)
gc.collect()
if "cuda" in str(self.device):
torch.cuda.empty_cache()
if stop is True:
logger.info("Early stopping activated")
should_break = True
if self.num_updates >= self.max_updates:
should_break = True
if should_break:
break
def run_training_batch(self, batch: Dict[str, Tensor], loss_divisor: int) -> Report:
report = self._forward(batch)
if self.training_config.exit_on_nan_losses:
self._check_nan_losses(report)
loss = extract_loss(report, loss_divisor)
self._backward(loss)
return report
def _check_nan_losses(self, report):
# skip this check in XLA mode as calling .item() in forward pass
# greatly slows down the training
if not is_xla():
# check whether NaN has occurred in the losses, and exit the training
# when NaN happens
loss_dict = report.losses
nan_loss_keys = []
for key, value in loss_dict.items():
if torch.any(torch.isnan(value)).item():
nan_loss_keys.append(key)
if len(nan_loss_keys) > 0:
keys_str = ", ".join(nan_loss_keys)
error_msg = (
f"NaN occurred in the following loss(es): {keys_str}; "
f"exiting the training"
)
logger.info(error_msg)
raise RuntimeError(error_msg)
def _forward(self, batch: Dict[str, Tensor]) -> Dict[str, Any]:
# Move the sample list to device if it isn't as of now.
prepared_batch = to_device(batch, self.device)
self.profile("Batch prepare time")
# Arguments should be a dict at this point
with torch.cuda.amp.autocast(enabled=self.training_config.fp16):
model_output = self.model(prepared_batch)
report = Report(prepared_batch, model_output)
self.profile("Forward time")
return report
def _start_update(self):
logger.debug(self.num_updates + 1)
self.on_update_start()
self.optimizer.zero_grad()
def _backward(self, loss: Tensor) -> None:
self.scaler.scale(loss).backward()
self.profile("Backward time")
def _finish_update(self):
if self.training_config.clip_gradients:
clip_gradients(
self.model,
self.optimizer,
self.num_updates,
self.logistics_callback.tb_writer,
self.config,
scale=self.scaler.get_scale(),
)
if is_xla():
import torch_xla.core.xla_model as xm
# Assumes no model parallel
xm.reduce_gradients(self.optimizer)
self.scaler.step(self.optimizer)
self.scaler.update()
self.num_updates += 1
self.profile("Finished update")
def _calculate_max_updates(self):
config_max_updates = self.training_config.max_updates
config_max_epochs = self.training_config.max_epochs
max_updates, _ = get_max_updates(
config_max_updates,
config_max_epochs,
self.train_loader,
self.training_config.update_frequency,
)
return max_updates
| EXA-1-master | exa/models/mmf-main/mmf/trainers/core/training_loop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from abc import ABC
from typing import Type
from mmf.utils.timer import Timer
logger = logging.getLogger(__name__)
class TrainerProfilingMixin(ABC):
profiler: Type[Timer] = Timer()
def profile(self, text: str) -> None:
if self.training_config.logger_level != "debug":
return
logging.debug(f"{text}: {self.profiler.get_time_since_start()}")
self.profiler.reset()
| EXA-1-master | exa/models/mmf-main/mmf/trainers/core/profiling.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.trainers.callbacks.base import Callback
from mmf.utils.build import build_scheduler
class LRSchedulerCallback(Callback):
"""Callback which executes a LR scheduler. It is executed after every
batch iteration.
"""
def __init__(self, config, trainer):
"""
Attr:
config(mmf_typings.DictConfig): Config for the callback
trainer(Type[BaseTrainer]): Trainer object
"""
super().__init__(config, trainer)
self._scheduler = None
if self.training_config.lr_scheduler is True:
self._scheduler = build_scheduler(trainer.optimizer, self.config)
def on_update_end(self, **kwargs):
if self._scheduler is not None:
self._scheduler.step()
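# Illustrative sketch, not part of the original file: config that activates
# this callback. `training.lr_scheduler` is the flag checked in __init__;
# the scheduler section is resolved by build_scheduler. The scheduler type
# and params below are an assumed example, not prescribed by this file.
#
#   training:
#     lr_scheduler: true
#   scheduler:
#     type: warmup_linear
#     params:
#       num_warmup_steps: 2000
#       num_training_steps: 22000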
| EXA-1-master | exa/models/mmf-main/mmf/trainers/callbacks/lr_scheduler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from mmf.trainers.callbacks.base import Callback
from mmf.utils.checkpoint import Checkpoint, consolidate_optim_state_dict
logger = logging.getLogger(__name__)
class CheckpointCallback(Callback):
"""Callback for executing different checkpoint requirements."""
def __init__(self, config, trainer):
"""
Attr:
config(mmf_typings.DictConfig): Config for the callback
trainer(Type[BaseTrainer]): Trainer object
"""
super().__init__(config, trainer)
self._checkpoint = Checkpoint(trainer)
self.checkpoint_interval = self.config.training.checkpoint_interval
@property
def checkpoint(self):
return self._checkpoint
def on_init_start(self, **kwargs):
self._checkpoint.load_state_dict()
def on_update_end(self, **kwargs):
if self.trainer.num_updates % self.checkpoint_interval == 0:
logger.info("Checkpoint time. Saving a checkpoint.")
# Consolidate the state dict of sharded optimizers
consolidate_optim_state_dict(self.trainer.optimizer)
self._checkpoint.save(
self.trainer.num_updates,
self.trainer.current_iteration,
update_best=False,
)
def on_train_end(self, **kwargs):
self._checkpoint.restore()
self._checkpoint.finalize()
| EXA-1-master | exa/models/mmf-main/mmf/trainers/callbacks/checkpoint.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import torch
from mmf.trainers.callbacks.base import Callback
from mmf.utils.configuration import get_mmf_env
from mmf.utils.logger import (
calculate_time_left,
setup_output_folder,
summarize_report,
TensorboardLogger,
WandbLogger,
)
from mmf.utils.timer import Timer
logger = logging.getLogger(__name__)
class LogisticsCallback(Callback):
"""Callback for handling train/validation logistics, report summarization,
logging etc.
"""
def __init__(self, config, trainer):
"""
Attr:
config(mmf_typings.DictConfig): Config for the callback
trainer(Type[BaseTrainer]): Trainer object
"""
super().__init__(config, trainer)
self.total_timer = Timer()
self.log_interval = self.training_config.log_interval
self.evaluation_interval = self.training_config.evaluation_interval
self.checkpoint_interval = self.training_config.checkpoint_interval
# Total iterations for a validation snapshot;
# len(val_loader) gives the number of validation batches per GPU
self.snapshot_iterations = len(self.trainer.val_loader)
self.tb_writer = None
self.wandb_logger = None
if self.training_config.tensorboard:
log_dir = setup_output_folder(folder_only=True)
env_tb_logdir = get_mmf_env(key="tensorboard_logdir")
if env_tb_logdir:
log_dir = env_tb_logdir
self.tb_writer = TensorboardLogger(log_dir, self.trainer.current_iteration)
if self.training_config.wandb.enabled:
log_dir = setup_output_folder(folder_only=True)
env_wandb_logdir = get_mmf_env(key="wandb_logdir")
if env_wandb_logdir:
log_dir = env_wandb_logdir
self.wandb_logger = WandbLogger(
entity=config.training.wandb.entity,
config=config,
project=config.training.wandb.project,
)
def on_train_start(self):
self.train_timer = Timer()
self.snapshot_timer = Timer()
def on_update_end(self, **kwargs):
if not kwargs["should_log"]:
return
extra = {}
if "cuda" in str(self.trainer.device):
extra["max mem"] = torch.cuda.max_memory_allocated() / 1024
extra["max mem"] //= 1024
if self.training_config.experiment_name:
extra["experiment"] = self.training_config.experiment_name
max_updates = getattr(self.trainer, "max_updates", None)
num_updates = getattr(self.trainer, "num_updates", None)
extra.update(
{
"epoch": self.trainer.current_epoch,
"num_updates": num_updates,
"iterations": self.trainer.current_iteration,
"max_updates": max_updates,
"lr": "{:.5f}".format(
self.trainer.optimizer.param_groups[0]["lr"]
).rstrip("0"),
"ups": "{:.2f}".format(
self.log_interval / self.train_timer.unix_time_since_start()
),
"time": self.train_timer.get_time_since_start(),
"time_since_start": self.total_timer.get_time_since_start(),
"eta": calculate_time_left(
max_updates=max_updates,
num_updates=num_updates,
timer=self.train_timer,
num_snapshot_iterations=self.snapshot_iterations,
log_interval=self.log_interval,
eval_interval=self.evaluation_interval,
),
}
)
self.train_timer.reset()
summarize_report(
current_iteration=self.trainer.current_iteration,
num_updates=num_updates,
max_updates=max_updates,
meter=kwargs["meter"],
extra=extra,
tb_writer=self.tb_writer,
wandb_logger=self.wandb_logger,
)
def on_validation_start(self, **kwargs):
self.snapshot_timer.reset()
def on_validation_end(self, **kwargs):
max_updates = getattr(self.trainer, "max_updates", None)
num_updates = getattr(self.trainer, "num_updates", None)
extra = {
"num_updates": num_updates,
"epoch": self.trainer.current_epoch,
"iterations": self.trainer.current_iteration,
"max_updates": max_updates,
"val_time": self.snapshot_timer.get_time_since_start(),
}
extra.update(self.trainer.early_stop_callback.early_stopping.get_info())
self.train_timer.reset()
summarize_report(
current_iteration=self.trainer.current_iteration,
num_updates=num_updates,
max_updates=max_updates,
meter=kwargs["meter"],
extra=extra,
tb_writer=self.tb_writer,
wandb_logger=self.wandb_logger,
)
def on_test_end(self, **kwargs):
prefix = "{}: full {}".format(
kwargs["report"].dataset_name, kwargs["report"].dataset_type
)
summarize_report(
current_iteration=self.trainer.current_iteration,
num_updates=getattr(self.trainer, "num_updates", None),
max_updates=getattr(self.trainer, "max_updates", None),
meter=kwargs["meter"],
should_print=prefix,
tb_writer=self.tb_writer,
wandb_logger=self.wandb_logger,
)
logger.info(f"Finished run in {self.total_timer.get_time_since_start()}")
def teardown(self):
if self.tb_writer is not None:
self.tb_writer.close()
| EXA-1-master | exa/models/mmf-main/mmf/trainers/callbacks/logistics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/trainers/callbacks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.trainers.callbacks.base import Callback
from mmf.utils.checkpoint import consolidate_optim_state_dict
from mmf.utils.distributed import broadcast_scalar
from mmf.utils.early_stopping import EarlyStopping
class EarlyStoppingCallback(Callback):
"""Callback for Early Stopping mechanism and checks if it training
should continue or stop.
"""
def __init__(self, config, trainer):
"""
Attr:
config(mmf_typings.DictConfig): Config for the callback
trainer(Type[BaseTrainer]): Trainer object
"""
super().__init__(config, trainer)
early_stop_criteria = self.training_config.early_stop.criteria
early_stop_minimize = self.training_config.early_stop.minimize
early_stop_enabled = self.training_config.early_stop.enabled
early_stop_patience = self.training_config.early_stop.patience
self.early_stopping = EarlyStopping(
self.trainer.model,
self.trainer.checkpoint_callback.checkpoint,
early_stop_criteria,
patience=early_stop_patience,
minimize=early_stop_minimize,
should_stop=early_stop_enabled,
)
def on_validation_end(self, **kwargs):
# Consolidate the state dict of sharded optimizers
consolidate_optim_state_dict(self.trainer.optimizer)
stop = self.early_stopping(
self.trainer.num_updates, self.trainer.current_iteration, kwargs["meter"]
)
stop = bool(broadcast_scalar(stop, src=0, device=self.trainer.device))
return stop
| EXA-1-master | exa/models/mmf-main/mmf/trainers/callbacks/early_stopping.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Type
from mmf.trainers.base_trainer import BaseTrainer
from omegaconf import DictConfig
class Callback:
"""
Base class for callbacks that can be registered with type :class:`BaseTrainer`
Attr:
config(omegaconf.DictConfig): Config for the callback
trainer(Type[BaseTrainer]): Trainer object
"""
def __init__(self, config: DictConfig, trainer: Type[BaseTrainer]) -> None:
self.config = config
self.trainer = trainer
self.training_config = self.config.training
def teardown(self, **kwargs) -> None:
"""
Called at the end of the training to teardown the callback
"""
pass
def on_init_start(self, **kwargs) -> None:
"""
Called when the trainer initialization begins.
"""
pass
def on_init_end(self, **kwargs) -> None:
"""
Called when the trainer initialization ends.
"""
pass
def on_train_start(self, **kwargs) -> None:
"""
Called before training starts.
"""
pass
def on_train_end(self, **kwargs) -> None:
"""
Called after training ends.
"""
pass
def on_batch_start(self, **kwargs) -> None:
"""
Called before each train forward pass of a batch.
"""
pass
def on_batch_end(self, **kwargs) -> None:
"""
Called after each train forward pass of a batch.
"""
pass
def on_update_start(self, **kwargs) -> None:
"""
Called before each train update.
"""
pass
def on_update_end(self, **kwargs) -> None:
"""
Called after each train update.
"""
pass
def on_validation_start(self, **kwargs) -> None:
"""
Called before validation starts.
"""
pass
def on_validation_end(self, **kwargs) -> None:
"""
Called after validation ends.
"""
pass
def on_validation_batch_start(self, **kwargs) -> None:
"""
Called before each validation iteration.
"""
pass
def on_validation_batch_end(self, **kwargs) -> None:
"""
Called after each validation iteration.
"""
pass
def on_test_start(self, **kwargs) -> None:
"""
Called before test starts.
"""
pass
def on_test_end(self, **kwargs) -> None:
"""
Called after test ends.
"""
pass
def on_test_batch_start(self, **kwargs) -> None:
"""
Called before each test iteration.
"""
pass
def on_test_batch_end(self, **kwargs) -> None:
"""
Called after each test iteration.
"""
pass
def on_prediction_start(self, **kwargs) -> None:
"""
Called before prediction loop starts.
"""
pass
def on_prediction_end(self, **kwargs) -> None:
"""
Called after prediction loop ends.
"""
pass
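# Minimal sketch of a custom callback (illustrative only, not part of the
# original file). The name "my_timer" and the print statement are invented;
# the pattern mirrors MMF's own callbacks: call super().__init__ with config
# and trainer, then override only the hooks you need.
#
#   from mmf.common.registry import registry
#
#   @registry.register_callback("my_timer")
#   class MyTimerCallback(Callback):
#       def on_update_end(self, **kwargs):
#           # runs after every training update
#           print("finished update", self.trainer.num_updates)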
| EXA-1-master | exa/models/mmf-main/mmf/trainers/callbacks/base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Any, Dict, List, Optional
import torch
from mmf.common.registry import registry
from mmf.common.sample import SampleList
from mmf.utils.timer import Timer
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks.base import Callback
logger = logging.getLogger(__name__)
class LightningTorchMetricsCallback(Callback):
"""
Callback to be used with LightningTrainer and torchmetrics.
Warning: 'optimizer.enable_state_sharding=True' is not supported
"""
def __init__(self, lightning_trainer: Any):
super().__init__()
self.lightning_trainer = lightning_trainer
# this is lightning trainer's config
self.trainer_config = lightning_trainer.trainer_config
# training config configures training parameters.
self.training_config = lightning_trainer.training_config
self.run_type = lightning_trainer.run_type
# for logging
self.total_timer = Timer()
self.snapshot_timer = Timer()
self.train_timer = Timer()
def on_train_start(self, trainer: Trainer, pl_module: LightningModule):
registry.register("current_epoch", trainer.current_epoch)
self.lightning_trainer.torchmetrics.reset()
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: List,
batch: SampleList,
batch_idx: int,
dataloader_idx: int,
):
# prepare the next batch
self.lightning_trainer.data_module.train_loader.change_dataloader()
self.lightning_trainer.torchmetrics.update(batch, outputs)
# log
if (
self._get_num_updates_for_logging(trainer)
% self.trainer_config.log_every_n_steps
== 0
):
num_updates = self._get_num_updates_for_logging(trainer)
max_updates = trainer.max_steps
extra = self._get_train_extra_log(trainer, pl_module)
self._log_metrics_and_extra(
extra, num_updates, max_updates, log_type="train"
)
self.lightning_trainer.torchmetrics.reset()
self.train_timer.reset()
# Validation Callbacks
def on_validation_start(self, trainer: Trainer, pl_module: LightningModule):
logger.info("Evaluation time. Running on full validation set...")
self.snapshot_timer.reset()
self.lightning_trainer.torchmetrics.reset()
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Dict,
batch: SampleList,
batch_idx: int,
dataloader_idx: int,
):
# prepare the next batch
self.lightning_trainer.data_module.val_loader.change_dataloader()
self.lightning_trainer.torchmetrics.update(batch, outputs)
def on_validation_end(self, trainer: Trainer, pl_module: LightningModule):
iterations = self._get_iterations_for_logging(trainer)
current_epochs = self._get_current_epoch_for_logging(trainer)
num_updates = self._get_num_updates_for_logging(trainer)
max_updates = trainer.max_steps
extra = {
"num_updates": num_updates,
"epoch": current_epochs,
"iterations": iterations,
"max_updates": max_updates,
"val_time": self.snapshot_timer.get_time_since_start(),
}
self.train_timer.reset()
self._log_metrics_and_extra(extra, num_updates, max_updates, log_type="val")
self.lightning_trainer.torchmetrics.reset()
def _log_metrics_and_extra(
self,
extra: Optional[Dict],
num_updates: int,
max_updates: int,
log_type: str = "train",
):
logger.info(f"{num_updates}/{max_updates}")
if extra is not None:
logger.info(", ".join([f"{key}: {value}" for key, value in extra.items()]))
scalar_dict = self.lightning_trainer.torchmetrics.get_scalar_dict()
scalar_dict_with_type = {f"{log_type}_{k}": v for k, v in scalar_dict.items()}
if self.lightning_trainer.tb_writer is not None:
self.lightning_trainer.tb_writer.add_scalars(
scalar_dict_with_type, num_updates
)
logger.info(f"{log_type} metrics:")
logger.info(
", ".join([f"{key}: {value}" for key, value in scalar_dict.items()])
)
def get_optimizer(self, trainer: Trainer):
assert (
len(trainer.optimizers) == 1
), "mmf lightning_trainer supports 1 optimizer per model for now."
optimizer = trainer.optimizers[0]
return optimizer
def _get_current_epoch_for_logging(self, trainer: Trainer):
return trainer.current_epoch + 1
def _get_iterations_for_logging(self, trainer: Trainer):
return trainer.fit_loop.batch_idx + 1
def _get_num_updates_for_logging(self, trainer: Trainer):
return trainer.global_step
def _get_train_extra_log(self, trainer: Trainer, pl_module: LightningModule):
extra = {}
if "cuda" in str(trainer.model.device):
extra["max mem"] = torch.cuda.max_memory_allocated() / 1024
extra["max mem"] //= 1024
if self.training_config.experiment_name:
extra["experiment"] = self.training_config.experiment_name
optimizer = self.get_optimizer(trainer)
num_updates = self._get_num_updates_for_logging(trainer)
current_iteration = self._get_iterations_for_logging(trainer)
extra.update(
{
"epoch": self._get_current_epoch_for_logging(trainer),
"iterations": current_iteration,
"num_updates": num_updates,
"max_updates": trainer.max_steps,
"lr": "{:.5f}".format(optimizer.param_groups[0]["lr"]).rstrip("0"),
"ups": "{:.2f}".format(
self.trainer_config.log_every_n_steps
/ self.train_timer.unix_time_since_start()
),
"time": self.train_timer.get_time_since_start(),
"time_since_start": self.total_timer.get_time_since_start(),
}
)
return extra
| EXA-1-master | exa/models/mmf-main/mmf/trainers/lightning_core/loop_callback_with_torchmetrics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf/trainers/lightning_core/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import logging
from typing import Dict, List
import torch
from mmf.common.registry import registry
from mmf.common.sample import SampleList
logger = logging.getLogger(__name__)
class LightningTorchMetrics:
"""
A class used in LightningTrainer to compute torchmetrics
---
An example to register a torchmetric:
from mmf.common.registry import registry
@registry.register_torchmetric("top_k_accuracy")
class TopKAccuracy(Metric):
def __init__(
self,
k: int = 1,
...
) -> None:
...
def update(self, sample_list: SampleList,
model_output: Dict[str, Tensor]) -> None:
...
def compute(self) -> Tensor:
...
---
To config the metrics in yaml config file:
evaluation:
torchmetrics:
- type: top_k_accuracy
key: top_3_overlap
params:
k: 3
- type: top_k_accuracy
key: top_1_overlap
params:
k: 1
Warning: once torchmetrics are provided, regular mmf metrics will be ignored.
"""
def __init__(self, metric_list: collections.abc.Sequence):
if not isinstance(metric_list, collections.abc.Sequence):
metric_list = [metric_list]
self.metrics, self.metric_dataset_names = self._init_metrics(metric_list)
def _init_metrics(self, metric_list: collections.abc.Sequence):
metrics = {}
metric_dataset_names = {}
for metric in metric_list:
params = {}
dataset_names = []
if isinstance(metric, collections.abc.Mapping):
if "type" not in metric:
raise ValueError(
f"Metric {metric} needs to have 'type' attribute "
+ "or should be a string"
)
metric_type = key = metric.type
params = metric.get("params", {})
# Support cases where users need to give a custom metric name
if "key" in metric:
key = metric.key
# One key should only be used once
if key in metrics:
raise RuntimeError(
f"Metric with type/key '{metric_type}' has been defined more "
+ "than once in metric list."
)
# a custom list of dataset where this metric will be applied
if "datasets" in metric:
dataset_names = metric.datasets
else:
logger.warning(
f"metric '{key}' will be computed on all datasets \
since datasets are not provided"
)
else:
if not isinstance(metric, str):
raise TypeError(
"Metric {} has inappropriate type"
"'dict' or 'str' allowed".format(metric)
)
metric_type = key = metric
metric_cls = registry.get_torchmetric_class(metric_type)
if metric_cls is None:
raise ValueError(
f"No metric named {metric_type} registered to registry"
)
metric_instance = metric_cls(**params)
metrics[key] = metric_instance
metric_dataset_names[key] = dataset_names
return metrics, metric_dataset_names
def _is_dataset_applicable(
self, dataset_name: str, metric_dataset_names: List[str]
):
return len(metric_dataset_names) == 0 or dataset_name in metric_dataset_names
def update(
self,
sample_list: SampleList,
model_output: Dict[str, torch.Tensor],
*args,
**kwargs,
):
dataset_name = sample_list.dataset_name
with torch.no_grad():
for metric_name, metric in self.metrics.items():
if not self._is_dataset_applicable(
dataset_name, self.metric_dataset_names.get(metric_name, [])
):
continue
metric.update(sample_list, model_output)
def compute(self) -> Dict[str, torch.Tensor]:
results = {}
for metric_name, metric in self.metrics.items():
results[metric_name] = metric.compute()
return results
def reset(self) -> None:
for _, metric in self.metrics.items():
metric.reset()
def get_scalar_dict(self) -> Dict[str, torch.Tensor]:
results = self.compute()
scalar_dict = {}
for k, tensor_v in results.items():
val = torch.flatten(tensor_v)
if val.size(0) > 1:
# non-scalar will be ignored
continue
scalar_dict[k] = val.item()
return scalar_dict
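# Illustrative usage sketch, not part of the original module. The metric list
# mirrors the YAML in the class docstring and is assumed to be parsed by
# OmegaConf (entries are accessed with attribute syntax above); the metric
# type must already be registered via registry.register_torchmetric.
#
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.create(
#       [{"type": "top_k_accuracy", "key": "top_3_overlap", "params": {"k": 3}}]
#   )
#   tm = LightningTorchMetrics(cfg)
#   tm.update(sample_list, model_output)  # accumulate once per batch
#   tm.compute()                          # {"top_3_overlap": tensor(...)}
#   tm.get_scalar_dict()                  # scalar-valued metrics only
#   tm.reset()                            # clear state between splits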
| EXA-1-master | exa/models/mmf-main/mmf/trainers/lightning_core/torchmetric.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Any, Dict, List
import torch
from mmf.common.meter import Meter
from mmf.common.registry import registry
from mmf.common.report import Report
from mmf.common.sample import SampleList
from mmf.utils.logger import calculate_time_left, summarize_report
from mmf.utils.timer import Timer
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks.base import Callback
logger = logging.getLogger(__name__)
class LightningLoopCallback(Callback):
def __init__(self, lightning_trainer: Any):
super().__init__()
self.lightning_trainer = lightning_trainer
# this is lightning trainer's config
self.trainer_config = lightning_trainer.trainer_config
# training config configures training parameters.
self.training_config = lightning_trainer.training_config
self.run_type = lightning_trainer.run_type
# for logging
self.total_timer = Timer()
self.snapshot_timer = Timer()
self.snapshot_iterations = 0
if self.lightning_trainer.val_loader.has_len():
self.snapshot_iterations = len(self.lightning_trainer.val_loader)
self.train_timer = Timer()
def on_train_start(self, trainer: Trainer, pl_module: LightningModule):
registry.register("current_epoch", trainer.current_epoch)
self.train_combined_report = None
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: List,
batch: SampleList,
batch_idx: int,
dataloader_idx: int,
):
# prepare the next batch
self.lightning_trainer.data_module.train_loader.change_dataloader()
# aggregate train combined_report
self.train_combined_report = self._update_and_create_report(
SampleList(batch), batch_idx, outputs, pl_module, self.train_combined_report
)
# Continue if an update has not finished
if (batch_idx + 1) % self.trainer_config.accumulate_grad_batches:
return
# log
if (
self._get_num_updates_for_logging(trainer)
% self.trainer_config.log_every_n_steps
== 0
):
self._train_log(trainer, pl_module)
# Validation Callbacks
def on_validation_start(self, trainer: Trainer, pl_module: LightningModule):
logger.info("Evaluation time. Running on full validation set...")
self.snapshot_timer.reset()
self.val_combined_report = None
pl_module.val_meter.reset()
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Dict,
batch: SampleList,
batch_idx: int,
dataloader_idx: int,
):
# prepare the next batch
self.lightning_trainer.data_module.val_loader.change_dataloader()
# aggregate val_combined_report
self.val_combined_report = self._update_and_create_report(
batch,
batch_idx,
outputs,
pl_module,
self.val_combined_report,
update_meter=pl_module.val_meter,
)
self.val_combined_report = self.val_combined_report.detach()
self.val_combined_report.metrics = pl_module.metrics(
self.val_combined_report, self.val_combined_report
)
pl_module.val_meter.update_from_report(
self.val_combined_report, should_update_loss=False
)
def on_validation_end(self, trainer: Trainer, pl_module: LightningModule):
iterations = self._get_iterations_for_logging(trainer)
current_epochs = self._get_current_epoch_for_logging(trainer)
num_updates = self._get_num_updates_for_logging(trainer)
extra = {
"num_updates": num_updates,
"epoch": current_epochs,
"iterations": iterations,
"max_updates": trainer.max_steps,
"val_time": self.snapshot_timer.get_time_since_start(),
}
# TODO: @sash populate early stop info for logging (next mvp)
# extra.update(self.trainer.early_stop_callback.early_stopping.get_info())
self.train_timer.reset()
summarize_report(
current_iteration=iterations,
num_updates=num_updates,
max_updates=trainer.max_steps,
meter=pl_module.val_meter,
extra=extra,
tb_writer=self.lightning_trainer.tb_writer,
)
def _update_and_create_report(
self,
batch: Dict,
batch_idx: int,
step_output: Dict,
pl_module: LightningModule,
combined_report: Report = None,
update_meter: Meter = None,
):
report = Report(batch, step_output)
# Normalize losses
for key in report.losses.keys():
report.losses[key] = (
report.losses[key] / self.trainer_config.accumulate_grad_batches
)
if update_meter:
update_meter.update_from_report(report)
should_accumulate = not (
batch_idx % self.trainer_config.accumulate_grad_batches == 0
)
final_report = report
if should_accumulate and combined_report is not None:
combined_report.accumulate_tensor_fields_and_loss(
report, pl_module.metrics.required_params
)
combined_report.batch_size += report.batch_size
final_report = combined_report
return final_report
def get_optimizer(self, trainer: Trainer):
assert (
len(trainer.optimizers) == 1
), "mmf lightning_trainer supports 1 optimizer per model for now."
optimizer = trainer.optimizers[0]
return optimizer
def _get_current_epoch_for_logging(self, trainer: Trainer):
return trainer.current_epoch + 1
def _get_iterations_for_logging(self, trainer: Trainer):
return trainer.fit_loop.batch_idx + 1
def _get_num_updates_for_logging(self, trainer: Trainer):
return trainer.global_step
def _train_log(self, trainer: Trainer, pl_module: LightningModule):
self.train_combined_report = self.train_combined_report.detach()
if self.training_config.evaluate_metrics:
self.train_combined_report.metrics = pl_module.metrics(
self.train_combined_report, self.train_combined_report
)
pl_module.train_meter.update_from_report(self.train_combined_report)
extra = {}
if "cuda" in str(trainer.model.device):
extra["max mem"] = torch.cuda.max_memory_allocated() / 1024
extra["max mem"] //= 1024
if self.training_config.experiment_name:
extra["experiment"] = self.training_config.experiment_name
optimizer = self.get_optimizer(trainer)
num_updates = self._get_num_updates_for_logging(trainer)
current_iteration = self._get_iterations_for_logging(trainer)
extra.update(
{
"epoch": self._get_current_epoch_for_logging(trainer),
"iterations": current_iteration,
"num_updates": num_updates,
"max_updates": trainer.max_steps,
"lr": "{:.5f}".format(optimizer.param_groups[0]["lr"]).rstrip("0"),
"ups": "{:.2f}".format(
self.trainer_config.log_every_n_steps
/ self.train_timer.unix_time_since_start()
),
"time": self.train_timer.get_time_since_start(),
"time_since_start": self.total_timer.get_time_since_start(),
"eta": calculate_time_left(
max_updates=trainer.max_steps,
num_updates=num_updates,
timer=self.train_timer,
num_snapshot_iterations=self.snapshot_iterations,
log_interval=self.trainer_config.log_every_n_steps,
eval_interval=self.trainer_config.val_check_interval,
),
}
)
self.train_timer.reset()
summarize_report(
current_iteration=current_iteration,
num_updates=num_updates,
max_updates=trainer.max_steps,
meter=pl_module.train_meter,
extra=extra,
tb_writer=self.lightning_trainer.tb_writer,
)
| EXA-1-master | exa/models/mmf-main/mmf/trainers/lightning_core/loop_callback.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import csv
import json
import logging
import os
import warnings
from dataclasses import dataclass, field
from typing import List
import pytorch_lightning as pl
from mmf.common.registry import registry
from mmf.common.sample import convert_batch_to_sample_list
from mmf.utils.configuration import get_mmf_env
from mmf.utils.distributed import gather_tensor, is_main
from mmf.utils.file_io import PathManager
from mmf.utils.general import ckpt_name_from_core_args, foldername_from_config_override
from mmf.utils.logger import log_class_usage
from mmf.utils.timer import Timer
from omegaconf import OmegaConf
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
DEFAULT_CANDIDATE_FIELDS = [
"id",
"question_id",
"image_id",
"context_tokens",
"captions",
"scores",
]
@registry.register_test_reporter("file")
@registry.register_test_reporter("default")
class TestReporter(Dataset):
@dataclass
class Config:
# A set of fields to be *considered* for exporting by the reporter
# Note that `format_for_prediction` is what ultimately determines the
# exported fields
candidate_fields: List[str] = field(
default_factory=lambda: DEFAULT_CANDIDATE_FIELDS
)
# csv or json
predict_file_format: str = "json"
def __init__(
self,
datamodules: List[pl.LightningDataModule],
config: Config = None,
dataset_type: str = "train",
):
self.test_reporter_config = OmegaConf.merge(
OmegaConf.structured(self.Config), config
)
self.datamodules = datamodules
self.dataset_type = dataset_type
self.config = registry.get("config")
self.report = []
self.timer = Timer()
self.training_config = self.config.training
self.num_workers = self.training_config.num_workers
self.batch_size = self.training_config.batch_size
self.report_folder_arg = get_mmf_env(key="report_dir")
self.experiment_name = self.training_config.experiment_name
self.current_datamodule_idx = -1
self.dataset_names = list(self.datamodules.keys())
self.current_datamodule = self.datamodules[
self.dataset_names[self.current_datamodule_idx]
]
self.current_dataloader = None
self.save_dir = get_mmf_env(key="save_dir")
self.report_folder = ckpt_name_from_core_args(self.config)
self.report_folder += foldername_from_config_override(self.config)
self.report_folder = os.path.join(self.save_dir, self.report_folder)
self.report_folder = os.path.join(self.report_folder, "reports")
if self.report_folder_arg:
self.report_folder = self.report_folder_arg
self.candidate_fields = self.test_reporter_config.candidate_fields
PathManager.mkdirs(self.report_folder)
log_class_usage("TestReporter", self.__class__)
@property
def current_dataset(self):
self._check_current_dataloader()
return self.current_dataloader.dataset
def next_dataset(self, flush_report=True):
if self.current_datamodule_idx >= 0:
if flush_report:
self.flush_report()
else:
self.report = []
self.current_datamodule_idx += 1
if self.current_datamodule_idx == len(self.datamodules):
return False
else:
self.current_datamodule = self.datamodules[
self.dataset_names[self.current_datamodule_idx]
]
logger.info(
f"Predicting for {self.dataset_names[self.current_datamodule_idx]}"
)
return True
def flush_report(self):
if not is_main():
# Empty report in all processes to avoid any leaks
self.report = []
return
name = self.current_datamodule.dataset_name
time_format = "%Y-%m-%dT%H:%M:%S"
time = self.timer.get_time_hhmmss(None, format=time_format)
filename = name + "_"
if len(self.experiment_name) > 0:
filename += self.experiment_name + "_"
filename += self.dataset_type + "_"
filename += time
use_csv_writer = (
self.config.evaluation.predict_file_format == "csv"
or self.test_reporter_config.predict_file_format == "csv"
)
if use_csv_writer:
filepath = os.path.join(self.report_folder, filename + ".csv")
self.csv_dump(filepath)
else:
filepath = os.path.join(self.report_folder, filename + ".json")
self.json_dump(filepath)
logger.info(f"Wrote predictions for {name} to {os.path.abspath(filepath)}")
self.report = []
def postprocess_dataset_report(self):
self._check_current_dataloader()
if hasattr(self.current_dataset, "on_prediction_end"):
self.report = self.current_dataset.on_prediction_end(self.report)
def csv_dump(self, filepath):
with PathManager.open(filepath, "w") as f:
title = self.report[0].keys()
cw = csv.DictWriter(f, title, delimiter=",", quoting=csv.QUOTE_MINIMAL)
cw.writeheader()
cw.writerows(self.report)
def json_dump(self, filepath):
with PathManager.open(filepath, "w") as f:
json.dump(self.report, f)
def get_dataloader(self):
self.current_dataloader = getattr(
self.current_datamodule, f"{self.dataset_type}_dataloader"
)()
# Make sure to assign dataset to dataloader object as
# required by MMF
if not hasattr(self.current_dataloader, "dataset"):
self.current_dataloader.dataset = getattr(
self.current_datamodule, f"{self.dataset_type}_dataset"
)
return self.current_dataloader
def prepare_batch(self, batch):
self._check_current_dataloader()
if hasattr(self.current_dataset, "prepare_batch"):
batch = self.current_dataset.prepare_batch(batch)
batch = convert_batch_to_sample_list(batch)
batch.dataset_name = self.current_dataset.dataset_name
batch.dataset_type = self.dataset_type
return batch
def __len__(self):
self._check_current_dataloader()
return len(self.current_dataloader)
def _check_current_dataloader(self):
assert self.current_dataloader is not None, (
"Please call `get_dataloader` before accessing any "
+ "'current_dataloader' based function"
)
def add_to_report(self, report, model, *args, **kwargs):
if "execute_on_master_only" in kwargs:
warnings.warn(
"'execute_on_master_only keyword is deprecated and isn't used anymore",
DeprecationWarning,
)
self._check_current_dataloader()
for key in self.candidate_fields:
report = self.reshape_and_gather(report, key)
results = []
if hasattr(self.current_dataset, "format_for_prediction"):
results = self.current_dataset.format_for_prediction(report)
if hasattr(model, "format_for_prediction"):
results = model.format_for_prediction(results, report)
elif hasattr(model.module, "format_for_prediction"):
results = model.module.format_for_prediction(results, report)
self.report = self.report + results
def reshape_and_gather(self, report, key):
if key in report:
num_dims = report[key].dim()
if num_dims == 1:
report[key] = gather_tensor(report[key]).view(-1)
elif num_dims >= 2:
# Collect dims other than batch
other_dims = report[key].size()[1:]
report[key] = gather_tensor(report[key]).view(-1, *other_dims)
return report
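# Worked example, added for illustration (not in the original file): with 4
# distributed workers each holding report["scores"] of shape [8, 10],
# reshape_and_gather yields a single [32, 10] tensor. gather_tensor is
# assumed to stack the per-worker tensors along a new leading dimension
# (which is what the .view(-1, *other_dims) above implies), and the view
# merges the worker and batch dimensions. A 1-D field such as "question_id"
# of shape [8] is simply flattened to [32].
#
#   gathered = gather_tensor(report["scores"])   # -> [4, 8, 10]
#   gathered.view(-1, 10)                        # -> [32, 10]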
| EXA-1-master | exa/models/mmf-main/mmf/common/test_reporter.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Inspired from maskrcnn benchmark
from collections import defaultdict, deque
import torch
from mmf.common.registry import registry
from mmf.utils.distributed import reduce_dict
from mmf.utils.general import scalarize_dict_values
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20):
self.window_size = window_size
self.reset()
def reset(self):
self.deque = deque(maxlen=self.window_size)
self.averaged_value_deque = deque(maxlen=self.window_size)
self.batch_sizes = deque(maxlen=self.window_size)
self.total_samples = 0
self.total = 0.0
self.count = 0
def update(self, value, batch_size):
self.deque.append(value * batch_size)
self.averaged_value_deque.append(value)
self.batch_sizes.append(batch_size)
self.count += 1
self.total_samples += batch_size
self.total += value * batch_size
@property
def median(self):
d = torch.tensor(list(self.averaged_value_deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
s = torch.tensor(list(self.batch_sizes))
return d.sum().item() / s.sum().item()
@property
def global_avg(self):
return self.total / self.total_samples
def get_latest(self):
return self.averaged_value_deque[-1]
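# Illustrative usage sketch, not part of the original file; the numbers are
# made up to show how the windowed and global statistics differ.
#
#   sv = SmoothedValue(window_size=2)
#   sv.update(1.0, batch_size=4)
#   sv.update(3.0, batch_size=4)
#   sv.update(5.0, batch_size=4)   # window now holds only the last 2 updates
#   sv.avg           # 4.0 -> sample-weighted average over the window
#   sv.global_avg    # 3.0 -> average over all 12 samples seen so far
#   sv.get_latest()  # 5.0 -> most recent per-update value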
class Meter:
def __init__(self, delimiter=", "):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update_from_report(self, report, should_update_loss=True):
"""
This method updates the provided meter with report info.
It handles reducing metrics by default.
Args:
report (Report): report object whose content is used to populate
the current meter
Usage::
>>> meter = Meter()
>>> report = Report(prepared_batch, model_output)
>>> meter.update_from_report(report)
"""
if hasattr(report, "metrics"):
metrics_dict = report.metrics
reduced_metrics_dict = reduce_dict(metrics_dict)
if should_update_loss:
loss_dict = report.losses
reduced_loss_dict = reduce_dict(loss_dict)
with torch.no_grad():
meter_update_dict = {}
if should_update_loss:
meter_update_dict = scalarize_dict_values(reduced_loss_dict)
total_loss_key = report.dataset_type + "/total_loss"
total_loss = sum(meter_update_dict.values())
registry.register(total_loss_key, total_loss)
meter_update_dict.update({total_loss_key: total_loss})
if hasattr(report, "metrics"):
metrics_dict = scalarize_dict_values(reduced_metrics_dict)
meter_update_dict.update(**metrics_dict)
self._update(meter_update_dict, report.batch_size)
def _update(self, update_dict, batch_size):
scalarized = scalarize_dict_values(update_dict)
for k, v in scalarized.items():
# Skipping .item() call
# __format__() for tensor has .item
# Therefore it will implicitly get called when needed
self.meters[k].update(v, batch_size)
def update_from_meter(self, meter):
for key, value in meter.meters.items():
assert isinstance(value, SmoothedValue)
self.meters[key] = value
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def get_scalar_dict(self):
scalar_dict = {}
for k, v in self.meters.items():
scalar_dict[k] = v.get_latest()
return scalar_dict
def get_log_dict(self):
log_dict = {}
for k, v in self.meters.items():
if "train" in k:
log_dict[k] = f"{v.median:.4f}"
log_dict[f"{k}/avg"] = f"{v.global_avg:.4f}"
else:
log_dict[k] = f"{v.global_avg:.4f}"
return log_dict
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
if "train" in name:
loss_str.append(f"{name}: {meter.median:.4f} ({meter.global_avg:.4f})")
else:
# In case of val print global avg
loss_str.append(f"{name}: {meter.global_avg:.4f}")
return self.delimiter.join(loss_str)
def reset(self):
del self.meters
self.meters = defaultdict(SmoothedValue)
| EXA-1-master | exa/models/mmf-main/mmf/common/meter.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Registry is the central source of truth in MMF. Inspired by Redux's
concept of global store, Registry maintains mappings of various information
to unique keys. Special functions in registry can be used as decorators to
register different kind of classes.
Import the global registry object using
``from mmf.common.registry import registry``
Various decorators for registering different kinds of classes with unique keys:
- Register a trainer: ``@registry.register_trainer``
- Register a dataset builder: ``@registry.register_builder``
- Register a callback function: ``@registry.register_callback``
- Register a metric: ``@registry.register_metric``
- Register a loss: ``@registry.register_loss``
- Register a fusion technique: ``@registry.register_fusion``
- Register a model: ``@registry.register_model``
- Register a processor: ``@registry.register_processor``
- Register an optimizer: ``@registry.register_optimizer``
- Register a scheduler: ``@registry.register_scheduler``
- Register an encoder: ``@registry.register_encoder``
- Register a decoder: ``@registry.register_decoder``
- Register a transformer backend: ``@registry.register_transformer_backend``
- Register a transformer head: ``@registry.register_transformer_head``
- Register a test reporter: ``@registry.register_test_reporter``
- Register a pl datamodule: ``@registry.register_datamodule``
"""
from mmf.utils.env import setup_imports
class Registry:
r"""Class for registry object which acts as central source of truth
for MMF
"""
mapping = {
# Mappings of builder name to their respective classes
# Use `registry.register_builder` to register a builder class
# with a specific name
# Further, use the name with which the class is registered in the
# command line or configuration to load that specific dataset
"builder_name_mapping": {},
# Similar to the builder_name_mapping above except that this
# one is used to keep a mapping for dataset to its trainer class.
"trainer_name_mapping": {},
"model_name_mapping": {},
"metric_name_mapping": {},
"torchmetric_name_mapping": {},
"loss_name_mapping": {},
"pool_name_mapping": {},
"fusion_name_mapping": {},
"optimizer_name_mapping": {},
"scheduler_name_mapping": {},
"processor_name_mapping": {},
"encoder_name_mapping": {},
"decoder_name_mapping": {},
"transformer_backend_name_mapping": {},
"transformer_head_name_mapping": {},
"test_reporter_mapping": {},
"iteration_strategy_name_mapping": {},
"state": {},
"callback_name_mapping": {},
}
@classmethod
def register_trainer(cls, name):
r"""Register a trainer to registry with key 'name'
Args:
name: Key with which the trainer will be registered.
Usage::
from mmf.common.registry import registry
from mmf.trainers.custom_trainer import CustomTrainer
@registry.register_trainer("custom_trainer")
class CustomTrainer():
...
"""
def wrap(trainer_cls):
cls.mapping["trainer_name_mapping"][name] = trainer_cls
return trainer_cls
return wrap
@classmethod
def register_builder(cls, name):
r"""Register a dataset builder to registry with key 'name'
Args:
name: Key with which the builder will be registered.
Usage::
from mmf.common.registry import registry
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
@registry.register_builder("vqa2")
class VQA2Builder(BaseDatasetBuilder):
...
"""
def wrap(builder_cls):
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
assert issubclass(
builder_cls, BaseDatasetBuilder
), "All builders must inherit BaseDatasetBuilder class"
cls.mapping["builder_name_mapping"][name] = builder_cls
return builder_cls
return wrap
@classmethod
def register_callback(cls, name):
r"""Register a callback to registry with key 'name'
Args:
name: Key with which the callback will be registered.
Usage::
from mmf.common.registry import registry
from mmf.trainers.callbacks.base import Callback
@registry.register_callback("logistic")
class LogisticCallback(Callback):
...
"""
def wrap(func):
from mmf.trainers.callbacks.base import Callback
assert issubclass(
func, Callback
), "All callbacks must inherit Callback class"
cls.mapping["callback_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_metric(cls, name):
r"""Register a metric to registry with key 'name'
Args:
name: Key with which the metric will be registered.
Usage::
from mmf.common.registry import registry
from mmf.modules.metrics import BaseMetric
@registry.register_metric("r@1")
class RecallAt1(BaseMetric):
...
"""
def wrap(func):
from mmf.modules.metrics import BaseMetric
assert issubclass(
func, BaseMetric
), "All Metric must inherit BaseMetric class"
cls.mapping["metric_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_torchmetric(cls, name):
r"""Register a torchmetric to registry with key 'name'
Args:
name: Key with which the torchmetric will be registered.
Usage::
from mmf.common.registry import registry
from torchmetrics.metric import Metric
@registry.register_torchmetric("topk_accuracy")
class TopKAccuracy(Metric):
...
"""
def wrap(func):
from torchmetrics.metric import Metric
assert issubclass(func, Metric), "All metrics must inherit the Metric class"
cls.mapping["torchmetric_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_loss(cls, name):
r"""Register a loss to registry with key 'name'
Args:
name: Key with which the loss will be registered.
Usage::
from mmf.common.registry import registry
from torch import nn
@registry.register_task("logit_bce")
class LogitBCE(nn.Module):
...
"""
def wrap(func):
from torch import nn
assert issubclass(
func, nn.Module
), "All loss must inherit torch.nn.Module class"
cls.mapping["loss_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_pooler(cls, name):
r"""Register a modality pooling method to registry with key 'name'
Args:
name: Key with which the pooling method will be registered.
Usage::
from mmf.common.registry import registry
from torch import nn
@registry.register_pool("average_pool")
class average_pool(nn.Module):
...
"""
def wrap(func):
from torch import nn
assert issubclass(
func, nn.Module
), "All pooling methods must inherit torch.nn.Module class"
cls.mapping["pool_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_fusion(cls, name):
r"""Register a fusion technique to registry with key 'name'
Args:
name: Key with which the fusion technique will be registered
Usage::
from mmf.common.registry import registry
from torch import nn
@registry.register_fusion("linear_sum")
class LinearSum():
...
"""
def wrap(func):
from torch import nn
assert issubclass(
func, nn.Module
), "All Fusion must inherit torch.nn.Module class"
cls.mapping["fusion_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_model(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the model will be registered.
Usage::
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
@registry.register_task("pythia")
class Pythia(BaseModel):
...
"""
def wrap(func):
from mmf.models.base_model import BaseModel
assert issubclass(
func, BaseModel
), "All models must inherit BaseModel class"
cls.mapping["model_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_processor(cls, name):
r"""Register a processor to registry with key 'name'
Args:
name: Key with which the processor will be registered.
Usage::
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
@registry.register_task("glove")
class GloVe(BaseProcessor):
...
"""
def wrap(func):
from mmf.datasets.processors.processors import BaseProcessor
assert issubclass(
func, BaseProcessor
), "All Processor classes must inherit BaseProcessor class"
cls.mapping["processor_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_optimizer(cls, name):
def wrap(func):
cls.mapping["optimizer_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_scheduler(cls, name):
def wrap(func):
cls.mapping["scheduler_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_transformer_backend(cls, name):
def wrap(func):
cls.mapping["transformer_backend_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_transformer_head(cls, name):
def wrap(func):
cls.mapping["transformer_head_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_test_reporter(cls, name):
def wrap(func):
cls.mapping["test_reporter_mapping"][name] = func
return func
return wrap
@classmethod
def register_decoder(cls, name):
r"""Register a decoder to registry with key 'name'
Args:
name: Key with which the decoder will be registered.
Usage::
from mmf.common.registry import registry
from mmf.utils.text import TextDecoder
@registry.register_decoder("nucleus_sampling")
class NucleusSampling(TextDecoder):
...
"""
def wrap(decoder_cls):
from mmf.utils.text import TextDecoder
assert issubclass(
decoder_cls, TextDecoder
), "All decoders must inherit TextDecoder class"
cls.mapping["decoder_name_mapping"][name] = decoder_cls
return decoder_cls
return wrap
@classmethod
def register_encoder(cls, name):
r"""Register a encoder to registry with key 'name'
Args:
name: Key with which the encoder will be registered.
Usage::
from mmf.common.registry import registry
from mmf.modules.encoders import Encoder
@registry.register_encoder("transformer")
class TransformerEncoder(Encoder):
...
"""
def wrap(encoder_cls):
from mmf.modules.encoders import Encoder
assert issubclass(
encoder_cls, Encoder
), "All encoders must inherit Encoder class"
cls.mapping["encoder_name_mapping"][name] = encoder_cls
return encoder_cls
return wrap
@classmethod
def register_datamodule(cls, name):
r"""Register a datamodule to registry with key 'name'
Args:
name: Key with which the datamodule will be registered.
Usage::
from mmf.common.registry import registry
import pytorch_lightning as pl
@registry.register_datamodule("my_datamodule")
class MyDataModule(pl.LightningDataModule):
...
"""
def wrap(datamodule_cls):
import pytorch_lightning as pl
assert issubclass(
datamodule_cls, pl.LightningDataModule
), "All datamodules must inherit PyTorch Lightning DataModule class"
cls.mapping["builder_name_mapping"][name] = datamodule_cls
return datamodule_cls
return wrap
@classmethod
def register_iteration_strategy(cls, name):
r"""Register an iteration_strategy to registry with key 'name'
Args:
name: Key with which the iteration_strategy will be registered.
Usage::
from dataclasses import dataclass
from mmf.common.registry import registry
from mmf.datasets.iterators import IterationStrategy
@registry.register_iteration_strategy("my_iteration_strategy")
class MyStrategy(IterationStrategy):
@dataclass
class Config:
name: str = "my_strategy"
def __init__(self, config, dataloader):
...
"""
def wrap(iteration_strategy_cls):
from mmf.datasets.iteration_strategies import IterationStrategy
assert issubclass(
iteration_strategy_cls, IterationStrategy
), "All datamodules must inherit IterationStrategy class"
cls.mapping["iteration_strategy_name_mapping"][
name
] = iteration_strategy_cls
return iteration_strategy_cls
return wrap
@classmethod
def register(cls, name, obj):
r"""Register an item to registry with key 'name'
Args:
name: Key with which the item will be registered.
Usage::
from mmf.common.registry import registry
registry.register("config", {})
"""
path = name.split(".")
current = cls.mapping["state"]
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = obj
@classmethod
def get_trainer_class(cls, name):
return cls.mapping["trainer_name_mapping"].get(name, None)
@classmethod
def get_builder_class(cls, name):
return cls.mapping["builder_name_mapping"].get(name, None)
@classmethod
def get_callback_class(cls, name):
return cls.mapping["callback_name_mapping"].get(name, None)
@classmethod
def get_model_class(cls, name):
return cls.mapping["model_name_mapping"].get(name, None)
@classmethod
def get_processor_class(cls, name):
return cls.mapping["processor_name_mapping"].get(name, None)
@classmethod
def get_metric_class(cls, name):
return cls.mapping["metric_name_mapping"].get(name, None)
@classmethod
def get_torchmetric_class(cls, name):
return cls.mapping["torchmetric_name_mapping"].get(name, None)
@classmethod
def get_loss_class(cls, name):
return cls.mapping["loss_name_mapping"].get(name, None)
@classmethod
def get_pool_class(cls, name):
return cls.mapping["pool_name_mapping"].get(name, None)
@classmethod
def get_optimizer_class(cls, name):
return cls.mapping["optimizer_name_mapping"].get(name, None)
@classmethod
def get_scheduler_class(cls, name):
return cls.mapping["scheduler_name_mapping"].get(name, None)
@classmethod
def get_decoder_class(cls, name):
return cls.mapping["decoder_name_mapping"].get(name, None)
@classmethod
def get_encoder_class(cls, name):
return cls.mapping["encoder_name_mapping"].get(name, None)
@classmethod
def get_iteration_strategy_class(cls, name):
return cls.mapping["iteration_strategy_name_mapping"].get(name, None)
@classmethod
def get_transformer_backend_class(cls, name):
return cls.mapping["transformer_backend_name_mapping"].get(name, None)
@classmethod
def get_transformer_head_class(cls, name):
return cls.mapping["transformer_head_name_mapping"].get(name, None)
@classmethod
def get_test_rerporter_class(cls, name):
return cls.mapping["test_reporter_mapping"].get(name, None)
@classmethod
def get(cls, name, default=None, no_warning=False):
r"""Get an item from registry with key 'name'
Args:
name (string): Key whose value needs to be retrieved.
default: If passed and key is not in registry, default value will
be returned with a warning. Default: None
no_warning (bool): If passed as True, warning when key doesn't exist
will not be generated. Useful for MMF's
internal operations. Default: False
Usage::
from mmf.common.registry import registry
config = registry.get("config")
"""
original_name = name
name = name.split(".")
value = cls.mapping["state"]
for subname in name:
value = value.get(subname, default)
if value is default:
break
if (
"writer" in cls.mapping["state"]
and value == default
and no_warning is False
):
cls.mapping["state"]["writer"].warning(
"Key {} is not present in registry, returning default value "
"of {}".format(original_name, default)
)
return value
@classmethod
def unregister(cls, name):
r"""Remove an item from registry with key 'name'
Args:
name: Key which needs to be removed.
Usage::
from mmf.common.registry import registry
config = registry.unregister("config")
"""
return cls.mapping["state"].pop(name, None)
registry = Registry()
# Only setup imports in the main process. This means the registry won't be
# fully populated in spawned child processes (such as dataloader processes),
# though it will still be instantiated. This is to prevent issues such as
# https://github.com/facebookresearch/mmf/issues/355
if __name__ == "__main__":
setup_imports()
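# Illustrative usage sketch (not part of the original MMF file): the plain
# key/value side of the registry. The "example.nested.key" name is made up
# purely for demonstration.
def _example_registry_usage():
    registry.register("example.nested.key", {"foo": 1})
    value = registry.get("example.nested.key", no_warning=True)  # -> {"foo": 1}
    registry.unregister("example")  # remove the whole "example" subtree again
    return value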
| EXA-1-master | exa/models/mmf-main/mmf/common/registry.py |
# Copyright (c) Facebook, Inc. and its affiliates.
imdb_version = 1
FASTTEXT_WIKI_URL = (
"https://dl.fbaipublicfiles.com/pythia/pretrained_models/fasttext/wiki.en.bin"
)
CLEVR_DOWNLOAD_URL = "https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"
VISUAL_GENOME_CONSTS = {
"imdb_url": "https://dl.fbaipublicfiles.com/pythia/data/imdb/visual_genome.tar.gz",
"features_url": "https://dl.fbaipublicfiles.com/pythia/features/visual_genome.tar.gz", # noqa
"synset_file": "vg_synsets.txt",
"vocabs": "https://dl.fbaipublicfiles.com/pythia/data/vocab.tar.gz",
}
VISUAL_DIALOG_CONSTS = {
"imdb_url": {
"train": "https://www.dropbox.com/s/ix8keeudqrd8hn8/visdial_1.0_train.zip?dl=1",
"val": "https://www.dropbox.com/s/ibs3a0zhw74zisc/visdial_1.0_val.zip?dl=1",
"test": "https://www.dropbox.com/s/ibs3a0zhw74zisc/visdial_1.0_test.zip?dl=1",
},
"features_url": {
"visual_dialog": "https://dl.fbaipublicfiles.com/pythia/features/visual_dialog.tar.gz", # noqa
"coco": "https://dl.fbaipublicfiles.com/pythia/features/coco.tar.gz",
},
"vocabs": "https://dl.fbaipublicfiles.com/pythia/data/vocab.tar.gz",
}
CLIP_VOCAB_CONSTS = {
"url": "https://dl.fbaipublicfiles.com/mmf/clip/bpe_simple_vocab_16e6.txt.gz",
"file_name": "bpe_simple_vocab_16e6.txt.gz",
"hashcode": "924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a",
}
DOWNLOAD_CHUNK_SIZE = 1024 * 1024
IMAGE_COLOR_MEAN = (0.485, 0.456, 0.406)
IMAGE_COLOR_STD = (0.229, 0.224, 0.225)
INCEPTION_IMAGE_NORMALIZE = (0.5, 0.5, 0.5)
| EXA-1-master | exa/models/mmf-main/mmf/common/constants.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import dataclass
from typing import Any, Dict, List
@dataclass
class PerSetAttributeType:
train: List[str]
val: List[str]
test: List[str]
@dataclass
class ProcessorConfigType:
type: str
params: Dict[str, Any]
@dataclass
class MMFDatasetConfigType:
data_dir: str
use_images: bool
use_features: bool
zoo_requirements: List[str]
images: PerSetAttributeType
features: PerSetAttributeType
annotations: PerSetAttributeType
processors: Dict[str, ProcessorConfigType]
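# Illustrative sketch (not part of the original MMF file): how these config
# dataclasses nest together. All paths and names below are placeholders.
def _example_dataset_config() -> MMFDatasetConfigType:
    splits = PerSetAttributeType(
        train=["train.npy"], val=["val.npy"], test=["test.npy"]
    )
    return MMFDatasetConfigType(
        data_dir="/tmp/data",
        use_images=False,
        use_features=True,
        zoo_requirements=["vqa2.defaults"],
        images=splits,
        features=splits,
        annotations=splits,
        processors={"text_processor": ProcessorConfigType(type="vocab", params={})},
    )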
| EXA-1-master | exa/models/mmf-main/mmf/common/typings.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .meter import Meter
from .registry import registry
from .sample import Sample, SampleList
__all__ = ["Sample", "SampleList", "Meter", "registry"]
| EXA-1-master | exa/models/mmf-main/mmf/common/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from mmf.common.sample import convert_batch_to_sample_list
class BatchCollator:
def __init__(self, dataset_name, dataset_type):
self._dataset_name = dataset_name
self._dataset_type = dataset_type
def __call__(self, batch):
sample_list = convert_batch_to_sample_list(batch)
sample_list.dataset_name = self._dataset_name
sample_list.dataset_type = self._dataset_type
return sample_list
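# Illustrative usage sketch (not part of the original MMF file): plugging
# BatchCollator into a torch DataLoader. The toy dataset of plain dicts is
# made up; MMF datasets normally return Sample objects.
def _example_batch_collator():
    import torch
    from torch.utils.data import DataLoader

    data = [{"text": torch.tensor([1, 2])}, {"text": torch.tensor([3, 4])}]
    collator = BatchCollator(dataset_name="toy", dataset_type="train")
    loader = DataLoader(data, batch_size=2, collate_fn=collator)
    sample_list = next(iter(loader))
    # sample_list.text has shape (2, 2); dataset_name/dataset_type are attached
    return sample_list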
| EXA-1-master | exa/models/mmf-main/mmf/common/batch_collator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import warnings
from mmf.common.sample import SampleList
from mmf.datasets.multi_dataset_loader import MultiDatasetLoader
from mmf.utils.build import build_multiple_datamodules, build_test_reporter
class DatasetLoader:
def __init__(self, config):
# TODO: Remove in next version
warnings.warn(
"DatasetLoader has been deprecated and will be removed in future versions. "
"Please use mmf.datasets.multi_datamodule.MultiDataModule instead.",
DeprecationWarning,
stacklevel=2,
)
self.config = config
def load_datasets(self):
self.train_dataset = MultiDatasetLoader("train")
self.val_dataset = MultiDatasetLoader("val")
self.test_dataset = MultiDatasetLoader("test")
self.train_dataset.load(self.config)
self.val_dataset.load(self.config)
self.test_dataset.load(self.config)
# If number of datasets is one, this will return the first loader
self.train_loader = self.train_dataset
self.val_loader = self.val_dataset
self.test_loader = self.test_dataset
self.mapping = {
"train": self.train_dataset,
"val": self.val_dataset,
"test": self.test_dataset,
}
self.test_reporter = None
self.should_not_log = self.config.training.should_not_log
@property
def dataset_config(self):
return self._dataset_config
@dataset_config.setter
def dataset_config(self, config):
self._dataset_config = config
def get_config(self):
return self._dataset_config
def get_test_reporter(self, dataset_type):
dataset = getattr(self, f"{dataset_type}_dataset")
datamodules = build_multiple_datamodules(
dataset.dataset_list, self.config.dataset_config
)
test_reporter_config = self._get_test_reporter_config()
return build_test_reporter(datamodules, test_reporter_config, dataset_type)
def _get_test_reporter_config(self):
from mmf.utils.configuration import get_global_config
return get_global_config("evaluation.reporter")
def prepare_batch(self, batch, *args, **kwargs):
batch = SampleList(batch)
return self.mapping[batch.dataset_type].prepare_batch(batch)
def verbose_dump(self, report, *args, **kwargs):
if self.config.training.verbose_dump:
dataset_type = report.dataset_type
self.mapping[dataset_type].verbose_dump(report, *args, **kwargs)
def seed_sampler(self, dataset_type, seed):
dataset = getattr(self, f"{dataset_type}_dataset")
dataset.seed_sampler(seed)
| EXA-1-master | exa/models/mmf-main/mmf/common/dataset_loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
``Sample`` and ``SampleList`` are data structures for arbitrary data returned from a
dataset. To work with MMF, minimum requirement for datasets is to return
an object of ``Sample`` class and for models to accept an object of type `SampleList`
as an argument.
``Sample`` is used to represent an arbitrary sample from dataset, while ``SampleList``
is list of Sample combined in an efficient way to be used by the model.
In simple term, ``SampleList`` is a batch of Sample but allow easy access of
attributes from ``Sample`` while taking care of properly batching things.
"""
import collections
import collections.abc
import warnings
from collections import OrderedDict
from typing import Any, Dict, Union
import torch
class Sample(OrderedDict):
"""Sample represent some arbitrary data. All datasets in MMF must
return an object of type ``Sample``.
Args:
init_dict (Dict): Dictionary to init ``Sample`` class with.
Usage::
>>> sample = Sample({"text": torch.tensor(2)})
>>> sample.text.zero_()
# Custom attributes can be added to ``Sample`` after initialization
>>> sample.context = torch.tensor(4)
"""
def __init__(self, init_dict=None):
if init_dict is None:
init_dict = {}
super().__init__(init_dict)
def __setattr__(self, key, value):
if isinstance(value, collections.abc.Mapping):
value = Sample(value)
self[key] = value
def __setitem__(self, key, value):
if isinstance(value, collections.abc.Mapping):
value = Sample(value)
super().__setitem__(key, value)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def fields(self):
"""Get current attributes/fields registered under the sample.
Returns:
List[str]: Attributes registered under the Sample.
"""
return list(self.keys())
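# Illustrative usage sketch (not part of the original MMF file): building a
# Sample and adding attributes after construction. Field names are arbitrary.
def _example_sample():
    sample = Sample({"text": torch.tensor([1, 2, 3])})
    sample.image_feature = torch.zeros(4)  # attribute-style assignment
    sample.metadata = {"id": 7}  # nested mappings are converted to Samples
    return sample.fields()  # ["text", "image_feature", "metadata"]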
class SampleList(OrderedDict):
"""``SampleList`` is used to collate a list of ``Sample`` into a batch during batch
preparation. It can be thought of as a merger of list of Dicts into a single Dict.
If ``Sample`` contains an attribute 'text' of size (2) and there are 10 samples in
list, the returned ``SampleList`` will have an attribute 'text' which is a tensor
of size (10, 2).
Args:
samples (type): List of ``Sample`` from which the ``SampleList``
will be created.
Usage::
>>> sample_list = SampleList([
Sample({"text": torch.tensor(2)}),
Sample({"text": torch.tensor(2)})
])
>>> sample_list.text
torch.tensor([2, 2])
"""
_TENSOR_FIELD_ = "_tensor_field"
def __init__(self, samples=None):
super().__init__(self)
if samples is None:
samples = []
if len(samples) == 0:
return
if self._check_and_load_dict(samples):
return
# If passed sample list was in form of key, value pairs of tuples
# return after loading these
if self._check_and_load_tuple(samples):
return
fields = samples[0].keys()
for field in fields:
if isinstance(samples[0][field], torch.Tensor):
size = (len(samples), *samples[0][field].size())
self[field] = samples[0][field].new_empty(size)
if self._get_tensor_field() is None:
self._set_tensor_field(field)
else:
self[field] = [None for _ in range(len(samples))]
for idx, sample in enumerate(samples):
# it should be a tensor but not a 0-d tensor
if (
isinstance(sample[field], torch.Tensor)
and len(sample[field].size()) != 0
and sample[field].size(0) != samples[0][field].size(0)
):
raise AssertionError(
"Fields for all samples must be equally sized. "
"{} is of different sizes".format(field)
)
self[field][idx] = self._get_data_copy(sample[field])
if isinstance(samples[0][field], collections.abc.Mapping):
self[field] = SampleList(self[field])
def _check_and_load_tuple(self, samples):
if isinstance(samples[0], (tuple, list)) and isinstance(samples[0][0], str):
for kv_pair in samples:
self.add_field(kv_pair[0], kv_pair[1])
return True
else:
return False
def _check_and_load_dict(self, samples):
if isinstance(samples, collections.abc.Mapping):
for key, value in samples.items():
self.add_field(key, value)
return True
else:
return False
def _fix_sample_type(self, samples):
if not isinstance(samples[0], Sample):
proper_samples = []
for sample in samples:
proper_samples.append(Sample(sample))
samples = proper_samples
return samples
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
if key not in self:
raise AttributeError(
"Key {} not found in the SampleList. "
"Valid choices are {}".format(key, self.fields())
)
fields = self.keys()
if key in fields:
return self[key]
sample = Sample()
for field in fields:
sample[field] = self[field][key]
return sample
def get_device(self):
field_tensor = self._get_tensor_field()
assert (
field_tensor is not None
), f"No tensor field in sample list, available keys: {self.fields()}"
return self[field_tensor].device
def get_item_list(self, key):
"""Get ``SampleList`` of only one particular attribute that is present
in the ``SampleList``.
Args:
key (str): Attribute whose ``SampleList`` will be made.
Returns:
SampleList: SampleList containing only the attribute value of the key
which was passed.
"""
sample = self[key]
return SampleList([sample])
def copy(self):
"""Get a copy of the current SampleList
Returns:
SampleList: Copy of current SampleList.
"""
sample_list = SampleList()
fields = self.fields()
for field in fields:
sample_list.add_field(field, self[field])
return sample_list
def fields(self):
"""Get current attributes/fields registered under the SampleList.
Returns:
List[str]: list of attributes of the SampleList.
"""
return list(self.keys())
def get_fields(self, fields):
"""Get a new ``SampleList`` generated from the current ``SampleList``
but contains only the attributes passed in `fields` argument
Args:
fields (List[str]): Attributes whose ``SampleList`` will be made.
Returns:
SampleList: SampleList containing only the attribute values of the fields
which were passed.
"""
current_fields = self.fields()
return_list = SampleList()
for field in fields:
if field not in current_fields:
raise AttributeError(
"{} not present in SampleList. "
"Valid choices are {}".format(field, current_fields)
)
return_list.add_field(field, self[field])
return return_list
def get_field(self, field):
"""Get value of a particular attribute
Args:
field (str): Attribute whose value is to be returned.
"""
return self[field]
def _get_data_copy(self, data):
# if isinstance(data, torch.Tensor):
# copy_ = data.clone()
# else:
# copy_ = deepcopy(data)
# return copy_
return data
def _get_tensor_field(self):
return self.__dict__.get(SampleList._TENSOR_FIELD_, None)
def _set_tensor_field(self, value):
self.__dict__[SampleList._TENSOR_FIELD_] = value
def get_batch_size(self):
"""Get batch size of the current ``SampleList``. There must be a tensor
be a tensor present inside sample list to use this function.
Returns:
int: Size of the batch in ``SampleList``.
"""
tensor_field = self._get_tensor_field()
assert tensor_field is not None, "There is no tensor yet in SampleList"
return self[tensor_field].size(0)
def add_field(self, field, data):
"""Add an attribute ``field`` with value ``data`` to the SampleList
Args:
field (str): Key under which the data will be added.
data (object): Data to be added, can be a ``torch.Tensor``, ``list``
or ``Sample``
"""
fields = self.fields()
tensor_field = self._get_tensor_field()
if (
len(fields) != 0
and isinstance(data, torch.Tensor)
and len(data.size()) != 0
and tensor_field is not None
and data.size(0) != self[tensor_field].size(0)
):
raise AssertionError(
"A tensor field to be added must "
"have same size as existing tensor "
"fields in SampleList. "
"Passed size: {}, Required size: {}".format(
len(data), len(self[tensor_field])
)
)
if isinstance(data, collections.abc.Mapping):
self[field] = SampleList(data)
else:
self[field] = self._get_data_copy(data)
if isinstance(self[field], torch.Tensor) and tensor_field is None:
self._set_tensor_field(field)
def to(self, device, non_blocking=True):
"""Similar to ``.to`` function on a `torch.Tensor`. Moves all of the
tensors present inside the ``SampleList`` to a particular device. If an
attribute's value is not a tensor, it is ignored and kept as it is.
Args:
device (str|torch.device): Device to which the ``SampleList`` should
be moved.
non_blocking (bool): Whether the move should be non_blocking. Default: True
Returns:
SampleList: a SampleList moved to the ``device``.
"""
fields = self.keys()
sample_list = self.copy()
if not isinstance(device, torch.device):
if not isinstance(device, str):
raise TypeError(
"device must be either 'str' or "
"'torch.device' type, {} found".format(type(device))
)
device = torch.device(device)
for field in fields:
if hasattr(sample_list[field], "to"):
sample_list[field] = sample_list[field].to(
device, non_blocking=non_blocking
)
return sample_list
def pin_memory(self):
"""In custom batch object, we need to define pin_memory function so that
PyTorch can actually apply pinning. This function just individually pins
all of the tensor fields
"""
fields = self.keys()
for field in fields:
if hasattr(self[field], "pin_memory"):
# This will also handle nested sample list recursively
self[field] = self[field].pin_memory()
return self
def detach(self):
fields = self.keys()
for field in fields:
self[field] = detach_tensor(self[field])
return self
def to_dict(self) -> Dict[str, Any]:
"""Converts a sample list to dict, this is useful for TorchScript and for
other internal API unification efforts.
Returns:
Dict[str, Any]: A dict representation of current sample list
"""
sample_dict = {}
fields = self.keys()
for field in fields:
# Handle nested sample list recursively
if hasattr(self[field], "to_dict"):
sample_dict[field] = self[field].to_dict()
else:
sample_dict[field] = self[field]
return sample_dict
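# Illustrative usage sketch (not part of the original MMF file): batching two
# Samples into a SampleList and moving it to a device. Values are placeholders.
def _example_sample_list():
    samples = [
        Sample({"text": torch.tensor([1, 2])}),
        Sample({"text": torch.tensor([3, 4])}),
    ]
    sample_list = SampleList(samples)
    sample_list.add_field("dataset_name", "toy")
    assert sample_list.get_batch_size() == 2
    assert sample_list.text.size() == torch.Size([2, 2])
    return sample_list.to("cpu")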
def convert_batch_to_sample_list(
batch: Union[SampleList, Dict[str, Any]]
) -> SampleList:
# Create and return sample list with proper name
# and type set if it is already not a sample list
# (case of batched iterators)
sample_list = batch
if (
# Check if batch is a list before checking batch[0]
# or len as sometimes batch is already SampleList
isinstance(batch, list)
and len(batch) == 1
and isinstance(batch[0], SampleList)
):
sample_list = batch[0]
elif not isinstance(batch, SampleList):
sample_list = SampleList(batch)
if sample_list._get_tensor_field() is None:
sample_list = SampleList(sample_list.to_dict())
return sample_list
device_type = Union[str, torch.device]
def to_device(
sample_list: Union[SampleList, Dict[str, Any]], device: device_type = "cuda"
) -> SampleList:
if isinstance(sample_list, collections.abc.Mapping):
sample_list = convert_batch_to_sample_list(sample_list)
# to_device is specifically for SampleList
# if user is passing something custom built
if not isinstance(sample_list, SampleList):
warnings.warn(
"You are not returning SampleList/Sample from your dataset. "
"MMF expects you to move your tensors to cuda yourself."
)
return sample_list
if isinstance(device, str):
device = torch.device(device)
# default value of device_type is cuda
# Other device types such as xla can also be passed.
# Fall back to cpu only happens when device_type
# is set to cuda but cuda is not available.
if device.type == "cuda" and not torch.cuda.is_available():
warnings.warn(
"Selected device is cuda, but it is NOT available!!! Falling back on cpu."
)
device = torch.device("cpu")
if sample_list.get_device() != device:
sample_list = sample_list.to(device)
return sample_list
def detach_tensor(tensor: Any) -> Any:
"""Detaches any element passed which has a `.detach` function defined.
Currently, in MMF can be SampleList, Report or a tensor.
Args:
tensor (Any): Item to be detached
Returns:
Any: Detached element
"""
if hasattr(tensor, "detach"):
tensor = tensor.detach()
return tensor
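# Illustrative usage sketch (not part of the original MMF file): the helper
# functions above used together, roughly as a trainer would before a forward
# pass. "cpu" is used so the sketch also runs without CUDA.
def _example_to_device():
    batch = {"text": torch.tensor([[1, 2], [3, 4]])}
    sample_list = convert_batch_to_sample_list(batch)
    sample_list = to_device(sample_list, "cpu")
    return detach_tensor(sample_list.text)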
| EXA-1-master | exa/models/mmf-main/mmf/common/sample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import collections.abc
import copy
import warnings
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from mmf.common.sample import detach_tensor, SampleList
class Report(OrderedDict):
def __init__(
self, batch: SampleList = None, model_output: Dict[str, Any] = None, *args
):
super().__init__(self)
if batch is None:
return
if model_output is None:
model_output = {}
if self._check_and_load_tuple(batch):
return
all_args = [batch, model_output] + [*args]
for idx, arg in enumerate(all_args):
if not isinstance(arg, collections.abc.Mapping):
raise TypeError(
"Argument {:d}, {} must be of instance of "
"collections.abc.Mapping".format(idx, arg)
)
self.batch_size = batch.get_batch_size()
self.warning_string = (
"Updating forward report with key {}"
"{}, but it already exists in {}. "
"Please consider using a different key, "
"as this can cause issues during loss and "
"metric calculations."
)
for idx, arg in enumerate(all_args):
for key, item in arg.items():
if key in self and idx >= 2:
log = self.warning_string.format(
key, "", "in previous arguments to report"
)
warnings.warn(log)
self[key] = item
def get_batch_size(self) -> int:
return self.batch_size
@property
def batch_size(self) -> int:
return self._batch_size
@batch_size.setter
def batch_size(self, batch_size: int):
self._batch_size = batch_size
def _check_and_load_tuple(self, batch):
if isinstance(batch, collections.abc.Mapping):
return False
if isinstance(batch[0], (tuple, list)) and isinstance(batch[0][0], str):
for kv_pair in batch:
self[kv_pair[0]] = kv_pair[1]
return True
else:
return False
def __setattr__(self, key: str, value: Any):
self[key] = value
def __getattr__(self, key: str):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def fields(self) -> List[str]:
return list(self.keys())
def apply_fn(self, fn: Callable, fields: Optional[List[str]] = None):
"""Applies a function `fn` on all items in a report. Can apply to specific
fields if `fields` parameter is passed
Args:
fn (Callable): A callable to be called on each item in report
fields (List[str], optional): Use to apply on specific fields.
Defaults to None.
Returns:
Report: Updated report after applying fn
"""
for key in self.keys():
if fields is not None and isinstance(fields, (list, tuple)):
if key not in fields:
continue
self[key] = fn(self[key])
if isinstance(self[key], collections.abc.MutableSequence):
for idx, item in enumerate(self[key]):
self[key][idx] = fn(item)
elif isinstance(self[key], dict):
for subkey in self[key].keys():
self[key][subkey] = fn(self[key][subkey])
return self
def detach(self) -> "Report":
"""Similar to tensor.detach, detach all items in a report from their graphs.
This is useful in clearing up memory sometimes.
Returns:
Report: Detached report is returned back.
"""
return self.apply_fn(detach_tensor)
def to(
self,
device: Union[torch.device, str],
non_blocking: bool = True,
fields: Optional[List[str]] = None,
):
"""Move report to a specific device defined 'device' parameter.
This is similar to how one moves a tensor or sample_list to a device
Args:
device (torch.device): Device can be str defining device or torch.device
non_blocking (bool, optional): Whether transfer should be non_blocking.
Defaults to True.
fields (List[str], optional): Use this is you only want to move some
specific fields to the device instead of full report. Defaults to None.
Raises:
TypeError: If device type is not correct
Returns:
Report: Updated report is returned back
"""
if not isinstance(device, torch.device):
if not isinstance(device, str):
raise TypeError(
"device must be either 'str' or "
"'torch.device' type, {} found".format(type(device))
)
device = torch.device(device)
def fn(x):
if hasattr(x, "to"):
x = x.to(device, non_blocking=non_blocking)
return x
return self.apply_fn(fn, fields)
def accumulate_tensor_fields_and_loss(
self, report: "Report", field_list: List[str]
):
for key in field_list:
if key == "__prediction_report__":
continue
if key not in self.keys():
warnings.warn(
f"{key} not found in report. Metrics calculation "
+ "might not work as expected."
)
continue
if isinstance(self[key], torch.Tensor):
self[key] = torch.cat((self[key], report[key]), dim=0)
elif isinstance(self[key], List):
self[key].extend(report[key])
self._accumulate_loss(report)
def _accumulate_loss(self, report: "Report"):
for key, value in report.losses.items():
if key not in self.losses.keys():
warnings.warn(
f"{key} not found in report. Loss calculation "
+ "might not work as expected."
)
self.losses[key] = value
if isinstance(self.losses[key], torch.Tensor):
self.losses[key] += value
def copy(self) -> "Report":
"""Get a copy of the current Report
Returns:
Report: Copy of current Report.
"""
report = Report()
fields = self.fields()
for field in fields:
report[field] = copy.deepcopy(self[field])
return report
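# Illustrative usage sketch (not part of the original MMF file): building a
# Report from a SampleList and a model-output dict, then detaching it before
# logging. Field names and loss values are placeholders.
def _example_report():
    batch = SampleList({"targets": torch.tensor([[1.0, 0.0], [0.0, 1.0]])})
    model_output = {
        "scores": torch.rand(2, 2, requires_grad=True),
        "losses": {"toy/total_loss": torch.tensor(0.5)},
    }
    report = Report(batch, model_output)
    report = report.detach()  # drop autograd graph references before logging
    return report.batch_size, report.fields()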
| EXA-1-master | exa/models/mmf-main/mmf/common/report.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
The metrics module contains implementations of various metrics used commonly to
understand how well our models are performing. For e.g. accuracy, vqa_accuracy,
r@1 etc.
For implementing your own metric, you need to follow these steps:
1. Create your own metric class and inherit ``BaseMetric`` class.
2. In the ``__init__`` function of your class, make sure to call
``super().__init__('name')`` where 'name' is the name of your metric. If
you require any parameters in your ``__init__`` function, you can use
keyword arguments to represent them, and the metric constructor will take care of
providing them to your class from the config.
3. Implement a ``calculate`` function which takes in a ``SampleList`` and
`model_output` as input and returns back a float tensor/number.
4. Register your metric with a key 'name' by using decorator,
``@registry.register_metric('name')``.
Example::
import torch
from mmf.common.registry import registry
from mmf.modules.metrics import BaseMetric
@registry.register_metric("some")
class SomeMetric(BaseMetric):
def __init__(self, some_param=None):
super().__init__("some")
....
def calculate(self, sample_list, model_output):
metric = torch.tensor(2, dtype=torch.float)
return metric
Example config for above metric::
model_config:
pythia:
metrics:
- type: some
params:
some_param: a
"""
import collections
import warnings
from typing import Dict
import torch
from mmf.common.registry import registry
from mmf.datasets.processors.processors import EvalAIAnswerProcessor
from mmf.utils.logger import log_class_usage
from sklearn.metrics import (
average_precision_score,
f1_score,
precision_recall_curve,
precision_recall_fscore_support,
roc_auc_score,
)
from torch import Tensor
def _convert_to_one_hot(expected, output):
# This won't get called in case of multilabel, only multiclass or binary
# as multilabel will anyways be multi hot vector
if output.squeeze().dim() != expected.squeeze().dim() and expected.dim() == 1:
expected = torch.nn.functional.one_hot(
expected.long(), num_classes=output.size(-1)
).float()
return expected
class Metrics:
"""Internally used by MMF, Metrics acts as wrapper for handling
calculation of metrics over various metrics specified by the model in
the config. It initializes all of the metrics and when called it runs
calculate on each of them one by one and returns back a dict with proper
naming back. For e.g. an example dict returned by Metrics class:
``{'val/vqa_accuracy': 0.3, 'val/r@1': 0.8}``
Args:
metric_list (ListConfig): List of DictConfigs where each DictConfig
specifies name and parameters of the
metrics used.
"""
def __init__(self, metric_list):
if not isinstance(metric_list, collections.abc.Sequence):
metric_list = [metric_list]
self.metrics = self._init_metrics(metric_list)
def _init_metrics(self, metric_list):
metrics = {}
self.required_params = {"dataset_name", "dataset_type"}
for metric in metric_list:
params = {}
dataset_names = []
if isinstance(metric, collections.abc.Mapping):
if "type" not in metric:
raise ValueError(
f"Metric {metric} needs to have 'type' attribute "
+ "or should be a string"
)
metric_type = key = metric.type
params = metric.get("params", {})
# Support cases where users need to give a custom metric name
if "key" in metric:
key = metric.key
# One key should only be used once
if key in metrics:
raise RuntimeError(
f"Metric with type/key '{metric_type}' has been defined more "
+ "than once in metric list."
)
# a custom list of dataset where this metric will be applied
if "datasets" in metric:
dataset_names = metric.datasets
else:
if not isinstance(metric, str):
raise TypeError(
"Metric {} has inappropriate type"
"'dict' or 'str' allowed".format(metric)
)
metric_type = key = metric
metric_cls = registry.get_metric_class(metric_type)
if metric_cls is None:
raise ValueError(
f"No metric named {metric_type} registered to registry"
)
metric_instance = metric_cls(**params)
metric_instance.name = key
metric_instance.set_applicable_datasets(dataset_names)
metrics[key] = metric_instance
self.required_params.update(metrics[key].required_params)
return metrics
def __call__(self, sample_list, model_output, *args, **kwargs):
values = {}
dataset_type = sample_list.dataset_type
dataset_name = sample_list.dataset_name
with torch.no_grad():
for metric_name, metric_object in self.metrics.items():
if not metric_object.is_dataset_applicable(dataset_name):
continue
metric_result = metric_object._calculate_with_checks(
sample_list, model_output, *args, **kwargs
)
if not isinstance(metric_result, collections.abc.Mapping):
metric_result = {"": metric_result}
for child_metric_name, child_metric_result in metric_result.items():
key = f"{dataset_type}/{dataset_name}/{metric_name}"
key = f"{key}/{child_metric_name}" if child_metric_name else key
values[key] = child_metric_result
if not isinstance(values[key], torch.Tensor):
values[key] = torch.tensor(values[key], dtype=torch.float)
else:
values[key] = values[key].float()
if values[key].dim() == 0:
values[key] = values[key].view(1)
registry.register(
"{}.{}.{}".format("metrics", sample_list.dataset_name, dataset_type), values
)
return values
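# Illustrative usage sketch (not part of the original MMF file): driving the
# Metrics wrapper directly with a plain metric name and toy tensors. A real
# trainer builds this from the metrics list in the config instead (see the
# module docstring above for the config format).
def _example_metrics_wrapper():
    from mmf.common.sample import SampleList

    metrics = Metrics(["accuracy"])
    sample_list = SampleList({"targets": torch.tensor([0, 1])})
    sample_list.add_field("dataset_type", "val")
    sample_list.add_field("dataset_name", "toy")
    model_output = {"scores": torch.tensor([[0.9, 0.1], [0.2, 0.8]])}
    return metrics(sample_list, model_output)  # {"val/toy/accuracy": tensor([1.])}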
class BaseMetric:
"""Base class to be inherited by all metrics registered to MMF. See
the description on top of the file for more information. Child class must
implement ``calculate`` function.
Args:
name (str): Name of the metric.
"""
def __init__(self, name, *args, **kwargs):
self.name = name
self.required_params = ["scores", "targets"]
# the set of datasets where this metric will be applied
# an empty set means it will be applied on *all* datasets
self._dataset_names = set()
log_class_usage("Metric", self.__class__)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Abstract method to be implemented by the child class. Takes
in a ``SampleList`` and a dict returned by model as output and
returns back a float tensor/number indicating value for this metric.
Args:
sample_list (SampleList): SampleList provided by the dataloader for the
current iteration.
model_output (Dict): Output dict from the model for the current
SampleList
Returns:
torch.Tensor|float: Value of the metric.
"""
# Override in your child class
raise NotImplementedError("'calculate' must be implemented in the child class")
def __call__(self, *args, **kwargs):
return self.calculate(*args, **kwargs)
def _calculate_with_checks(self, *args, **kwargs):
value = self.calculate(*args, **kwargs)
return value
def set_applicable_datasets(self, dataset_names):
self._dataset_names = set(dataset_names)
def is_dataset_applicable(self, dataset_name):
return len(self._dataset_names) == 0 or dataset_name in self._dataset_names
@registry.register_metric("accuracy")
class Accuracy(BaseMetric):
"""Metric for calculating accuracy.
**Key:** ``accuracy``
"""
def __init__(self, score_key="scores", target_key="targets", topk=1):
super().__init__("accuracy")
self.score_key = score_key
self.target_key = target_key
self.topk = topk
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: accuracy.
"""
output = model_output[self.score_key]
batch_size = output.shape[0]
expected = sample_list[self.target_key]
assert (
output.dim() <= 2
), "Output from model shouldn't have more than dim 2 for accuracy"
assert (
expected.dim() <= 2
), "Expected target shouldn't have more than dim 2 for accuracy"
if output.dim() == 2:
output = output.topk(self.topk, 1, True, True)[1].t().squeeze()
# If expected is 2-dim with more than one entry in the last dim, it is
# one-hot / score style, so convert it to class indices via topk.
# If the last dim is 1, we already have class indices directly.
if expected.dim() == 2 and expected.size(-1) != 1:
expected = expected.topk(self.topk, 1, True, True)[1].t().squeeze()
correct = (expected == output.squeeze()).sum().float()
return correct / batch_size
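# Illustrative usage sketch (not part of the original MMF file): calling a
# single metric directly with plain dicts, which is convenient for quick
# sanity checks outside a full training loop.
def _example_accuracy_metric():
    metric = Accuracy(topk=1)
    sample_list = {"targets": torch.tensor([2, 0, 1])}
    model_output = {
        "scores": torch.tensor([[0.1, 0.2, 0.7], [0.8, 0.1, 0.1], [0.3, 0.3, 0.4]])
    }
    return metric(sample_list, model_output)  # tensor(0.6667): 2 of 3 correct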
@registry.register_metric("topk_accuracy")
class TopKAccuracy(Accuracy):
def __init__(self, score_key: str, k: int):
super().__init__(score_key=score_key, topk=k)
@registry.register_metric("caption_bleu4")
class CaptionBleu4Metric(BaseMetric):
"""Metric for calculating caption accuracy using BLEU4 Score.
**Key:** ``caption_bleu4``
"""
def __init__(self):
import nltk.translate.bleu_score as bleu_score
self._bleu_score = bleu_score
super().__init__("caption_bleu4")
self.caption_processor = registry.get("coco_caption_processor")
self.required_params = ["scores", "answers", "captions"]
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: bleu4 score.
"""
# Create reference and hypotheses captions.
references = []
hypotheses = []
# References
targets = sample_list.answers
for j, _ in enumerate(targets):
img_captions = [
self.caption_processor(c)["tokens"] for c in targets[j].tolist()
]
references.append(img_captions)
# Hypotheses
if "captions" in model_output:
scores = model_output["captions"]
else:
scores = torch.max(model_output["scores"], dim=-1)[1]
scores = scores.tolist()
predictions = []
for j, _ in enumerate(scores):
caption = self.caption_processor(scores[j])["tokens"]
predictions.append(caption)
hypotheses.extend(predictions)
assert len(references) == len(hypotheses)
bleu4 = self._bleu_score.corpus_bleu(references, hypotheses)
return targets.new_tensor(bleu4, dtype=torch.float)
@registry.register_metric("vqa_accuracy")
class VQAAccuracy(BaseMetric):
"""
Calculate VQAAccuracy. Find more information here_
**Key**: ``vqa_accuracy``.
.. _here: https://visualqa.org/evaluation.html
"""
def __init__(self):
super().__init__("vqa_accuracy")
def _masked_unk_softmax(self, x, dim, mask_idx):
x1 = torch.nn.functional.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate vqa accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: VQA Accuracy
"""
output = model_output["scores"]
# for three branch movie+mcan model
if output.dim() == 3:
output = output[:, 0]
expected = sample_list["targets"]
output = self._masked_unk_softmax(output, 1, 0)
output = output.argmax(dim=1) # argmax
one_hots = expected.new_zeros(*expected.size())
one_hots.scatter_(1, output.view(-1, 1), 1)
scores = one_hots * expected
accuracy = torch.sum(scores) / expected.size(0)
return accuracy
@registry.register_metric("vqa_evalai_accuracy")
class VQAEvalAIAccuracy(BaseMetric):
"""
Calculate Eval AI VQAAccuracy. Find more information here_
This gives a more accurate comparison, similar to Eval AI,
but is slower than vqa_accuracy.
**Key**: ``vqa_evalai_accuracy``.
.. _here: https://visualqa.org/evaluation.html
"""
def __init__(self):
super().__init__("vqa_evalai_accuracy")
self.evalai_answer_processor = EvalAIAnswerProcessor()
self.required_params = ["scores", "answers", "context_tokens"]
def _masked_unk_softmax(self, x, dim, mask_idx):
x1 = torch.nn.functional.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate vqa accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: VQA Accuracy
"""
output = model_output["scores"]
expected = sample_list["answers"]
answer_processor = registry.get(sample_list.dataset_name + "_answer_processor")
answer_space_size = answer_processor.get_true_vocab_size()
output = self._masked_unk_softmax(output, 1, 0)
output = output.argmax(dim=1).clone().tolist()
accuracy = []
for idx, answer_id in enumerate(output):
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = sample_list["context_tokens"][idx][answer_id]
else:
answer = answer_processor.idx2word(answer_id)
answer = self.evalai_answer_processor(answer)
gt_answers = [self.evalai_answer_processor(x) for x in expected[idx]]
gt_answers = list(enumerate(gt_answers))
gt_acc = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
gt_acc.append(acc)
avgGTAcc = float(sum(gt_acc)) / len(gt_acc)
accuracy.append(avgGTAcc)
accuracy = float(sum(accuracy)) / len(accuracy)
return model_output["scores"].new_tensor(accuracy, dtype=torch.float)
class RecallAtK(BaseMetric):
def __init__(self, name="recall@k"):
super().__init__(name)
def score_to_ranks(self, scores):
# sort in descending order - largest score gets highest rank
sorted_ranks, ranked_idx = scores.sort(1, descending=True)
# convert from ranked_idx to ranks
ranks = ranked_idx.clone().fill_(0)
for i in range(ranked_idx.size(0)):
for j in range(100):
ranks[i][ranked_idx[i][j]] = j
ranks += 1
return ranks
def get_gt_ranks(self, ranks, ans_ind):
_, ans_ind = ans_ind.max(dim=1)
ans_ind = ans_ind.view(-1)
gt_ranks = torch.LongTensor(ans_ind.size(0))
for i in range(ans_ind.size(0)):
gt_ranks[i] = int(ranks[i, ans_ind[i].long()])
return gt_ranks
def process_ranks(self, ranks):
num_opts = 100
# none of the values should be 0, there is gt in options
if torch.sum(ranks.le(0)) > 0:
num_zero = torch.sum(ranks.le(0))
warnings.warn(f"Some of ranks are zero: {num_zero}")
ranks = ranks[ranks.gt(0)]
# rank should not exceed the number of options
if torch.sum(ranks.ge(num_opts + 1)) > 0:
num_ge = torch.sum(ranks.ge(num_opts + 1))
warnings.warn(f"Some of ranks > 100: {num_ge}")
ranks = ranks[ranks.le(num_opts + 1)]
return ranks
def get_ranks(self, sample_list, model_output, *args, **kwargs):
output = model_output["scores"]
expected = sample_list["targets"]
ranks = self.score_to_ranks(output)
gt_ranks = self.get_gt_ranks(ranks, expected)
ranks = self.process_ranks(gt_ranks)
return ranks.float()
def calculate(self, sample_list, model_output, k, *args, **kwargs):
ranks = self.get_ranks(sample_list, model_output)
recall = float(torch.sum(torch.le(ranks, k))) / ranks.size(0)
return recall
@registry.register_metric("r@1")
class RecallAt1(RecallAtK):
"""
Calculate Recall@1, which specifies how many times the chosen candidate
was ranked 1.
**Key**: ``r@1``.
"""
def __init__(self):
super().__init__("r@1")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall@1 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Recall@1
"""
return super().calculate(sample_list, model_output, k=1)
@registry.register_metric("r@5")
class RecallAt5(RecallAtK):
"""
Calculate Recall@5, which specifies how many times the chosen candidate
was among the first 5 ranks.
**Key**: ``r@5``.
"""
def __init__(self):
super().__init__("r@5")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall@5 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Recall@5
"""
return super().calculate(sample_list, model_output, k=5)
@registry.register_metric("r@10")
class RecallAt10(RecallAtK):
"""
Calculate Recall@10, which specifies how many times the chosen candidate
was among the first 10 ranks.
**Key**: ``r@10``.
"""
def __init__(self):
super().__init__("r@10")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall@10 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Recall@10
"""
return super().calculate(sample_list, model_output, k=10)
@registry.register_metric("mean_r")
class MeanRank(RecallAtK):
"""
Calculate MeanRank, which specifies the average rank of the chosen
candidate.
**Key**: ``mean_r``.
"""
def __init__(self):
super().__init__("mean_r")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Mean Rank and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: mean rank
"""
ranks = self.get_ranks(sample_list, model_output)
return torch.mean(ranks)
@registry.register_metric("mean_rr")
class MeanReciprocalRank(RecallAtK):
"""
Calculate the reciprocal of the mean rank.
**Key**: ``mean_rr``.
"""
def __init__(self):
super().__init__("mean_rr")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Mean Reciprocal Rank and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Mean Reciprocal Rank
"""
ranks = self.get_ranks(sample_list, model_output)
return torch.mean(ranks.reciprocal())
@registry.register_metric("textvqa_accuracy")
class TextVQAAccuracy(BaseMetric):
def __init__(self):
super().__init__("textvqa_accuracy")
import mmf.utils.m4c_evaluators as evaluators
self.evaluator = evaluators.TextVQAAccuracyEvaluator()
self.required_params = ["scores", "answers", "context_tokens"]
self.gt_key = "answers"
def calculate(self, sample_list, model_output, *args, **kwargs):
answer_processor = registry.get(sample_list.dataset_name + "_answer_processor")
batch_size = sample_list.context_tokens.size(0)
pred_answers = model_output["scores"].argmax(dim=-1)
context_tokens = sample_list.context_tokens.cpu().numpy()
answers = sample_list.get(self.gt_key).cpu().numpy()
answer_space_size = answer_processor.get_true_vocab_size()
predictions = []
from mmf.utils.distributed import byte_tensor_to_object
from mmf.utils.text import word_tokenize
for idx in range(batch_size):
tokens = byte_tensor_to_object(context_tokens[idx])
answer_words = []
for answer_id in pred_answers[idx].tolist():
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer_words.append(word_tokenize(tokens[answer_id]))
else:
if answer_id == answer_processor.EOS_IDX:
break
answer_words.append(
answer_processor.answer_vocab.idx2word(answer_id)
)
pred_answer = " ".join(answer_words).replace(" 's", "'s")
gt_answers = byte_tensor_to_object(answers[idx])
predictions.append({"pred_answer": pred_answer, "gt_answers": gt_answers})
accuracy = self.evaluator.eval_pred_list(predictions)
accuracy = torch.tensor(accuracy).to(sample_list.context_tokens.device)
return accuracy
@registry.register_metric("stvqa_anls")
class STVQAANLS(TextVQAAccuracy):
def __init__(self):
super().__init__()
self.name = "stvqa_anls"
import mmf.utils.m4c_evaluators as evaluators
self.evaluator = evaluators.STVQAANLSEvaluator()
@registry.register_metric("stvqa_accuracy")
class STVQAAccuracy(TextVQAAccuracy):
def __init__(self):
super().__init__()
self.name = "stvqa_accuracy"
import mmf.utils.m4c_evaluators as evaluators
self.evaluator = evaluators.STVQAAccuracyEvaluator()
@registry.register_metric("ocrvqa_accuracy")
class OCRVQAAccuracy(STVQAAccuracy):
def __init__(self):
super().__init__()
# same as STVQAAccuracy except for the name
self.name = "ocrvqa_accuracy"
@registry.register_metric("textcaps_bleu4")
class TextCapsBleu4(TextVQAAccuracy):
def __init__(self):
super().__init__()
self.name = "textcaps_bleu4"
self.required_params = ["scores", "ref_strs", "context_tokens"]
self.gt_key = "ref_strs"
import mmf.utils.m4c_evaluators as evaluators
self.evaluator = evaluators.TextCapsBleu4Evaluator()
@registry.register_metric("f1")
class F1(BaseMetric):
"""Metric for calculating F1. Can be used with type and params
argument for customization. params will be directly passed to sklearn
f1 function.
**Key:** ``f1``
"""
def __init__(self, *args, **kwargs):
super().__init__("f1")
self._multilabel = kwargs.pop("multilabel", False)
self._sk_kwargs = kwargs
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate f1 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: f1.
"""
scores = model_output["scores"]
expected = sample_list["targets"]
if self._multilabel:
output = torch.sigmoid(scores)
output = torch.round(output)
expected = _convert_to_one_hot(expected, output)
else:
# Multiclass, or binary case
output = scores.argmax(dim=-1)
if expected.dim() != 1:
# Probably one-hot, convert back to class indices array
expected = expected.argmax(dim=-1)
value = f1_score(expected.cpu(), output.cpu(), **self._sk_kwargs)
return expected.new_tensor(value, dtype=torch.float)
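# Added usage sketch for the F1 metric above (not part of the original module).
# The tensors and the plain dicts standing in for SampleList / model_output are
# illustrative assumptions; sklearn's f1_score is imported at the top of this file.
def _example_f1_usage():
    import torch

    metric = F1(average="macro")
    model_output = {"scores": torch.tensor([[0.2, 0.8], [0.9, 0.1], [0.3, 0.7]])}
    sample_list = {"targets": torch.tensor([1, 0, 0])}
    # argmax of scores -> [1, 0, 1]; macro F1 is computed against [1, 0, 0]
    return metric.calculate(sample_list, model_output)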
@registry.register_metric("macro_f1")
class MacroF1(F1):
"""Metric for calculating Macro F1.
**Key:** ``macro_f1``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="macro", **kwargs)
self.name = "macro_f1"
@registry.register_metric("micro_f1")
class MicroF1(F1):
"""Metric for calculating Micro F1.
**Key:** ``micro_f1``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="micro", **kwargs)
self.name = "micro_f1"
@registry.register_metric("binary_f1")
class BinaryF1(F1):
"""Metric for calculating Binary F1.
**Key:** ``binary_f1``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="binary", **kwargs)
self.name = "binary_f1"
@registry.register_metric("multilabel_f1")
class MultiLabelF1(F1):
"""Metric for calculating Multilabel F1.
**Key:** ``multilabel_f1``
"""
def __init__(self, *args, **kwargs):
super().__init__(multilabel=True, **kwargs)
self.name = "multilabel_f1"
@registry.register_metric("multilabel_micro_f1")
class MultiLabelMicroF1(MultiLabelF1):
"""Metric for calculating Multilabel Micro F1.
**Key:** ``multilabel_micro_f1``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="micro", **kwargs)
self.name = "multilabel_micro_f1"
@registry.register_metric("multilabel_macro_f1")
class MultiLabelMacroF1(MultiLabelF1):
"""Metric for calculating Multilabel Macro F1.
**Key:** ``multilabel_macro_f1``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="macro", **kwargs)
self.name = "multilabel_macro_f1"
@registry.register_metric("f1_precision_recall")
class F1PrecisionRecall(BaseMetric):
"""Metric for calculating F1 precision and recall.
params will be directly passed to sklearn
precision_recall_fscore_support function.
**Key:** ``f1_precision_recall``
"""
def __init__(self, *args, **kwargs):
super().__init__("f1_precision_recall")
self._multilabel = kwargs.pop("multilabel", False)
self._sk_kwargs = kwargs
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate f1_precision_recall and return it back as a dict.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
Dict(
'f1': torch.FloatTensor,
'precision': torch.FloatTensor,
'recall': torch.FloatTensor
)
"""
scores = model_output["scores"]
expected = sample_list["targets"]
if self._multilabel:
output = torch.sigmoid(scores)
output = torch.round(output)
expected = _convert_to_one_hot(expected, output)
else:
# Multiclass, or binary case
output = scores.argmax(dim=-1)
if expected.dim() != 1:
# Probably one-hot, convert back to class indices array
expected = expected.argmax(dim=-1)
value_tuple = precision_recall_fscore_support(
expected.cpu(), output.cpu(), **self._sk_kwargs
)
value = {
"precision": expected.new_tensor(value_tuple[0], dtype=torch.float),
"recall": expected.new_tensor(value_tuple[1], dtype=torch.float),
"f1": expected.new_tensor(value_tuple[2], dtype=torch.float),
}
return value
@registry.register_metric("binary_f1_precision_recall")
class BinaryF1PrecisionRecall(F1PrecisionRecall):
"""Metric for calculating Binary F1 Precision and Recall.
**Key:** ``binary_f1_precision_recall``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="binary", **kwargs)
self.name = "binary_f1_precision_recall"
@registry.register_metric("macro_f1_precision_recall")
class MacroF1PrecisionRecall(F1PrecisionRecall):
"""Metric for calculating Macro F1 Precision and Recall.
**Key:** ``macro_f1_precision_recall``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="macro", **kwargs)
self.name = "macro_f1_precision_recall"
@registry.register_metric("micro_f1_precision_recall")
class MicroF1PrecisionRecall(F1PrecisionRecall):
"""Metric for calculating Micro F1 Precision and Recall.
**Key:** ``micro_f1_precision_recall``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="micro", **kwargs)
self.name = "micro_f1_precision_recall"
@registry.register_metric("roc_auc")
class ROC_AUC(BaseMetric):
"""Metric for calculating ROC_AUC.
See more details at `sklearn.metrics.roc_auc_score <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ # noqa
**Note**: ROC_AUC is not defined when the expected tensor contains only one
label. Make sure both labels are always present, or use it only on the full val set
**Key:** ``roc_auc``
"""
def __init__(self, *args, **kwargs):
super().__init__("roc_auc")
self._sk_kwargs = kwargs
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate ROC_AUC and returns it back. The function performs softmax
on the logits provided and then calculated the ROC_AUC.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration.
model_output (Dict): Dict returned by model. This should contain "scores"
field pointing to logits returned from the model.
Returns:
torch.FloatTensor: ROC_AUC.
"""
output = torch.nn.functional.softmax(model_output["scores"], dim=-1)
expected = sample_list["targets"]
expected = _convert_to_one_hot(expected, output)
value = roc_auc_score(expected.cpu(), output.cpu(), **self._sk_kwargs)
return expected.new_tensor(value, dtype=torch.float)
@registry.register_metric("micro_roc_auc")
class MicroROC_AUC(ROC_AUC):
"""Metric for calculating Micro ROC_AUC.
**Key:** ``micro_roc_auc``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="micro", **kwargs)
self.name = "micro_roc_auc"
@registry.register_metric("macro_roc_auc")
class MacroROC_AUC(ROC_AUC):
"""Metric for calculating Macro ROC_AUC.
**Key:** ``macro_roc_auc``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="macro", **kwargs)
self.name = "macro_roc_auc"
@registry.register_metric("ap")
class AveragePrecision(BaseMetric):
"""Metric for calculating Average Precision.
See more details at `sklearn.metrics.average_precision_score <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ # noqa
If you are looking for the binary case, take a look at binary_ap
**Key:** ``ap``
"""
def __init__(self, *args, **kwargs):
super().__init__("ap")
self._sk_kwargs = kwargs
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate AP and returns it back. The function performs softmax
on the logits provided and then calculated the AP.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration.
model_output (Dict): Dict returned by model. This should contain "scores"
field pointing to logits returned from the model.
Returns:
torch.FloatTensor: AP.
"""
output = torch.nn.functional.softmax(model_output["scores"], dim=-1)
expected = sample_list["targets"]
expected = _convert_to_one_hot(expected, output)
value = average_precision_score(expected.cpu(), output.cpu(), **self._sk_kwargs)
return expected.new_tensor(value, dtype=torch.float)
@registry.register_metric("binary_ap")
class BinaryAP(AveragePrecision):
"""Metric for calculating Binary Average Precision.
See more details at `sklearn.metrics.average_precision_score <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ # noqa
**Key:** ``binary_ap``
"""
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
self.name = "binary_ap"
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Binary AP and returns it back. The function performs softmax
on the logits provided and then calculated the binary AP.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration.
model_output (Dict): Dict returned by model. This should contain "scores"
field pointing to logits returned from the model.
Returns:
torch.FloatTensor: AP.
"""
output = torch.nn.functional.softmax(model_output["scores"], dim=-1)
# Take the score for positive (1) label
output = output[:, 1]
expected = sample_list["targets"]
# One hot format -> Labels
if expected.dim() == 2:
expected = expected.argmax(dim=1)
value = average_precision_score(expected.cpu(), output.cpu(), **self._sk_kwargs)
return expected.new_tensor(value, dtype=torch.float)
@registry.register_metric("micro_ap")
class MicroAP(AveragePrecision):
"""Metric for calculating Micro Average Precision.
**Key:** ``micro_ap``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="micro", **kwargs)
self.name = "micro_ap"
@registry.register_metric("macro_ap")
class MacroAP(AveragePrecision):
"""Metric for calculating Macro Average Precision.
**Key:** ``macro_ap``
"""
def __init__(self, *args, **kwargs):
super().__init__(average="macro", **kwargs)
self.name = "macro_ap"
@registry.register_metric("r@pk")
class RecallAtPrecisionK(BaseMetric):
"""Metric for calculating recall when precision is above a
particular threshold. Use the `p_threshold` param to specify the
precision threshold, i.e. k. Precision is accepted in both the 0-1
and 1-100 formats.
**Key:** ``r@pk``
"""
def __init__(self, p_threshold, *args, **kwargs):
"""Initialization function recall @ precision k
Args:
p_threshold (float): Precision threshold
"""
super().__init__(name="r@pk")
self.name = "r@pk"
self.p_threshold = p_threshold if p_threshold < 1 else p_threshold / 100
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall at precision k and returns it back. The function
performs softmax on the logits provided and then calculated the metric.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration.
model_output (Dict): Dict returned by model. This should contain "scores"
field pointing to logits returned from the model.
Returns:
torch.FloatTensor: Recall @ precision k.
"""
output = torch.nn.functional.softmax(model_output["scores"], dim=-1)[:, 1]
expected = sample_list["targets"]
# One hot format -> Labels
if expected.dim() == 2:
expected = expected.argmax(dim=1)
precision, recall, thresh = precision_recall_curve(expected.cpu(), output.cpu())
try:
value, _ = max(
(r, p) for p, r in zip(precision, recall) if p >= self.p_threshold
)
except ValueError:
value = 0
return expected.new_tensor(value, dtype=torch.float)
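# Added illustrative sketch of the r@pk metric above (not in the original file).
# With p_threshold=50 (normalized to 0.5), the metric reports the highest recall
# achievable on the precision-recall curve at precision >= 0.5; inputs are dummies.
def _example_recall_at_precision_k():
    import torch

    metric = RecallAtPrecisionK(p_threshold=50)  # 50 is normalized to 0.5
    model_output = {
        "scores": torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.4, 0.6], [0.7, 0.3]])
    }
    sample_list = {"targets": torch.tensor([1, 0, 1, 0])}
    return metric.calculate(sample_list, model_output)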
@registry.register_metric("r@k_retrieval")
class RecallAtK_ret(BaseMetric):
def __init__(self, name="recall@k"):
super().__init__(name)
def _get_RatK_multi(
self, correlations: Tensor, labels: Tensor, k: int, factor: int
):
_, top_k_ids = torch.topk(correlations, k, dim=1)
hits = (
torch.logical_and(
labels[:, None] <= top_k_ids, top_k_ids < labels[:, None] + factor
)
.long()
.max(dim=1)[0]
)
return hits
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
k: int,
flip=False,
*args,
**kwargs,
):
# calculate image to text retrieval recalls
# correlations shape is either BxB or Bx(5B)
# when flip=True, calculate text to image
image_embeddings = model_output["scores"]
text_embeddings = model_output["targets"]
correlations = image_embeddings @ text_embeddings.t() # B x B or Bx5B
assert correlations.shape[1] % correlations.shape[0] == 0
batch_size = correlations.shape[0]
factor = correlations.shape[1] // correlations.shape[0]
labels = torch.arange(batch_size, device=image_embeddings.device) * factor
if flip:
correlations = correlations.t() # 5B x B
labels = torch.arange(batch_size, device=image_embeddings.device)
labels = labels[:, None].expand(-1, factor).flatten()
factor = 1
hits = self._get_RatK_multi(correlations, labels, k, factor)
ratk = hits.sum().float() / hits.shape[0]
return ratk
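# Added hedged sketch of the retrieval recall@k metric above. "scores" holds
# image embeddings and "targets" holds text embeddings; with one caption per
# image the correlation matrix is B x B and factor == 1. Sizes are arbitrary.
def _example_recall_at_k_retrieval():
    import torch

    metric = RecallAtK_ret()
    image_embeddings = torch.nn.functional.normalize(torch.rand(4, 8), dim=-1)
    text_embeddings = torch.nn.functional.normalize(torch.rand(4, 8), dim=-1)
    model_output = {"scores": image_embeddings, "targets": text_embeddings}
    # recall@2 for image-to-text retrieval; pass flip=True for text-to-image
    return metric.calculate({}, model_output, k=2)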
@registry.register_metric("r@1_retrieval")
class RecallAt1_ret(RecallAtK_ret):
def __init__(self):
super().__init__("r@1")
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
*args,
**kwargs,
):
ratk = super().calculate(sample_list, model_output, 1)
return ratk
@registry.register_metric("r@1_rev_retrieval")
class RecallAt1_rev_ret(RecallAtK_ret):
def __init__(self):
super().__init__("r@1_rev")
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
*args,
**kwargs,
):
ratk = super().calculate(sample_list, model_output, 1, flip=True)
return ratk
@registry.register_metric("r@5_retrieval")
class RecallAt5_ret(RecallAtK_ret):
def __init__(self):
super().__init__("r@5")
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
*args,
**kwargs,
):
ratk = super().calculate(sample_list, model_output, 5)
return ratk
@registry.register_metric("r@5_rev_retrieval")
class RecallAt5_rev_ret(RecallAtK_ret):
def __init__(self):
super().__init__("r@5_rev")
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
*args,
**kwargs,
):
ratk = super().calculate(sample_list, model_output, 5, flip=True)
return ratk
@registry.register_metric("r@10_retrieval")
class RecallAt10_ret(RecallAtK_ret):
def __init__(self):
super().__init__("r@10")
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
*args,
**kwargs,
):
ratk = super().calculate(sample_list, model_output, 10)
return ratk
@registry.register_metric("r@10_rev_retrieval")
class RecallAt10_rev_ret(RecallAtK_ret):
def __init__(self):
super().__init__("r@10_rev")
def calculate(
self,
sample_list: Dict[str, Tensor],
model_output: Dict[str, Tensor],
*args,
**kwargs,
):
ratk = super().calculate(sample_list, model_output, 10, flip=True)
return ratk
@registry.register_metric("detection_mean_ap")
class DetectionMeanAP(BaseMetric):
"""Metric for calculating the detection mean average precision (mAP) using the COCO
evaluation toolkit, returning the default COCO-style mAP@IoU=0.50:0.95
**Key:** ``detection_mean_ap``
"""
def __init__(self, dataset_json_files, *args, **kwargs):
"""Initialization function detection mean AP (mAP)
Args:
dataset_json_files (Dict): paths to the dataset (instance) json files
for each dataset type and dataset name in the following format:
``{'val/detection_coco': '/path/to/instances_val2017.json', ...}``
"""
super().__init__("detection_mean_ap")
self.required_params = ["__prediction_report__"]
self.dataset_json_files = dataset_json_files
def calculate(
self, sample_list, model_output, execute_on_master_only=True, *args, **kwargs
):
"""Calculate detection mean AP (mAP) from the prediction list and the dataset
annotations. The function returns COCO-style mAP@IoU=0.50:0.95.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration.
model_output (Dict): Dict returned by model. This should contain
"prediction_report" field, which is a list of
detection predictions from the model.
execute_on_master_only (bool): Whether to only run mAP evaluation on the
master node over the gathered detection predictions
(to avoid wasting computation and CPU OOM).
Default: True (only run mAP evaluation on master).
Returns:
torch.FloatTensor: COCO-style mAP@IoU=0.50:0.95.
"""
# as the detection mAP metric is run on the entire dataset-level predictions,
# which are *already* gathered from all nodes, the evaluation should only happen
# on one node and be broadcast to the other nodes (to avoid CPU OOM due to concurrent
# mAP evaluation)
from mmf.utils.distributed import broadcast_tensor, is_master
from mmf.utils.general import get_current_device
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
device = get_current_device()
if execute_on_master_only and not is_master():
# dummy mAP to be overridden in broadcasting
mAP = torch.tensor(-1, dtype=torch.float, device=device)
else:
predictions = model_output.prediction_report
cocoGt = COCO(
self.dataset_json_files[sample_list.dataset_name][
sample_list.dataset_type
]
)
cocoDt = cocoGt.loadRes(predictions)
cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
mAP = torch.tensor(cocoEval.stats[0], dtype=torch.float, device=device)
if execute_on_master_only:
mAP = broadcast_tensor(mAP, src=0)
return mAP
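# Added illustrative sketch (not in the original file) of constructing the metric
# above; the json path is a placeholder and the nesting follows how calculate()
# indexes the mapping: dataset_json_files[dataset_name][dataset_type].
def _example_detection_mean_ap_config():
    dataset_json_files = {
        "detection_coco": {"val": "/path/to/instances_val2017.json"}
    }
    return DetectionMeanAP(dataset_json_files)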
| EXA-1-master | exa/models/mmf-main/mmf/modules/metrics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import Optional, Tuple, Type
import torch
from mmf.modules.layers import GatedTanh, ModalCombineLayer, TransformLayer
from torch import nn
class AttentionLayer(nn.Module):
def __init__(self, image_dim, question_dim, **kwargs):
super().__init__()
combine_type = kwargs["modal_combine"]["type"]
combine_params = kwargs["modal_combine"]["params"]
modal_combine_layer = ModalCombineLayer(
combine_type, image_dim, question_dim, **combine_params
)
transform_type = kwargs["transform"]["type"]
transform_params = kwargs["transform"]["params"]
transform_layer = TransformLayer(
transform_type, modal_combine_layer.out_dim, **transform_params
)
normalization = kwargs["normalization"]
self.module = TopDownAttention(
modal_combine_layer, transform_layer, normalization
)
if hasattr(self.module, "out_dim"):
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class ConcatenationAttention(nn.Module):
def __init__(self, image_feat_dim, txt_rnn_embeding_dim, hidden_size):
super().__init__()
self.image_feat_dim = image_feat_dim
self.txt_embeding_dim = txt_rnn_embeding_dim
self.fa = GatedTanh(image_feat_dim + txt_rnn_embeding_dim, hidden_size)
self.lc = nn.Linear(hidden_size, 1)
def forward(self, image_feat, question_embedding):
_, num_location, _ = image_feat.shape
question_embedding_expand = torch.unsqueeze(question_embedding, 1).expand(
-1, num_location, -1
)
concat_feature = torch.cat((image_feat, question_embedding_expand), dim=2)
raw_attention = self.lc(self.fa(concat_feature))
# softmax across locations
attention_weights = nn.functional.softmax(raw_attention, dim=1)
attention_weights = attention_weights.expand_as(image_feat)
return attention_weights
class ProjectAttention(nn.Module):
def __init__(self, image_feat_dim, txt_rnn_embeding_dim, hidden_size, dropout=0.2):
super().__init__()
self.image_feat_dim = image_feat_dim
self.txt_embeding_dim = txt_rnn_embeding_dim
self.fa_image = GatedTanh(image_feat_dim, hidden_size)
self.fa_txt = GatedTanh(txt_rnn_embeding_dim, hidden_size)
self.dropout = nn.Dropout(dropout)
self.lc = nn.Linear(hidden_size, 1)
def compute_raw_att(self, image_feat, question_embedding):
num_location = image_feat.shape[1]
image_fa = self.fa_image(image_feat)
question_fa = self.fa_txt(question_embedding)
question_fa_expand = torch.unsqueeze(question_fa, 1).expand(
-1, num_location, -1
)
joint_feature = image_fa * question_fa_expand
joint_feature = self.dropout(joint_feature)
raw_attention = self.lc(joint_feature)
return raw_attention
def forward(self, image_feat, question_embedding):
raw_attention = self.compute_raw_att(image_feat, question_embedding)
# softmax across locations
attention_weights = nn.functional.softmax(raw_attention, dim=1)
attention_weights = attention_weights.expand_as(image_feat)
return attention_weights
class DoubleProjectAttention(nn.Module):
def __init__(self, image_feat_dim, txt_rnn_embeding_dim, hidden_size, dropout=0.2):
super().__init__()
self.att1 = ProjectAttention(
image_feat_dim, txt_rnn_embeding_dim, hidden_size, dropout
)
self.att2 = ProjectAttention(
image_feat_dim, txt_rnn_embeding_dim, hidden_size, dropout
)
self.image_feat_dim = image_feat_dim
self.txt_embeding_dim = txt_rnn_embeding_dim
def forward(self, image_feat, question_embedding):
att1 = self.att1.compute_raw_att(image_feat, question_embedding)
att2 = self.att2.compute_raw_att(image_feat, question_embedding)
raw_attn_weights = att1 + att2
# softmax across locations
attention_weights = nn.functional.softmax(raw_attn_weights, dim=1)
attention_weights = attention_weights.expand_as(image_feat)
return attention_weights
class TopDownAttention(nn.Module):
EPS = 1.0e-08
def __init__(self, combination_layer, transform_module, normalization):
super().__init__()
self.combination_layer = combination_layer
self.normalization = normalization
self.transform = transform_module
self.out_dim = self.transform.out_dim
@staticmethod
def _mask_attentions(attention, image_locs):
batch_size, num_loc, n_att = attention.size()
tmp1 = attention.new_zeros(num_loc)
tmp1[:num_loc] = torch.arange(0, num_loc, dtype=attention.dtype).unsqueeze(
dim=0
)
tmp1 = tmp1.expand(batch_size, num_loc)
tmp2 = image_locs.type(tmp1.type())
tmp2 = tmp2.unsqueeze(dim=1).expand(batch_size, num_loc)
mask = torch.ge(tmp1, tmp2)
mask = mask.unsqueeze(dim=2).expand_as(attention)
attention = attention.masked_fill(mask, 0)
return attention
def forward(self, image_feat, question_embedding, image_locs=None):
# N x K x joint_dim
joint_feature = self.combination_layer(image_feat, question_embedding)
# N x K x n_att
raw_attn = self.transform(joint_feature)
if self.normalization.lower() == "softmax":
attention = nn.functional.softmax(raw_attn, dim=1)
if image_locs is not None:
masked_attention = self._mask_attentions(attention, image_locs)
masked_attention_sum = torch.sum(masked_attention, dim=1, keepdim=True)
masked_attention_sum += masked_attention_sum.eq(0).float() + self.EPS
masked_attention = masked_attention / masked_attention_sum
else:
masked_attention = attention
elif self.normalization.lower() == "sigmoid":
attention = torch.sigmoid(raw_attn)
masked_attention = attention
if image_locs is not None:
masked_attention = self._mask_attentions(attention, image_locs)
return masked_attention
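# Added illustration of the masking helper above (not part of the original
# module): positions at or beyond each sample's number of valid image locations
# receive zero attention. Shapes are arbitrary assumptions.
def _example_mask_attentions():
    import torch

    attention = torch.ones(2, 4, 1)  # batch of 2, 4 locations, 1 attention map
    image_locs = torch.tensor([2, 3])  # number of valid locations per sample
    masked = TopDownAttention._mask_attentions(attention, image_locs)
    # masked[0, 2:] and masked[1, 3:] are zero; the remaining entries stay 1
    return masked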
# TODO(vedanuj): Remove this and use torch.nn.MultiHeadAttention
class MovieMcanMultiHeadAttention(nn.Module):
"""
Multi-Head Attention implementation from https://arxiv.org/abs/1706.03762
used for Movie+MCAN
"""
def __init__(self, dim: int, num_attn: int, dropout: float = 0.1):
super().__init__()
self.p_attn = None
self.h = num_attn
self.d_k = dim // num_attn
self.linears = nn.ModuleList([nn.Linear(dim, dim) for _ in range(4)])
self.dropout = nn.Dropout(p=dropout)
def qkv_attention(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[torch.Tensor] = None,
dropout: Type[nn.Dropout] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores.data.masked_fill_(mask.unsqueeze(1).unsqueeze(2), -1e9)
p_attn = nn.functional.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: torch.Tensor
) -> torch.Tensor:
b = q.size(0)
q = self.linears[0](q).view(b, -1, self.h, self.d_k).transpose(1, 2)
k = self.linears[1](k).view(b, -1, self.h, self.d_k).transpose(1, 2)
v = self.linears[2](v).view(b, -1, self.h, self.d_k).transpose(1, 2)
x, self.p_attn = self.qkv_attention(q, k, v, mask=mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(b, -1, self.h * self.d_k)
return self.linears[-1](x)
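# Added usage sketch for the multi-head attention block above; sizes are
# arbitrary. `mask` is a boolean padding mask where True marks positions that
# should not be attended to (here nothing is padded).
def _example_movie_mcan_mha():
    import torch

    attn = MovieMcanMultiHeadAttention(dim=8, num_attn=2, dropout=0.1)
    x = torch.rand(2, 5, 8)  # batch of 2, sequence length 5, dim 8
    mask = torch.zeros(2, 5, dtype=torch.bool)
    out = attn(x, x, x, mask)  # self-attention, output shape (2, 5, 8)
    return out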
class SelfAttention(nn.Module):
def __init__(self, dim: int, num_attn: int, dropout: float):
super().__init__()
self.multi_head_attn = MovieMcanMultiHeadAttention(dim, num_attn, dropout=0.1)
self.fcn = nn.Sequential(
nn.Linear(dim, 4 * dim),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4 * dim, dim),
)
self.drop_mha = nn.Dropout(p=dropout)
self.ln_mha = nn.LayerNorm(dim)
self.drop_fcn = nn.Dropout(p=dropout)
self.ln_fcn = nn.LayerNorm(dim)
def forward(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:
x = self.ln_mha(x + self.drop_mha(self.multi_head_attn(x, x, x, x_mask)))
x = self.ln_fcn(x + self.drop_fcn(self.fcn(x)))
return x
class SelfGuidedAttention(nn.Module):
def __init__(self, dim: int, num_attn: int, dropout: float):
super().__init__()
self.multi_head_attn = nn.ModuleList(
[MovieMcanMultiHeadAttention(dim, num_attn, dropout=0.1) for _ in range(2)]
)
self.fcn = nn.Sequential(
nn.Linear(dim, 4 * dim),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4 * dim, dim),
)
self.drop_mha = nn.ModuleList([nn.Dropout(p=dropout) for _ in range(2)])
self.ln_mha = nn.ModuleList([nn.LayerNorm(dim) for _ in range(3)])
self.drop_fcn = nn.Dropout(p=dropout)
self.ln_fcn = nn.LayerNorm(dim)
def forward(
self,
x: torch.Tensor,
y: torch.Tensor,
x_mask: torch.Tensor,
y_mask: torch.Tensor,
) -> torch.Tensor:
x = self.ln_mha[0](
x + self.drop_mha[0](self.multi_head_attn[0](x, x, x, x_mask))
)
x = self.ln_mha[1](
x + self.drop_mha[1](self.multi_head_attn[1](x, y, y, y_mask))
)
x = self.ln_fcn(x + self.drop_fcn(self.fcn(x)))
return x
| EXA-1-master | exa/models/mmf-main/mmf/modules/attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This module contains implementations for various pooling methods from
transformer encoder layers
.. code::
from mmf.common.registry import registry
from torch import nn
@registry.register_pooler("custom")
class CustomPool(nn.Module):
...
"""
from typing import Any, List
import torch
import torch.nn as nn
from mmf.common.registry import registry
@registry.register_pooler("average_concat_last_k")
class AverageConcatLastN(nn.Module):
def __init__(self, k=4, tol=0.000001):
super().__init__()
self.num_layers = k
self.tol = tol
def forward(self, encoded_layers: List[torch.Tensor], pad_mask: torch.Tensor):
assert self.num_layers <= len(
encoded_layers
), "k should be less than the number of encoder layers"
encoder_avg = torch.cat(encoded_layers[-self.num_layers :], 2)
pad_mask = pad_mask.unsqueeze(2)
encoder_avg = encoder_avg * pad_mask.float()
pooled_output = torch.sum(encoder_avg, 1) / (
torch.sum(pad_mask, 1).float() + self.tol
)
return pooled_output
@registry.register_pooler("average_k_from_last")
class AverageKFromLast(nn.Module):
def __init__(self, k=2, tol=0.000001):
super().__init__()
self.k = k
self.tol = tol
def forward(self, encoded_layers: List[torch.Tensor], pad_mask: torch.Tensor):
assert self.k <= len(
encoded_layers
), "k should be less than the number of encoder layers"
encoder_avg = encoded_layers[-self.k]
pad_mask = pad_mask.unsqueeze(2)
encoder_avg = encoder_avg * pad_mask.float()
pooled_output = torch.sum(encoder_avg, 1) / (
torch.sum(pad_mask, 1).float() + self.tol
)
return pooled_output
@registry.register_pooler("average_sum_last_k")
class AverageSumLastK(nn.Module):
def __init__(self, k=4, tol=0.000001):
super().__init__()
self.k = k
self.tol = tol
def forward(self, encoded_layers: List[torch.Tensor], pad_mask: torch.Tensor):
assert self.k <= len(
encoded_layers
), "k should be less than the number of encoder layers"
encoder_avg = torch.stack(encoded_layers[-self.k :]).sum(0)
pad_mask = pad_mask.unsqueeze(2)
encoder_avg = encoder_avg * pad_mask.float()
pooled_output = torch.sum(encoder_avg, 1) / (
torch.sum(pad_mask, 1).float() + self.tol
)
return pooled_output
@registry.register_pooler("identity")
class IdentityPooler(nn.Module):
def forward(self, x: Any):
return x
@registry.register_pooler("cls")
class ClsPooler(nn.Module):
def __init__(self, dim=1, cls_index=0):
super().__init__()
self.dim = dim
self.cls_index = cls_index
def forward(self, last_hidden_state: torch.Tensor):
"""Returns the last layer hidden-state of the first token of of the
sequence, the classification (cls) token.
Args:
last_hidden_state (torch.Tensor): Sequence of hidden-state of
at the output of the last layer of the model (bs, seq length, hidden size)
Returns:
[torch.Tensor]: First token of the last hidden-state. (bs, hidden size)
"""
return last_hidden_state.select(dim=self.dim, index=self.cls_index)
@registry.register_pooler("avg")
class MeanPooler(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, last_hidden_state: torch.Tensor):
"""Returns the averaged feature of last layer hidden-state sequence,
Args:
last_hidden_state (torch.Tensor): Sequence of hidden-state of
at the output of the last layer of the model (bs, seq length, hidden size)
Returns:
[torch.Tensor]: First token of the last hidden-state. (bs, hidden size)
"""
return torch.mean(last_hidden_state, dim=self.dim)
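# Added sketch showing the two simple poolers above on a dummy hidden-state
# tensor; the shapes are illustrative.
def _example_simple_poolers():
    import torch

    last_hidden_state = torch.rand(2, 5, 8)  # (bs, seq length, hidden size)
    cls_features = ClsPooler()(last_hidden_state)  # (2, 8), first token
    avg_features = MeanPooler()(last_hidden_state)  # (2, 8), mean over tokens
    return cls_features, avg_features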
| EXA-1-master | exa/models/mmf-main/mmf/modules/poolers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from torch import nn
from torch.nn.utils.weight_norm import weight_norm
class VisDialDiscriminator(nn.Module):
def __init__(self, config, embedding):
super().__init__()
self.config = config
self.embedding = embedding
self.emb_out_dim = embedding.text_out_dim
self.hidden_dim = self.config.hidden_dim
self.projection_layer = nn.Linear(self.emb_out_dim, self.hidden_dim)
def forward(self, encoder_output, batch):
answer_options_len = batch["answer_options_len"]
# BATCH_SIZE X DIALOGUES X 100 X SEQ_LEN
answer_options = batch["answer_options"]
max_seq_len = answer_options.size(-1)
batch_size, ndialogues, noptions, seq_len = answer_options.size()
# (B X D X 100) X SEQ_LEN
answer_options = answer_options.view(-1, max_seq_len)
answer_options_len = answer_options_len.view(-1)
# (B x D x 100) x EMB_OUT_DIM
answer_options = self.embedding(answer_options)
# (B x D x 100) x HIDDEN_DIM
answer_options = self.projection_layer(answer_options)
# (B x D) x 100 x HIDDEN_DIM
answer_options = answer_options.view(
batch_size * ndialogues, noptions, self.hidden_dim
)
# (B x D) x HIDDEN_DIM => (B x D) x 100 x HIDDEN_DIM
encoder_output = encoder_output.unsqueeze(1).expand(-1, noptions, -1)
# (B x D) x 100 x HIDDEN_DIM * (B x D) x 100 x HIDDEN_DIM = SAME THING
# SUM => (B x D) x 100
scores = torch.sum(answer_options * encoder_output, dim=2)
return scores
class LanguageDecoder(nn.Module):
def __init__(self, in_dim, out_dim, **kwargs):
super().__init__()
self.language_lstm = nn.LSTMCell(
in_dim + kwargs["hidden_dim"], kwargs["hidden_dim"], bias=True
)
self.fc = weight_norm(nn.Linear(kwargs["hidden_dim"], out_dim))
self.dropout = nn.Dropout(p=kwargs["dropout"])
self.init_weights(kwargs["fc_bias_init"])
def init_weights(self, fc_bias_init):
self.fc.bias.data.fill_(fc_bias_init)
self.fc.weight.data.uniform_(-0.1, 0.1)
def forward(self, weighted_attn):
# Get LSTM state
state = registry.get(f"{weighted_attn.device}_lstm_state")
h1, c1 = state["td_hidden"]
h2, c2 = state["lm_hidden"]
# Language LSTM
h2, c2 = self.language_lstm(torch.cat([weighted_attn, h1], dim=1), (h2, c2))
predictions = self.fc(self.dropout(h2))
# Update hidden state for t+1
state["lm_hidden"] = (h2, c2)
return predictions
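# Added hedged sketch of driving LanguageDecoder: the caller is expected to have
# registered the LSTM state under the f"{device}_lstm_state" key read in forward()
# above. The dimensions and the direct use of registry.register are illustrative
# assumptions, not the original training pipeline.
def _example_language_decoder():
    batch_size, in_dim, hidden_dim, vocab_size = 2, 16, 8, 100
    decoder = LanguageDecoder(
        in_dim, vocab_size, hidden_dim=hidden_dim, dropout=0.1, fc_bias_init=0.0
    )
    weighted_attn = torch.rand(batch_size, in_dim)
    state = {
        "td_hidden": (
            torch.zeros(batch_size, hidden_dim),
            torch.zeros(batch_size, hidden_dim),
        ),
        "lm_hidden": (
            torch.zeros(batch_size, hidden_dim),
            torch.zeros(batch_size, hidden_dim),
        ),
    }
    registry.register(f"{weighted_attn.device}_lstm_state", state)
    return decoder(weighted_attn)  # (batch_size, vocab_size) logits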
| EXA-1-master | exa/models/mmf-main/mmf/modules/decoders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import List, Optional, Tuple
import torch
from mmf.utils.patch import restore_saved_modules, safecopy_modules
from torch import nn, Tensor
try:
from transformers3.modeling_bert import (
BertAttention,
BertEmbeddings,
BertEncoder,
BertLayer,
BertModel,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers3.modeling_roberta import (
RobertaAttention,
RobertaEmbeddings,
RobertaEncoder,
RobertaLayer,
RobertaModel,
RobertaSelfAttention,
)
from transformers3.modeling_utils import PreTrainedModel
except ImportError:
from transformers.modeling_bert import (
BertAttention,
BertEmbeddings,
BertEncoder,
BertLayer,
BertModel,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.modeling_roberta import (
RobertaAttention,
RobertaEmbeddings,
RobertaEncoder,
RobertaLayer,
RobertaModel,
RobertaSelfAttention,
)
from transformers.modeling_utils import PreTrainedModel
patch_functions = [
"BertEmbeddings.forward",
"BertEncoder.forward",
"BertLayer.forward",
"BertAttention.forward",
"BertSelfAttention.forward",
"BertSelfAttention.transpose_for_scores",
"BertModel.forward",
"RobertaEmbeddings.forward",
"RobertaEncoder.forward",
"RobertaLayer.forward",
"RobertaAttention.forward",
"RobertaSelfAttention.forward",
"RobertaSelfAttention.transpose_for_scores",
"RobertaModel.forward",
]
patch_modules = [p_fun.split(".")[0] for p_fun in patch_functions]
def replace_with_jit():
"""
Monkey patch some transformer functions to replace with scriptable ones.
"""
# to revert monkey patch without reload()
safecopy_modules(patch_functions, _get_modules_dict(patch_modules))
BertEmbeddings.forward = BertEmbeddingsJit.forward
BertEncoder.forward = BertEncoderJit.forward
BertLayer.forward = BertLayerJit.forward
BertAttention.forward = BertAttentionJit.forward
BertSelfAttention.forward = BertSelfAttentionJit.forward
BertSelfAttention.transpose_for_scores = BertSelfAttentionJit.transpose_for_scores
BertModel.forward = BertModelJit.forward
PreTrainedModel.__jit_unused_properties__ = [
"base_model",
"dummy_inputs",
"device",
"dtype",
]
RobertaEmbeddings.forward = RobertaEmbeddingsJit.forward
RobertaEncoder.forward = BertEncoderJit.forward
RobertaLayer.forward = BertLayerJit.forward
RobertaAttention.forward = BertAttentionJit.forward
RobertaSelfAttention.forward = BertSelfAttentionJit.forward
RobertaSelfAttention.transpose_for_scores = (
BertSelfAttentionJit.transpose_for_scores
)
RobertaModel.forward = BertModelJit.forward
def undo_replace_with_jit():
"""
Reload modules to undo monkey patch.
"""
restore_saved_modules(_get_modules_dict(patch_modules))
def _get_modules_dict(modules_list):
"""
Expects a list of str module names.
Returns a dict of module_name: module obj,
a subset of globals().
"""
global_table = globals()
return {module_name: global_table[module_name] for module_name in modules_list}
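# Added hedged sketch of how the monkey patching above is meant to be used:
# patch, build a small randomly initialized BertModelJit, script it, then restore
# the originals. The BertConfig sizes are arbitrary assumptions, and whether
# torch.jit.script succeeds depends on the installed transformers version this
# module targets.
def _example_script_bert():
    try:
        from transformers3.configuration_bert import BertConfig
    except ImportError:
        from transformers import BertConfig

    replace_with_jit()
    config = BertConfig(
        vocab_size=100,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
    )
    model = BertModelJit(config).eval()
    scripted = torch.jit.script(model)
    undo_replace_with_jit()
    return scripted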
class BertEmbeddingsJit(BertEmbeddings):
"""
Torchscriptable version of `BertEmbeddings` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_bert.py # noqa
Modifies the `forward` function
Changes to `forward` function ::
Typed inputs and modified device to be input_ids.device by default
"""
def forward(
self,
input_ids: Tensor,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttentionJit(BertSelfAttention):
"""
Torchscriptable version of `BertSelfAttention` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_bert.py # noqa
Modifies the `forward` function and `transpose_for_scores` function
Changes to `transpose_for_scores` function ::
Changes the `new_x_shape` unpacking as static size inference is not supported
Changes to `forward` function ::
Uses scriptable `nn.functional.softmax` and also removes several static size
inferences, which are not supported.
"""
def transpose_for_scores(self, x: Tensor) -> Tensor:
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention
# scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel
# forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs)
return outputs
class BertAttentionJit(BertAttention):
"""
Torchscriptable version of `BertAttention` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_bert.py # noqa
Modifies the `forward` function as well as uses scriptable `BertSelfAttentionJit`
Changes to `forward` function ::
Typed inputs and modifies the output to be a List[Tensor]
"""
def __init__(self, config):
super().__init__(config)
self.self = BertSelfAttentionJit(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
) -> List[Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class BertLayerJit(BertLayer):
"""
Torchscriptable version of `BertLayer` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_bert.py # noqa
Modifies the `forward` function as well as uses scriptable `BertAttentionJit`
Changes to `forward` function::
Typed inputs and modifies the output to be a List[Tensor]
"""
def __init__(self, config):
super().__init__(config)
self.attention = BertAttentionJit(config)
self.is_decoder = config.is_decoder
if self.is_decoder:
self.crossattention = BertAttentionJit(config)
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
) -> List[Tensor]:
self_attention_outputs = self.attention(
hidden_states, attention_mask, head_mask
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[
1:
] # add self attentions if we output attention weights
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
return outputs
class BertEncoderJit(BertEncoder):
"""
Torchscriptable version of `BertEncoder` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_bert.py # noqa
Modifies the `forward` function as well as uses scriptable `BertLayerJit`
Changes to `forward` function::
Typed inputs and modifies the output to be of Tuple[Tensor] type in scripting
mode. Due to different possible types when `output_hidden_states` or
`output_attentions` are enabled, we do not support these in scripting mode
"""
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList(
[BertLayerJit(config) for _ in range(config.num_hidden_layers)]
)
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor],
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = False,
head_mask: Optional[Tensor] = None,
) -> Tuple[Tensor]:
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if not torch.jit.is_scripting() and output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
None,
encoder_hidden_states,
encoder_attention_mask,
)
hidden_states = layer_outputs[0]
if not torch.jit.is_scripting() and output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if not torch.jit.is_scripting() and output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if not torch.jit.is_scripting():
if output_hidden_states:
outputs = outputs + (all_hidden_states,)
if output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertModelJit(BertModel):
"""
Torchscriptable version of `BertModel` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_bert.py # noqa
Modifies the `forward` function
Changes to `forward` function ::
Typings for input, modifications to device, change output type to
Tuple[Tensor, Tensor, List[Tensor]]
"""
__jit_unused_properties__ = ["base_model", "dummy_inputs", "device", "dtype"]
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddingsJit(config)
self.encoder = BertEncoderJit(config)
self.pooler = BertPooler(config)
self.init_weights()
def forward(
self,
input_ids: Tensor,
attention_mask: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
encoder_hidden_states: Optional[Tensor] = None,
encoder_attention_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""Forward pass on the Model.
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in
`Attention is all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar,
Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and
Illia Polosukhin.
To behave as a decoder, the model needs to be initialized with the
`is_decoder` argument of the configuration set to `True`; an
`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to
# the padding mask
# - if the model is an encoder, make the mask broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or "
+ f"attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# Python builtin next is currently not supported in Torchscript
if not torch.jit.is_scripting():
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
# add hidden_states and attentions if they are here
outputs = (sequence_output, pooled_output, encoder_outputs[1:])
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
class RobertaEmbeddingsJit(RobertaEmbeddings):
"""
Torchscriptable version of `RobertaEmbeddings` from Huggingface transformers v2.3.0
https://github.com/huggingface/transformers/blob/v2.3.0/transformers/modeling_roberta.py # noqa
Modifies the `forward` function
Changes to `forward` function ::
Typed inputs and modified device to be input_ids.device by default
"""
def forward(
self,
input_ids: Tensor,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
if position_ids is None:
# Position numbers begin at padding_idx+1. Padding symbols are ignored.
# cf. fairseq's `utils.make_positions`
position_ids = torch.arange(
self.padding_idx + 1,
seq_length + self.padding_idx + 1,
dtype=torch.long,
device=device,
)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
| EXA-1-master | exa/models/mmf-main/mmf/modules/hf_layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import mmf.modules.losses # noqa
import mmf.modules.metrics # noqa
import mmf.modules.optimizers # noqa
import mmf.modules.schedulers # noqa
| EXA-1-master | exa/models/mmf-main/mmf/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import importlib
import logging
import os
import pickle
import re
from collections import OrderedDict
from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any
import torch
import torchvision
from mmf.common.registry import registry
from mmf.models.frcnn import GeneralizedRCNN
from mmf.modules.embeddings import ProjectionEmbedding, TextEmbedding
from mmf.modules.hf_layers import BertModelJit
from mmf.modules.layers import Identity
from mmf.utils.build import build_image_encoder, build_text_encoder
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from mmf.utils.logger import log_class_usage
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
try:
from transformers3.configuration_auto import AutoConfig
from transformers3.modeling_auto import AutoModel
except ImportError:
from transformers.configuration_auto import AutoConfig
from transformers.modeling_auto import AutoModel
try:
from detectron2.modeling import build_resnet_backbone, ShapeSpec
except ImportError:
pass
logger = logging.getLogger()
class Encoder(nn.Module):
@dataclass
class Config:
name: str = MISSING
def __init__(self):
super().__init__()
log_class_usage("Encoder", self.__class__)
@classmethod
def from_params(cls, **kwargs):
config = OmegaConf.structured(cls.Config(**kwargs))
return cls(config)
class EncoderFactory(nn.Module):
@dataclass
class Config:
type: str = MISSING
params: Encoder.Config = MISSING
class ImageFeatureEncoderTypes(Enum):
default = "default"
identity = "identity"
projection = "projection"
frcnn_fc7 = "finetune_faster_rcnn_fpn_fc7"
class ImageFeatureEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
in_dim: int = MISSING
class ImageFeatureEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageFeatureEncoderTypes = MISSING
params: ImageFeatureEncoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
encoder_type = config.type
if isinstance(encoder_type, ImageFeatureEncoderTypes):
encoder_type = encoder_type.value
assert (
"in_dim" in config.params
), "ImageFeatureEncoder require 'in_dim' param in config"
params = config.params
if encoder_type == "default" or encoder_type == "identity":
self.module = Identity()
self.module.in_dim = params.in_dim
self.module.out_dim = params.in_dim
elif encoder_type == "projection":
if "module" not in params:
params = deepcopy(params)
params.module = "linear"
self.module = ProjectionEmbedding(**params)
elif encoder_type == "finetune_faster_rcnn_fpn_fc7":
self.module = FinetuneFasterRcnnFpnFc7(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % encoder_type)
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("finetune_faster_rcnn_fpn_fc7")
class FinetuneFasterRcnnFpnFc7(ImageFeatureEncoder):
@dataclass
class Config(ImageFeatureEncoder.Config):
name: str = "finetune_faster_rcnn_fpn_fc7"
in_dim: int = MISSING
weights_file: str = "fc7_w.pkl"
bias_file: str = "fc7_b.pkl"
model_data_dir: str = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
model_data_dir = get_absolute_path(config.model_data_dir)
if not os.path.isabs(config.weights_file):
weights_file = os.path.join(model_data_dir, config.weights_file)
if not os.path.isabs(config.bias_file):
bias_file = os.path.join(model_data_dir, config.bias_file)
if not PathManager.exists(bias_file) or not PathManager.exists(weights_file):
download_path = download_pretrained_model("detectron.vmb_weights")
weights_file = get_absolute_path(os.path.join(download_path, "fc7_w.pkl"))
bias_file = get_absolute_path(os.path.join(download_path, "fc7_b.pkl"))
with PathManager.open(weights_file, "rb") as w:
weights = pickle.load(w)
with PathManager.open(bias_file, "rb") as b:
bias = pickle.load(b)
out_dim = bias.shape[0]
self.lc = nn.Linear(config.in_dim, out_dim)
self.lc.weight.data.copy_(torch.from_numpy(weights))
self.lc.bias.data.copy_(torch.from_numpy(bias))
self.out_dim = out_dim
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
old_prefix = prefix + "module."
for k in list(state_dict.keys()):
if k.startswith(old_prefix):
new_k = k.replace(old_prefix, prefix)
state_dict[new_k] = state_dict.pop(k)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, image):
i2 = self.lc(image)
i3 = nn.functional.relu(i2)
return i3
@registry.register_encoder("identity")
class IdentityEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "identity"
# Random in_dim if not specified
in_dim: int = 100
def __init__(self, config: Config):
super().__init__()
self.module = nn.Identity()
self.in_dim = config.get("in_dim", 100)
self.out_dim = self.in_dim
def forward(self, x):
return self.module(x)
class ImageEncoderTypes(Enum):
default = "default"
identity = "identity"
torchvision_resnet = "torchvision_resnet"
resnet152 = "resnet152"
detectron2_resnet = "detectron2_resnet"
class ImageEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageEncoderTypes = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, ImageEncoderTypes):
self._type = self._type.value
params = config.params
if self._type == "default" or self._type == "identity":
self.module = nn.Identity()
self.module.out_dim = params.in_dim
elif self._type == "resnet152":
self.module = ResNet152ImageEncoder(params)
elif self._type == "torchvision_resnet":
self.module = TorchvisionResNetImageEncoder(params)
elif self._type == "detectron2_resnet":
self.module = Detectron2ResnetImageEncoder(params)
elif self._type == "frcnn":
self.module = FRCNNImageEncoder(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % self._type)
@property
def out_dim(self):
return self.module.out_dim
def forward(self, image):
return self.module(image)
# Taken from facebookresearch/mmbt with some modifications
@registry.register_encoder("resnet152")
class ResNet152ImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet152"
pretrained: bool = True
# "avg" or "adaptive"
pool_type: str = "avg"
num_output_features: int = 1
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = torchvision.models.resnet152(pretrained=config.get("pretrained", True))
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
# -1 will keep the original feature size
if config.num_output_features == -1:
self.pool = nn.Identity()
elif config.num_output_features in [1, 2, 3, 5, 7]:
self.pool = pool_func((config.num_output_features, 1))
elif config.num_output_features == 4:
self.pool = pool_func((2, 2))
elif config.num_output_features == 6:
self.pool = pool_func((3, 2))
elif config.num_output_features == 8:
self.pool = pool_func((4, 2))
elif config.num_output_features == 9:
self.pool = pool_func((3, 3))
self.out_dim = 2048
def forward(self, x):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x))
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out # BxNx2048
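# Added usage sketch for the encoder above; pretrained=False keeps the example
# light-weight (random weights), and the commented shape is what this pooling
# configuration implies.
def _example_resnet152_encoder():
    encoder = ResNet152ImageEncoder.from_params(
        pretrained=False, pool_type="avg", num_output_features=1
    )
    images = torch.rand(2, 3, 224, 224)
    features = encoder(images)  # (2, 1, 2048)
    return features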
@registry.register_encoder("torchvision_resnet")
class TorchvisionResNetImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet50"
pretrained: bool = False
zero_init_residual: bool = True
num_output_features: int = -1
pool_type: str = "avg"
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = getattr(torchvision.models, config.name)(
pretrained=config.pretrained, zero_init_residual=config.zero_init_residual
)
# checks if use_avgpool exists to maintain the old logic
self.use_avgpool = config.get("use_avgpool", None)
if self.use_avgpool: # use_avgpool is True
config.num_output_features = 1
config.pool_type = "avg"
elif self.use_avgpool is False: # use_avgpool is False
config.num_output_features = -1
if config.pretrained:
model = self._load_pretrained(model, config)
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
self.pool = self._pool_func(config)
self.out_dim = config.get("out_dim", 2048)
def _load_pretrained(self, model, config: Config):
pretrained_model = config.get("pretrained_model", "supervised")
if pretrained_model == "supervised":
pass # this is already loaded via torchvision using pretrained=True
elif os.path.exists(pretrained_model):
model.load_state_dict(torch.load(pretrained_model))
else:
try:
with PathManager.open(pretrained_model, "rb") as f:
model.load_state_dict(
torch.load(f, map_location=lambda storage, loc: storage),
strict=False,
)
except Exception:
raise Exception(f"unknown pretrained ResNet model: {pretrained_model}")
return model
def _pool_func(self, config: Config):
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
# -1 will keep the original feature size
if config.num_output_features == -1:
pool = nn.Identity()
elif config.num_output_features in [1, 2, 3, 5, 7]:
pool = pool_func((config.num_output_features, 1))
elif config.num_output_features == 4:
pool = pool_func((2, 2))
elif config.num_output_features == 6:
pool = pool_func((3, 2))
elif config.num_output_features == 8:
pool = pool_func((4, 2))
elif config.num_output_features == 9:
pool = pool_func((3, 3))
return pool
def forward(self, x):
# B x 3 x 224 x 224 -> B x out_dim x 7 x 7
out = self.pool(self.model(x))
if self.use_avgpool is None:
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous() # BxNxout_dim
else:
out = torch.flatten(out, start_dim=1) # BxN*out_dim
return out
@registry.register_encoder("detectron2_resnet")
class Detectron2ResnetImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "detectron2_resnet"
pretrained: bool = True
pretrained_path: str = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
pretrained = config.get("pretrained", False)
pretrained_path = config.get("pretrained_path", None)
self.resnet = build_resnet_backbone(config, ShapeSpec(channels=3))
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
pretrained_path, progress=False
)
new_state_dict = OrderedDict()
replace_layer = {"backbone.": ""}
for key, value in state_dict["model"].items():
new_key = re.sub(
r"(backbone\.)", lambda x: replace_layer[x.groups()[0]], key
)
new_state_dict[new_key] = value
self.resnet.load_state_dict(new_state_dict, strict=False)
self.out_dim = 2048
def forward(self, x):
x = self.resnet(x)
return x["res5"]
@registry.register_encoder("frcnn")
class FRCNNImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "frcnn"
pretrained: bool = True
pretrained_path: str = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
pretrained = config.get("pretrained", False)
pretrained_path = config.get("pretrained_path", None)
self.frcnn = GeneralizedRCNN(config)
if pretrained:
state_dict = torch.load(pretrained_path)
self.frcnn.load_state_dict(state_dict)
self.frcnn.eval()
def forward(
self,
x: torch.Tensor,
sizes: torch.Tensor = None,
scales_yx: torch.Tensor = None,
padding: torch.Tensor = None,
max_detections: int = 0,
return_tensors: str = "pt",
):
x = self.frcnn(
x,
sizes,
scales_yx=scales_yx,
padding=padding,
max_detections=max_detections,
return_tensors=return_tensors,
)
return x
class TextEncoderTypes(Enum):
identity = "identity"
transformer = "transformer"
embedding = "embedding"
class TextEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
# identity, transformer or embedding as of now
type: TextEncoderTypes = MISSING
params: Encoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, TextEncoderTypes):
self._type = self._type.value
if self._type == "identity":
self.module = nn.Identity()
elif self._type == "transformer":
self._module = TransformerEncoder(config.params)
self.module = self._module.module
elif self._type == "embedding":
self.module = TextEmbeddingEncoder(config.params)
else:
raise NotImplementedError(f"Unknown Text Encoder {self._type}")
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("text_embedding")
class TextEmbeddingEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "text_embedding"
operator: str = MISSING
# Keeping this Any for now as this
# needs a separate refactor PR.
embedding_params: Any = MISSING
def __init__(self, config: Config):
super().__init__()
self._operator = config.operator
self._embedding_params = config.embedding_params
self.module = TextEmbedding(
self._embedding_params.type, **self._embedding_params.params
)
def forward(self, x):
x = self.module(x)
if self._operator == "sum":
x = x.sum(dim=1)
elif self._operator == "concat":
x = torch.cat(x, dim=1)
elif self._operator == "mul":
x = torch.prod(x, dim=1)
return x.squeeze()
@registry.register_encoder("transformer")
class TransformerEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "transformer"
num_segments: int = 2
bert_model_name: str = "bert-base-uncased"
        # Options below can be overridden to update the BERT configuration used
        # to initialize the BERT encoder. If some option is missing or if you are
        # using an encoder different from BERT, add extra parameters by inheriting
        # from and extending this config.
        # Those options will automatically override the options for your
        # transformer encoder's configuration. For example, vocab_size is missing
        # here; just add vocab_size: x to update the size of the vocabulary with
        # which the encoder is initialized. If you update the default values,
        # the transformer you get will be initialized from scratch.
hidden_size: int = 768
num_hidden_layers: int = 12
num_attention_heads: int = 12
output_attentions: bool = False
output_hidden_states: bool = False
random_init: bool = False
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
hf_params = {"config": self._build_encoder_config(config)}
should_random_init = self.config.get("random_init", False)
# For BERT models, initialize using Jit version
if self.config.bert_model_name.startswith("bert-"):
if should_random_init:
self.module = BertModelJit(**hf_params)
else:
self.module = BertModelJit.from_pretrained(
self.config.bert_model_name, **hf_params
)
else:
if should_random_init:
self.module = AutoModel.from_config(**hf_params)
else:
self.module = AutoModel.from_pretrained(
self.config.bert_model_name, **hf_params
)
self.embeddings = self.module.embeddings
self.original_config = self.config
self.config = self.module.config
self._init_segment_embeddings()
def _init_segment_embeddings(self):
if self.original_config.get("num_segments", None):
num_segments = self.original_config.num_segments
if hasattr(self.embeddings, "token_type_embeddings"):
new_embeds = nn.Embedding(num_segments, self.config.hidden_size)
new_embeds.weight.data[:2].copy_(
self.embeddings.token_type_embeddings.weight
)
                for idx in range(2, num_segments):
new_embeds.weight.data[idx].copy_(
self.embeddings.token_type_embeddings.weight.data.mean(dim=0)
)
self.embeddings.token_type_embeddings = new_embeds
def _build_encoder_config(self, config: Config):
return AutoConfig.from_pretrained(
config.bert_model_name, **OmegaConf.to_container(config)
)
def forward(self, *args, return_sequence=False, **kwargs) -> Tensor:
# Only return pooled output
output = self.module(*args, **kwargs)
return output[0] if return_sequence else output[1]
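# Hedged usage sketch (illustrative only, not part of the original module):
# builds a randomly initialized TransformerEncoder from its default Config and
# shows the pooled vs. full-sequence outputs. Fetching the bert-base-uncased
# config from HuggingFace is assumed to be possible; the weights are not needed
# because random_init=True. The helper name is made up for the example.
def _transformer_encoder_usage_sketch():
    from dataclasses import asdict

    from omegaconf import OmegaConf

    config = OmegaConf.create(
        {**asdict(TransformerEncoder.Config()), "random_init": True}
    )
    encoder = TransformerEncoder(config)
    token_ids = torch.randint(0, 30522, (2, 8))
    pooled = encoder(token_ids)  # B x hidden_size
    sequence = encoder(token_ids, return_sequence=True)  # B x T x hidden_size
    return pooled.shape, sequence.shape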
class MultiModalEncoderBase(Encoder):
__jit_unused_properties__ = ["encoder_config"]
@dataclass
class Config(Encoder.Config):
# This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]
modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()
)
text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(
type=TextEncoderTypes.transformer, params=TransformerEncoder.Config()
)
direct_features_input: bool = False
modal_hidden_size: int = 2048
text_hidden_size: int = 768
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
self._modal_encoder_config = self.config.get("modal_encoder", None)
self._is_direct_features_input = self.config.get("direct_features_input", False)
self.build()
self.modal_hidden_size = self.config.get("modal_hidden_size", None)
self.text_hidden_size = self.config.get("text_hidden_size", None)
def build(self):
encoders = self._build_encoders(self.config)
self.text_encoder, self.modal_encoder = encoders[0], encoders[1]
self._encoder_config = None
if self.text_encoder:
self._encoder_config = self.text_encoder.config
@property
def encoder_config(self):
return self._encoder_config
def _build_encoders(self, config):
text_encoder = None
if config.get("text_encoder", None):
text_encoder = build_text_encoder(config.text_encoder)
modal_encoder = None
if config.get("modal_encoder", None):
modal_encoder = self._build_modal_encoder(config.modal_encoder)
return (text_encoder, modal_encoder)
def _build_modal_encoder(self, config):
return build_image_encoder(
config, direct_features=self._is_direct_features_input
)
class PooledEncoder(Encoder):
"""
    Standard pooled encoder class which takes in an input, encodes it with an
    encoder implemented and returned from `self.build_encoder`, pools it based on
    the `pool_type` and `num_output_features` specified, flattens the result and
    returns it back as a tensor.
"""
@dataclass
class Config(Encoder.Config):
num_output_features: int = 1 # How many output features need to be returned.
pool_type: str = "avg" # type of pooling to apply "avg" | "adaptive"
out_dim: int = MISSING # size of out dim expected
three_d: bool = False # if input requires 3D pooling (for video)
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.encoder = self.build_encoder(config)
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
params = (config.num_output_features, 1)
if config.three_d:
pool_func = (
nn.AdaptiveAvgPool3d
if config.pool_type == "avg"
else nn.AdaptiveMaxPool3d
)
params = (config.num_output_features, 1, 1)
# -1 will keep the original feature size
if config.num_output_features == -1:
self.pool = nn.Identity()
else:
self.pool = pool_func(params)
self.out_dim = config.out_dim
def build_encoder(self, config: Config, *args, **kwargs):
"""Build an encoder on whose output the pooling will be applied.
Args:
config (Config): Config parameter required to build the encoder.
Raises:
NotImplementedError: Not implemented by default.
"""
raise NotImplementedError()
def forward(self, x: Tensor) -> Tensor:
out = self.encoder(x)
out = self.pool(out)
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out
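# Hedged sketch (illustrative only, not part of the original module): a minimal
# PooledEncoder subclass whose build_encoder returns a tiny conv layer, just to
# show the encode -> pool -> flatten -> transpose contract of forward. The class
# and function names here are made up for the example.
class _ToyPooledEncoder(PooledEncoder):
    def build_encoder(self, config, *args, **kwargs):
        # Any module mapping B x 3 x H x W to B x out_dim x H' x W' works here.
        return nn.Conv2d(3, config.out_dim, kernel_size=3, stride=2)


def _pooled_encoder_usage_sketch():
    from dataclasses import asdict

    from omegaconf import OmegaConf

    config = OmegaConf.create(asdict(PooledEncoder.Config(out_dim=16)))
    encoder = _ToyPooledEncoder(config)
    out = encoder(torch.randn(2, 3, 64, 64))
    assert out.shape == (2, 1, 16)  # B x num_output_features x out_dim
    return out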
@registry.register_encoder("pytorchvideo")
class PytorchVideoEncoder(Encoder):
"""A thin wrapper around pytorchvideo models.
This class is responsible for integrating pytorchvideo models as encoders.
    This class attempts to construct a pytorchvideo model from torch hub.
    If this fails for a randomly initialized model and the pytorchvideo package
    is available, the model is built with random weights from pytorchvideo.models.
    Config:
        name (str): Always 'pytorchvideo'. Used by build_encoder()
        random_init (bool): Flag to initialize the model with random weights
            instead of loading pretrained ones
model_name (str): Name of the pytorchvideo model to use
drop_last_n_layers (int):
<=0 value for the number of layers to drop off the end
pooler_name (str): Name of pooler used on model output
Raises:
ImportError:
The constructor raises an ImportError if pytorchvideo is not installed.
"""
@dataclass
class Config(Encoder.Config):
name: str = "pytorchvideo"
random_init: bool = False
model_name: str = "slowfast_r50"
drop_last_n_layers: int = -1
pooler_name: str = "identity"
PYTORCHVIDEO_REPO = "facebookresearch/pytorchvideo:main"
def __init__(self, config: Config):
super().__init__()
config = OmegaConf.create({**asdict(self.Config()), **config})
if config.random_init:
params = dict(**OmegaConf.to_container(config))
params = {
k: v
for k, v in params.items()
if k not in PytorchVideoEncoder.Config().__dict__
}
try:
model = torch.hub.load(
PytorchVideoEncoder.PYTORCHVIDEO_REPO,
model=config.model_name,
pretrained=False,
**params,
)
except BaseException as err:
pytorchvideo_spec = importlib.util.find_spec("pytorchvideo")
if pytorchvideo_spec is None:
raise err
import pytorchvideo.models.hub as hub
model_create_fn = getattr(hub, config.model_name)
model = model_create_fn(pretrained=False, **params)
else:
# load weights from TorchHub
model = torch.hub.load(
PytorchVideoEncoder.PYTORCHVIDEO_REPO,
model=config.model_name,
pretrained=True,
)
encoder_list = []
if config.drop_last_n_layers == 0:
encoder_list += [model]
else:
modules_list = list(model.children())
if len(modules_list) == 1:
modules_list = list(modules_list[0].children())
modules = modules_list[: config.drop_last_n_layers]
encoder_list += modules
pooler = registry.get_pool_class(config.pooler_name)()
encoder_list += [pooler]
self.encoder = nn.Sequential(*encoder_list)
def forward(self, *args, **kwargs):
# pass along input to model
# assumes caller obeys the dynamic model signature
return self.encoder(*args, **kwargs)
@registry.register_encoder("r2plus1d_18")
class R2Plus1D18VideoEncoder(PooledEncoder):
"""
    R2Plus1D based video encoder. Returns a tensor of dim 512.
    By default, the pretrained version is used.
See https://arxiv.org/abs/1711.11248.
"""
@dataclass
class Config(PooledEncoder.Config):
name: str = "r2plus1d_18"
out_dim: int = 512 # out dim
pretrained: bool = True # if should use pretrained version or not
three_d: bool = True
def build_encoder(self, config: Config, *args, **kwargs):
model = torchvision.models.video.r2plus1d_18(
pretrained=config.get("pretrained", True)
)
modules = list(model.children())[:-2]
return nn.Sequential(*modules)
@registry.register_encoder("resnet18_audio")
class ResNet18AudioEncoder(PooledEncoder):
"""
    Audio encoder based on ResNet18, used as a baseline in various audio
    classification papers. By default, the non-pretrained version is used.
"""
@dataclass
class Config(PooledEncoder.Config):
name: str = "resnet18_audio"
out_dim: int = 512
pretrained: bool = False
def build_encoder(self, config: Config, *args, **kwargs):
model = torchvision.models.resnet18(pretrained=config.get("pretrained", False))
model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
modules = list(model.children())[:-2]
return nn.Sequential(*modules)
@registry.register_encoder("vit")
class ViTEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "vit"
# See https://huggingface.co/models?filter=vit for available options
pretrained_model_name: str = "google/vit-base-patch16-224"
random_init: bool = False
gradient_checkpointing: bool = False
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
self.module, self.hf_config = self._model_class.from_config(config)
self.embeddings = self.module.embeddings
self.out_dim = self.hf_config.hidden_size
@property
def _model_class(self):
from mmf.modules.vit import ViTModel
return ViTModel
def forward(self, *args, **kwargs):
if "output_hidden_states" not in kwargs:
kwargs["output_hidden_states"] = False
output = self.module(*args, **kwargs)
return output["last_hidden_state"], output.get("hidden_states", None)
| EXA-1-master | exa/models/mmf-main/mmf/modules/encoders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from typing import Callable
import torch
from mmf.common.registry import registry
try:
from transformers3.optimization import AdamW
except ImportError:
from transformers.optimization import AdamW
registry.register_optimizer("adam_w")(AdamW)
@registry.register_optimizer("adam_w_skip_params_with_zero_grad")
class AdamWSkipParamsWithZeroGrad(AdamW):
def step(self, closure: Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`Callable`, `optional`): A closure that reevaluates the model
and returns the loss.
modified from
https://github.com/huggingface/transformers/blob/d2f9cb838ec1ed7f62ddfb850dccd223e19441ad/src/transformers/optimization.py#L259-L318 # NoQA
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
if p.grad.abs().sum().item() == 0:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider "
"SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = (
step_size * math.sqrt(bias_correction2) / bias_correction1
)
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
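# Hedged sketch (illustrative only, not part of the original module): parameters
# whose gradient is exactly zero are skipped entirely by this optimizer, so
# unlike plain AdamW they are not moved by weight decay either. The helper name
# is made up for the example.
def _adam_w_skip_zero_grad_sketch():
    updated = torch.nn.Parameter(torch.ones(3))
    skipped = torch.nn.Parameter(torch.ones(3))
    optimizer = AdamWSkipParamsWithZeroGrad(
        [updated, skipped], lr=0.1, weight_decay=0.01
    )
    updated.grad = torch.full_like(updated, 0.5)
    skipped.grad = torch.zeros_like(skipped)  # abs().sum() == 0, so it is skipped
    optimizer.step()
    assert torch.equal(skipped.data, torch.ones(3))
    return updated.data, skipped.data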
| EXA-1-master | exa/models/mmf-main/mmf/modules/optimizers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import namedtuple
from dataclasses import asdict, dataclass
import torch
from mmf.utils.general import retry_n
from omegaconf import OmegaConf
from packaging import version
from torch import nn
try:
from transformers3 import __version__ as transformers_version
from transformers3.modeling_bert import BertSelfAttention
except ImportError:
from transformers import __version__ as transformers_version
from transformers.modeling_bert import BertSelfAttention
if version.parse(transformers_version) >= version.parse("4.5.0"):
try:
import transformers3.models.vit.modeling_vit as vit
except ImportError:
import transformers.models.vit.modeling_vit as vit
has_VIT = True
else:
ViTStub = namedtuple("Vit", ["ViTAttention", "ViTPreTrainedModel"])
vit = ViTStub(torch.nn.Module, torch.nn.Module)
has_VIT = False
def check_vit_in_transformers():
if not has_VIT:
raise ImportError(
"transformers version >= 4.5.0 required for using modeling_vit"
)
NUM_RETRIES = 6
class ViTAttention(vit.ViTAttention):
def __init__(self, config):
check_vit_in_transformers()
super().__init__(config)
# We need to support attention masks for vision language input
# ViTAttention from transformers doesn't currently support attention masks,
# for versions without attention_mask support we use these clones of ViT modules
# that use BertSelfAttention to enable masking.
self.attention = BertSelfAttention(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
self_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
class ViTLayer(nn.Module):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ViTAttention(config)
self.intermediate = vit.ViTIntermediate(config)
self.output = vit.ViTOutput(config)
self.layernorm_before = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps
)
self.layernorm_after = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps
)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
self.layernorm_before(
hidden_states
), # in ViT, layernorm is applied before self-attention
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
# first residual connection
hidden_states = attention_output + hidden_states
# in ViT, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_states)
outputs = (layer_output,) + outputs
return outputs
class ViTEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[ViTLayer(config) for _ in range(config.num_hidden_layers)]
)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
)
else:
layer_outputs = layer_module(
hidden_states, attention_mask, layer_head_mask, output_attentions
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions]
if v is not None
)
return vit.BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class ViTModel(vit.ViTPreTrainedModel):
@dataclass
class Config:
name: str = "vit"
# See https://huggingface.co/models?filter=vit for available options
pretrained_model_name: str = "google/vit-base-patch16-224"
random_init: bool = False
gradient_checkpointing: bool = False
do_patch_embeddings: bool = True
def __init__(self, config):
check_vit_in_transformers()
super().__init__(config)
self.config = config
self.embeddings = vit.ViTEmbeddings(config)
self.encoder = ViTEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
add_pooling_layer = getattr(config, "add_pooling_layer", True)
self.pooler = vit.ViTPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads
to prune in this layer} See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_values=None,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples::
>>> from transformers import ViTFeatureExtractor, ViTModel
>>> from PIL import Image
>>> import requests
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
        >>> feature_extractor = ViTFeatureExtractor.from_pretrained(
        ...     'google/vit-base-patch16-224-in21k'
        ... )
>>> model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
>>> inputs = feature_extractor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_values is None:
raise ValueError("You have to specify input_values")
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape
# [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
do_patch_embeddings = getattr(self.config, "do_patch_embeddings", True)
embedding_output = (
self.embeddings(input_values) if do_patch_embeddings else input_values
)
batch_size, seq_length, _ = embedding_output.shape
device = embedding_output.device
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=device)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, (batch_size, seq_length), device
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return vit.BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@staticmethod
def from_config(config: Config):
check_vit_in_transformers()
config_with_defaults = OmegaConf.create({**asdict(ViTModel.Config()), **config})
random_init = config_with_defaults.get("random_init", False)
hf_config = retry_n(
NUM_RETRIES,
vit.ViTConfig.from_pretrained,
config_with_defaults.pretrained_model_name,
**OmegaConf.to_container(config_with_defaults),
)
hf_config.update(config)
if not random_init:
module = retry_n(
NUM_RETRIES,
ViTModel.from_pretrained,
config.pretrained_model_name,
config=hf_config,
)
else:
module = ViTModel(hf_config)
return module, hf_config
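# Hedged usage sketch (illustrative only, not part of the original module):
# builds a randomly initialized ViTModel via from_config and runs dummy pixel
# values through it. Only the HuggingFace config of the default
# "google/vit-base-patch16-224" checkpoint is assumed to be fetchable; the
# weights are not needed because random_init=True. The helper name is made up
# for the example.
def _vit_model_usage_sketch():
    config = OmegaConf.create({"random_init": True})
    module, hf_config = ViTModel.from_config(config)
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = module(input_values=pixel_values)
    # 196 patch tokens + 1 [CLS] token, each of size hf_config.hidden_size
    return outputs.last_hidden_state.shape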
| EXA-1-master | exa/models/mmf-main/mmf/modules/vit.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# TODO: Update kwargs with defaults
import os
import pickle
from copy import deepcopy
from functools import lru_cache
from typing import Optional, Tuple
import numpy as np
import torch
from mmf.modules.attention import AttentionLayer, SelfAttention, SelfGuidedAttention
from mmf.modules.bottleneck import MovieBottleneck
from mmf.modules.layers import AttnPool1d, Identity
from mmf.utils.file_io import PathManager
from mmf.utils.vocab import Vocab
from torch import nn, Tensor
try:
from transformers3.modeling_bert import BertEmbeddings
except ImportError:
from transformers.modeling_bert import BertEmbeddings
class TextEmbedding(nn.Module):
def __init__(self, emb_type, **kwargs):
super().__init__()
self.model_data_dir = kwargs.get("model_data_dir", None)
self.embedding_dim = kwargs.get("embedding_dim", None)
# Update kwargs here
if emb_type == "identity":
self.module = Identity()
self.module.text_out_dim = self.embedding_dim
elif emb_type == "vocab":
self.module = VocabEmbedding(**kwargs)
self.module.text_out_dim = self.embedding_dim
elif emb_type == "projection":
self.module = ProjectionEmbedding(**kwargs)
self.module.text_out_dim = self.module.out_dim
elif emb_type == "preextracted":
self.module = PreExtractedEmbedding(**kwargs)
elif emb_type == "bilstm":
self.module = BiLSTMTextEmbedding(**kwargs)
elif emb_type == "attention":
self.module = AttentionTextEmbedding(**kwargs)
elif emb_type == "mcan":
self.module = SAEmbedding(**kwargs)
elif emb_type == "torch":
vocab_size = kwargs["vocab_size"]
embedding_dim = kwargs["embedding_dim"]
self.module = nn.Embedding(vocab_size, embedding_dim)
self.module.text_out_dim = self.embedding_dim
else:
raise NotImplementedError("Unknown question embedding '%s'" % emb_type)
self.text_out_dim = self.module.text_out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
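# Hedged usage sketch (illustrative only, not part of the original module): the
# "torch" embedding type is a plain nn.Embedding lookup, so token ids of shape
# B x T come back as B x T x embedding_dim. The helper name is made up for the
# example.
def _text_embedding_usage_sketch():
    embedding = TextEmbedding("torch", vocab_size=100, embedding_dim=32)
    token_ids = torch.randint(0, 100, (4, 7))
    word_vectors = embedding(token_ids)
    assert word_vectors.shape == (4, 7, 32)
    return word_vectors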
class VocabEmbedding(nn.Module):
def __init__(self, embedding_dim, **vocab_params):
super().__init__()
self.vocab = Vocab(**vocab_params)
self.module = self.vocab.get_embedding(
nn.Embedding, embedding_dim=embedding_dim
)
def forward(self, x):
return self.module(x)
class BiLSTMTextEmbedding(nn.Module):
def __init__(
self,
hidden_dim,
embedding_dim,
num_layers,
dropout,
bidirectional=False,
rnn_type="GRU",
):
super().__init__()
        self.text_out_dim = hidden_dim
        self.num_hid = hidden_dim
        self.bidirectional = bidirectional
if rnn_type == "LSTM":
rnn_cls = nn.LSTM
elif rnn_type == "GRU":
rnn_cls = nn.GRU
self.recurrent_encoder = rnn_cls(
input_size=embedding_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, x):
out, _ = self.recurrent_encoder(x)
        # For a unidirectional RNN, return the last hidden state directly
        if not self.bidirectional:
            return out[:, -1]
        # For a bidirectional RNN, concatenate the last forward state with the
        # first backward state
        forward_ = out[:, -1, : self.num_hid]
        backward = out[:, 0, self.num_hid :]
        return torch.cat((forward_, backward), dim=1)
def forward_all(self, x):
output, _ = self.recurrent_encoder(x)
return output
class PreExtractedEmbedding(nn.Module):
def __init__(self, out_dim, base_path):
super().__init__()
self.text_out_dim = out_dim
self.base_path = base_path
self.cache = {}
def forward(self, qids):
embeddings = []
for qid in qids:
embeddings.append(self.get_item(qid))
return torch.stack(embeddings, dim=0)
@lru_cache(maxsize=5000)
def get_item(self, qid):
return np.load(os.path.join(self.base_path, str(qid.item()) + ".npy"))
class AttentionTextEmbedding(nn.Module):
def __init__(self, hidden_dim, embedding_dim, num_layers, dropout, **kwargs):
super().__init__()
self.text_out_dim = hidden_dim * kwargs["conv2_out"]
bidirectional = kwargs.get("bidirectional", False)
self.recurrent_unit = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_dim // 2 if bidirectional else hidden_dim,
num_layers=num_layers,
batch_first=True,
bidirectional=bidirectional,
)
self.dropout = nn.Dropout(p=dropout)
conv1_out = kwargs["conv1_out"]
conv2_out = kwargs["conv2_out"]
kernel_size = kwargs["kernel_size"]
padding = kwargs["padding"]
self.conv1 = nn.Conv1d(
in_channels=hidden_dim,
out_channels=conv1_out,
kernel_size=kernel_size,
padding=padding,
)
self.conv2 = nn.Conv1d(
in_channels=conv1_out,
out_channels=conv2_out,
kernel_size=kernel_size,
padding=padding,
)
self.relu = nn.ReLU()
def forward(self, x):
batch_size = x.size(0)
self.recurrent_unit.flatten_parameters()
lstm_out, _ = self.recurrent_unit(x) # N * T * hidden_dim
lstm_drop = self.dropout(lstm_out) # N * T * hidden_dim
lstm_reshape = lstm_drop.permute(0, 2, 1) # N * hidden_dim * T
qatt_conv1 = self.conv1(lstm_reshape) # N x conv1_out x T
qatt_relu = self.relu(qatt_conv1)
qatt_conv2 = self.conv2(qatt_relu) # N x conv2_out x T
# Over last dim
qtt_softmax = nn.functional.softmax(qatt_conv2, dim=2)
# N * conv2_out * hidden_dim
qtt_feature = torch.bmm(qtt_softmax, lstm_drop)
# N * (conv2_out * hidden_dim)
qtt_feature_concat = qtt_feature.view(batch_size, -1)
return qtt_feature_concat
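# Hedged usage sketch (illustrative only, not part of the original module): the
# convolutional attention head turns B x T x embedding_dim word vectors into a
# fixed-size vector of hidden_dim * conv2_out, independent of the length T. The
# helper name and hyperparameter values are made up for the example.
def _attention_text_embedding_usage_sketch():
    embedding = AttentionTextEmbedding(
        hidden_dim=128,
        embedding_dim=300,
        num_layers=1,
        dropout=0.0,
        conv1_out=64,
        conv2_out=2,
        kernel_size=1,
        padding=0,
    )
    word_vectors = torch.randn(4, 10, 300)
    pooled = embedding(word_vectors)
    assert pooled.shape == (4, 256)  # batch_size x (hidden_dim * conv2_out)
    return pooled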
class ProjectionEmbedding(nn.Module):
def __init__(self, module, in_dim, out_dim, **kwargs):
super().__init__()
if module == "linear":
self.layers = nn.Linear(in_dim, out_dim)
self.out_dim = out_dim
elif module == "conv":
last_out_channels = in_dim
layers = []
for conv in kwargs["convs"]:
layers.append(nn.Conv1d(in_channels=last_out_channels, **conv))
last_out_channels = conv["out_channels"]
self.layers = nn.ModuleList(*layers)
self.out_dim = last_out_channels
else:
raise TypeError(
"Unknown module type for 'ProjectionEmbedding',"
"use either 'linear' or 'conv'"
)
def forward(self, x):
return self.layers(x)
class ImageFeatureEmbedding(nn.Module):
"""
    parameters:

    input:
        image_feat_variable: [batch_size, num_location, image_feat_dim]
            or a list of [num_location, image_feat_dim]
            when using an adaptive number of objects
        question_embedding: [batch_size, txt_embedding_dim]

    output:
        image_embedding: [batch_size, image_feat_dim]
"""
def __init__(self, img_dim, question_dim, **kwargs):
super().__init__()
self.image_attention_model = AttentionLayer(img_dim, question_dim, **kwargs)
self.out_dim = self.image_attention_model.out_dim
def forward(self, image_feat_variable, question_embedding, image_dims, extra=None):
if extra is None:
extra = {}
# N x K x n_att
attention = self.image_attention_model(
image_feat_variable, question_embedding, image_dims
)
att_reshape = attention.permute(0, 2, 1)
order_vectors = getattr(extra, "order_vectors", None)
if order_vectors is not None:
image_feat_variable = torch.cat(
[image_feat_variable, order_vectors], dim=-1
)
tmp_embedding = torch.bmm(
att_reshape, image_feat_variable
) # N x n_att x image_dim
batch_size = att_reshape.size(0)
image_embedding = tmp_embedding.view(batch_size, -1)
return image_embedding, attention
class MultiHeadImageFeatureEmbedding(nn.Module):
def __init__(self, img_dim, question_dim, **kwargs):
super().__init__()
self.module = nn.MultiheadAttention(
embed_dim=question_dim, kdim=img_dim, vdim=img_dim, **kwargs
)
self.out_dim = question_dim
def forward(self, image_feat_variable, question_embedding, image_dims, extra=None):
if extra is None:
extra = {}
image_feat_variable = image_feat_variable.transpose(0, 1)
question_embedding = question_embedding.unsqueeze(1).transpose(0, 1)
output, weights = self.module(
question_embedding, image_feat_variable, image_feat_variable
)
output = output.transpose(0, 1)
return output.squeeze(), weights
class ImageFinetune(nn.Module):
def __init__(self, in_dim, weights_file, bias_file):
super().__init__()
with PathManager.open(weights_file, "rb") as w:
weights = pickle.load(w)
with PathManager.open(bias_file, "rb") as b:
bias = pickle.load(b)
out_dim = bias.shape[0]
self.lc = nn.Linear(in_dim, out_dim)
self.lc.weight.data.copy_(torch.from_numpy(weights))
self.lc.bias.data.copy_(torch.from_numpy(bias))
self.out_dim = out_dim
def forward(self, image):
i2 = self.lc(image)
i3 = nn.functional.relu(i2)
return i3
class BertVisioLinguisticEmbeddings(BertEmbeddings):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
self.token_type_embeddings_visual = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
self.position_embeddings_visual = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
self.projection = nn.Linear(config.visual_embedding_dim, config.hidden_size)
def initialize_visual_from_pretrained(self):
self.token_type_embeddings_visual.weight = nn.Parameter(
deepcopy(self.token_type_embeddings.weight.data), requires_grad=True
)
self.position_embeddings_visual.weight = nn.Parameter(
deepcopy(self.position_embeddings.weight.data), requires_grad=True
)
def encode_text(
self, input_ids: Tensor, token_type_ids: Optional[Tensor] = None
) -> Tensor:
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
return embeddings
def encode_image(
self,
visual_embeddings: Tensor,
visual_embeddings_type: Tensor,
image_text_alignment: Optional[Tensor] = None,
) -> Tensor:
visual_embeddings = self.projection(visual_embeddings)
token_type_embeddings_visual = self.token_type_embeddings_visual(
visual_embeddings_type
)
# get position_embeddings
# this depends on image_text_alignment
position_embeddings_visual = self.get_position_embeddings_visual(
visual_embeddings, image_text_alignment=image_text_alignment
)
# calculate visual embeddings
v_embeddings = (
visual_embeddings
+ position_embeddings_visual
+ token_type_embeddings_visual
)
return v_embeddings
def get_position_embeddings_visual(
self, visual_embeddings: Tensor, image_text_alignment: Optional[Tensor] = None
) -> Tensor:
if image_text_alignment is not None:
# image_text_alignment = Batch x image_length x alignment_number.
# Each element denotes the position of the word corresponding to the
# image feature. -1 is the padding value.
image_text_alignment_mask = (
(image_text_alignment != -1).long().to(image_text_alignment.device)
)
# Get rid of the -1.
image_text_alignment = image_text_alignment_mask * image_text_alignment
# position_embeddings_visual
# = Batch x image_length x alignment length x dim
position_embeddings_visual = self.position_embeddings(
image_text_alignment
) * image_text_alignment_mask.unsqueeze(-1)
position_embeddings_visual = position_embeddings_visual.sum(2)
            # We want to average along the alignment_number dimension.
image_text_alignment_mask = image_text_alignment_mask.sum(2)
image_text_alignment_mask[image_text_alignment_mask == 0] = torch.tensor(
[1], dtype=torch.long
            )  # Avoid divide-by-zero errors
position_embeddings_visual = (
position_embeddings_visual / image_text_alignment_mask.unsqueeze(-1)
)
position_ids_visual = torch.zeros(
visual_embeddings.size()[:-1],
dtype=torch.long,
device=visual_embeddings.device,
)
position_embeddings_visual = (
position_embeddings_visual
+ self.position_embeddings_visual(position_ids_visual)
)
else:
position_ids_visual = torch.zeros(
visual_embeddings.size()[:-1],
dtype=torch.long,
device=visual_embeddings.device,
)
position_embeddings_visual = self.position_embeddings_visual(
position_ids_visual
)
return position_embeddings_visual
def forward(
self,
input_ids: Tensor,
token_type_ids: Optional[Tensor] = None,
visual_embeddings: Optional[Tensor] = None,
visual_embeddings_type: Optional[Tensor] = None,
image_text_alignment: Optional[Tensor] = None,
) -> Tensor:
"""
input_ids = [batch_size, sequence_length]
token_type_ids = [batch_size, sequence_length]
visual_embedding = [batch_size, image_feature_length, image_feature_dim]
image_text_alignment = [batch_size, image_feature_length, alignment_dim]
"""
# text embeddings
text_embeddings = self.encode_text(input_ids, token_type_ids=token_type_ids)
# visual embeddings
if visual_embeddings is not None and visual_embeddings_type is not None:
v_embeddings = self.encode_image(
visual_embeddings,
visual_embeddings_type=visual_embeddings_type,
image_text_alignment=image_text_alignment,
)
            # Concatenate the two:
embeddings = torch.cat(
(text_embeddings, v_embeddings), dim=1
) # concat the visual embeddings after the attentions
else:
embeddings = text_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
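# Hedged usage sketch (illustrative only, not part of the original module): the
# embedding layer concatenates text token embeddings with projected visual
# features along the sequence dimension, so T text tokens and V regions give
# T + V output embeddings. The tiny BertConfig values and the helper name below
# are made up for the example.
def _visio_linguistic_embeddings_usage_sketch():
    try:
        from transformers3 import BertConfig
    except ImportError:
        from transformers import BertConfig

    config = BertConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64)
    config.visual_embedding_dim = 2048  # extra attribute expected by this class
    embeddings = BertVisioLinguisticEmbeddings(config)
    input_ids = torch.randint(0, 100, (2, 6))
    visual_features = torch.randn(2, 4, 2048)
    visual_types = torch.zeros(2, 4, dtype=torch.long)
    out = embeddings(
        input_ids,
        visual_embeddings=visual_features,
        visual_embeddings_type=visual_types,
    )
    assert out.shape == (2, 10, 32)  # (T + V) = 6 + 4 joint embeddings
    return out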
class SAEmbedding(nn.Module):
"""Encoder block implementation in MCAN https://arxiv.org/abs/1906.10770"""
def __init__(self, hidden_dim: int, embedding_dim: int, **kwargs):
super().__init__()
num_attn = kwargs["num_attn"]
num_layers = kwargs["num_layers"]
dropout = kwargs.get("dropout", 0.1)
num_attn_pool = kwargs.get("num_attn_pool", 1)
num_feat = kwargs.get("num_feat", -1)
self.lstm = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_dim,
num_layers=1,
batch_first=True,
)
self.self_attns = nn.ModuleList(
[SelfAttention(hidden_dim, num_attn, dropout) for _ in range(num_layers)]
)
self.attn_pool = None
self.num_feat = num_feat
self.text_out_dim = hidden_dim
if num_attn_pool > 0:
self.attn_pool = AttnPool1d(hidden_dim, num_feat * num_attn_pool)
self.text_out_dim = hidden_dim * num_attn_pool
def forward(
self, x: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
b = x.size(0)
out, (h, c) = self.lstm(x)
for self_attn in self.self_attns:
out = self_attn(out, mask)
vec = h.transpose(0, 1).contiguous().view(b, 1, -1)
if self.attn_pool:
vec = self.attn_pool(out, out, mask).view(b, self.num_feat, -1)
return out, vec
class SGAEmbedding(nn.Module):
"""Decoder block implementation in MCAN https://arxiv.org/abs/1906.10770"""
def __init__(self, embedding_dim: int, **kwargs):
super().__init__()
num_attn = kwargs["num_attn"]
num_layers = kwargs["num_layers"]
dropout = kwargs.get("dropout", 0.1)
hidden_dim = kwargs.get("hidden_dim", 512)
self.linear = nn.Linear(embedding_dim, hidden_dim)
self.self_guided_attns = nn.ModuleList(
[
SelfGuidedAttention(hidden_dim, num_attn, dropout)
for _ in range(num_layers)
]
)
self.out_dim = hidden_dim
def forward(
self,
x: torch.Tensor,
y: torch.Tensor,
x_mask: torch.Tensor,
y_mask: torch.Tensor,
) -> torch.Tensor:
if x.dim() == 4:
b, c, h, w = x.shape
x = x.view(b, c, -1).transpose(1, 2).contiguous() # b x (h*w) x c
x = self.linear(x)
for self_guided_attn in self.self_guided_attns:
x = self_guided_attn(x, y, x_mask, y_mask)
return x
class CBNEmbedding(nn.Module):
"""MoVie bottleneck layers from https://arxiv.org/abs/2004.11883"""
def __init__(self, embedding_dim: int, **kwargs):
super().__init__()
cond_dim = kwargs["cond_dim"]
num_layers = kwargs["cbn_num_layers"]
compressed = kwargs.get("compressed", True)
use_se = kwargs.get("use_se", True)
self.out_dim = 1024
self.layer_norm = nn.LayerNorm(self.out_dim)
cbns = []
for i in range(num_layers):
if embedding_dim != self.out_dim:
downsample = nn.Conv2d(
embedding_dim, self.out_dim, kernel_size=1, stride=1, bias=False
)
cbns.append(
MovieBottleneck(
embedding_dim,
self.out_dim // 4,
cond_dim,
downsample=downsample,
compressed=compressed,
use_se=use_se,
)
)
else:
cbns.append(
MovieBottleneck(
embedding_dim,
self.out_dim // 4,
cond_dim,
compressed=compressed,
use_se=use_se,
)
)
embedding_dim = self.out_dim
self.cbns = nn.ModuleList(cbns)
self._init_layers()
def _init_layers(self) -> None:
for cbn in self.cbns:
cbn.init_layers()
def forward(self, x: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
for cbn in self.cbns:
x, _ = cbn(x, v)
x = self.layer_norm(
nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(3).squeeze(2)
)
return x
class TwoBranchEmbedding(nn.Module):
"""Attach MoVie into MCAN model as a counting module in
https://arxiv.org/abs/2004.11883
"""
def __init__(self, embedding_dim: int, **kwargs):
super().__init__()
hidden_dim = kwargs.get("hidden_dim", 512)
self.sga = SGAEmbedding(embedding_dim, **kwargs)
self.sga_pool = AttnPool1d(hidden_dim, 1)
self.cbn = CBNEmbedding(embedding_dim, **kwargs)
self.out_dim = hidden_dim
def forward(
self,
x: torch.Tensor,
y: torch.Tensor,
v: torch.Tensor,
x_mask: torch.Tensor,
y_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
x_sga = self.sga(x, y, x_mask, y_mask)
x_sga = self.sga_pool(x_sga, x_sga, x_mask).squeeze(1)
x_cbn = self.cbn(x, v)
return x_sga, x_cbn
| EXA-1-master | exa/models/mmf-main/mmf/modules/embeddings.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import OrderedDict
from typing import Optional, Tuple, Type
import torch
import torch.nn as nn
from torchvision.models.resnet import Bottleneck, conv1x1, conv3x3
from torchvision.ops.misc import FrozenBatchNorm2d
class ChannelPool(nn.Module):
"""Average pooling in the channel dimension"""
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.mean(dim=1, keepdim=True)
class SEModule(nn.Module):
"""Squeeze-and-Excitation module from https://arxiv.org/pdf/1709.01507.pdf
Args:
dim: the original hidden dim.
sqrate: the squeeze rate in hidden dim.
    Returns:
        A new feature map whose channels are gated
        by sigmoid weights from the SE module.
"""
def __init__(self, dim: int, sqrate: float):
super().__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(dim, dim // sqrate, kernel_size=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(dim // sqrate, dim, kernel_size=1, bias=False),
nn.Sigmoid(),
)
self.attn = nn.Sequential(
ChannelPool(),
nn.Conv2d(1, 1, kernel_size=7, padding=3, bias=False),
nn.Sigmoid(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x * self.se(x)
return x * self.attn(x)
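# Hedged usage sketch (illustrative only, not part of the original module):
# SEModule preserves the input shape and only rescales it, first per channel
# (squeeze-and-excitation) and then per spatial location (channel-pool
# attention). The helper name is made up for the example.
def _se_module_usage_sketch():
    se = SEModule(dim=64, sqrate=4)
    features = torch.randn(2, 64, 14, 14)
    gated = se(features)
    assert gated.shape == features.shape
    return gated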
class Modulation(nn.Module):
def __init__(
self, num_features: int, num_cond_features: int, compressed: bool = True
):
super().__init__()
self.linear = nn.Linear(num_cond_features, num_features)
self.conv = (
nn.Conv2d(num_features, 256, kernel_size=1)
if compressed
else nn.Conv2d(num_features, num_features, kernel_size=1)
)
def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
cond = self.linear(cond).unsqueeze(2).unsqueeze(3)
return self.conv(x * cond)
class MovieBottleneck(nn.Module):
"""
Standard ResNet bottleneck with MoVie modulation in
https://arxiv.org/abs/2004.11883
The code is inspired from
https://pytorch.org/docs/stable/_modules/torchvision/models/resnet.html
"""
expansion = 4
def __init__(
self,
inplanes: int,
planes: int,
        cond_planes: Optional[int] = None,
stride: int = 1,
downsample: Optional[Type[nn.Module]] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Type[nn.Module]] = None,
stride_in_1x1: bool = False,
compressed: bool = True,
use_se: bool = True,
):
super().__init__()
if norm_layer is None:
self.norm_layer = FrozenBatchNorm2d
else:
self.norm_layer = norm_layer
self.cond_planes = cond_planes
self.planes = planes
self.inplanes = inplanes
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when
# stride != 1
self.conv1 = conv1x1(inplanes, self.width, stride_1x1)
self.bn1 = self.norm_layer(self.width)
self.conv2 = conv3x3(self.width, self.width, stride_3x3, groups, dilation)
self.bn2 = self.norm_layer(self.width)
self.conv3 = conv1x1(self.width, planes * self.expansion)
self.bn3 = self.norm_layer(self.planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.se = None
self.compressed = compressed
self.use_se = use_se
def init_layers(self):
if self.cond_planes:
self.cond = Modulation(
self.inplanes, self.cond_planes, compressed=self.compressed
)
self.se = SEModule(self.planes * self.expansion, 4) if self.use_se else None
def forward(
self, x: torch.Tensor, cond: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
identity = x
if self.cond_planes and self.compressed:
x = self.conv1(x) + self.cond(x, cond)
elif self.cond_planes and not self.compressed:
x += self.cond(x, cond)
x = self.conv1(x)
else:
x = self.conv1(x)
out = self.bn1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample:
shortcut = self.downsample(identity)
else:
shortcut = identity
if self.se:
out = self.se(out)
out += shortcut
out = self.relu(out)
return out, cond
class AvgPoolBottleneck(Bottleneck):
expansion = 4
def __init__(self, inplanes: int, planes: int, stride: int = 1):
# setting stride to 1 bc we use average pooling to downsample
super().__init__(inplanes=inplanes, planes=planes, stride=1)
if stride > 1 or inplanes != planes * AvgPoolBottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the
# subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict(
[
("-1", nn.AvgPool2d(stride)),
(
"0",
nn.Conv2d(
inplanes,
planes * AvgPoolBottleneck.expansion,
1,
stride=1,
bias=False,
),
),
("1", nn.BatchNorm2d(planes * AvgPoolBottleneck.expansion)),
]
)
)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
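# Hedged usage sketch (illustrative only, not part of the original module):
# MovieBottleneck modulates a ResNet bottleneck with a conditioning vector
# (e.g. a question embedding). With inplanes == planes * expansion and stride 1
# the output keeps the input shape. compressed=False is used here so the sketch
# is not tied to the 256-channel compressed modulation path; the helper name is
# made up for the example.
def _movie_bottleneck_usage_sketch():
    block = MovieBottleneck(
        inplanes=64, planes=16, cond_planes=32, compressed=False, use_se=True
    )
    block.init_layers()
    features = torch.randn(2, 64, 14, 14)
    cond = torch.randn(2, 32)
    out, _ = block(features, cond)
    assert out.shape == (2, 64, 14, 14)
    return out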
| EXA-1-master | exa/models/mmf-main/mmf/modules/bottleneck.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
The fusions module contains various fusion techniques, some based on BLOCK:
Bilinear Superdiagonal Fusion for VQA and VRD (e.g. LinearSum, ConcatMLP),
taken from https://github.com/Cadene/block.bootstrap.pytorch#fusions.

For implementing your own fusion technique, you need to follow these steps:

.. code::

    from torch import nn
    from mmf.common.registry import registry
    from mmf.modules.fusions import Block
    from mmf.modules.fusions import LinearSum
    from mmf.modules.fusions import ConcatMLP
    from mmf.modules.fusions import MLB
    from mmf.modules.fusions import Mutan
    from mmf.modules.fusions import Tucker
    from mmf.modules.fusions import BlockTucker
    from mmf.modules.fusions import MFH
    from mmf.modules.fusions import MFB
    from mmf.modules.fusions import MCB

    @registry.register_fusion("custom")
    class CustomFusion(nn.Module):
        def __init__(self, params=None):
            super().__init__()
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.utils.general import get_chunks, get_sizes_list, irfft, rfft
from mmf.utils.logger import log_class_usage
class CompactBilinearPooling(nn.Module):
def __init__(self, input_dim1, input_dim2, output_dim, sum_pool=True):
super().__init__()
self.output_dim = output_dim
self.sum_pool = sum_pool
self.sketch1 = nn.Parameter(
self.generate_sketch_matrix(
torch.randint(output_dim, size=(input_dim1,)),
2 * torch.randint(2, size=(input_dim1,)) - 1,
input_dim1,
output_dim,
),
requires_grad=False,
)
self.sketch2 = nn.Parameter(
self.generate_sketch_matrix(
torch.randint(output_dim, size=(input_dim2,)),
2 * torch.randint(2, size=(input_dim2,)) - 1,
input_dim2,
output_dim,
),
requires_grad=False,
)
def generate_sketch_matrix(self, rand_h, rand_s, input_dim, output_dim):
return torch.sparse.FloatTensor(
torch.stack(
[torch.arange(input_dim, out=torch.LongTensor()), rand_h.long()]
),
rand_s.float(),
[input_dim, output_dim],
).to_dense()
def forward(self, x1, x2):
assert len(x1.shape) == len(x2.shape)
if len(x1.shape) == 4 and len(x2.shape) == 4:
fft1 = rfft(x1.permute(0, 2, 3, 1).matmul(self.sketch1), signal_ndim=1)
fft2 = rfft(x2.permute(0, 2, 3, 1).matmul(self.sketch2), signal_ndim=1)
else:
fft1 = rfft(x1.matmul(self.sketch1), signal_ndim=1)
fft2 = rfft(x2.matmul(self.sketch2), signal_ndim=1)
fft_product = torch.stack(
[
fft1[..., 0] * fft2[..., 0] - fft1[..., 1] * fft2[..., 1],
fft1[..., 0] * fft2[..., 1] + fft1[..., 1] * fft2[..., 0],
],
dim=-1,
)
cbp = (
irfft(fft_product, signal_ndim=1, dim=-1, s=(self.output_dim,))
* self.output_dim
)
if len(x1.shape) == 4 and len(x2.shape) == 4:
cbp = cbp.sum(dim=[1, 2]) if self.sum_pool else cbp.permute(0, 3, 1, 2)
return cbp
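# Hedged usage sketch (illustrative only, not part of the original module):
# Compact Bilinear Pooling approximates the outer product of two feature
# vectors with count sketches and FFTs, producing one fused vector per example.
# The helper name and dimensions are made up for the example.
def _compact_bilinear_pooling_usage_sketch():
    cbp = CompactBilinearPooling(input_dim1=512, input_dim2=300, output_dim=1024)
    visual_feat = torch.randn(4, 512)
    text_feat = torch.randn(4, 300)
    fused = cbp(visual_feat, text_feat)
    assert fused.shape == (4, 1024)
    return fused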
class MLP(nn.Module):
def __init__(self, input_dim, dimensions, activation="relu", dropout=0.0):
super().__init__()
self.input_dim = input_dim
self.dimensions = dimensions
self.activation = activation
self.dropout = dropout
self.linears = nn.ModuleList([nn.Linear(input_dim, dimensions[0])])
for din, dout in zip(dimensions[:-1], dimensions[1:]):
self.linears.append(nn.Linear(din, dout))
def forward(self, x):
for i, lin in enumerate(self.linears):
x = lin(x)
if i < len(self.linears) - 1:
x = F.__dict__[self.activation](x)
if self.dropout > 0:
x = F.dropout(x, self.dropout, training=self.training)
return x
@registry.register_fusion("block")
class Block(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1600,
chunks=20,
rank=15,
shared=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
pos_norm="before_cat",
):
super().__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.chunks = chunks
self.rank = rank
self.shared = shared
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
assert pos_norm in ["before_cat", "after_cat"]
self.pos_norm = pos_norm
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
if shared:
self.linear1 = self.linear0
else:
self.linear1 = nn.Linear(input_dims[1], mm_dim)
merge_linears0, merge_linears1 = [], []
self.sizes_list = get_sizes_list(mm_dim, chunks)
for size in self.sizes_list:
ml0 = nn.Linear(size, size * rank)
merge_linears0.append(ml0)
if self.shared:
ml1 = ml0
else:
ml1 = nn.Linear(size, size * rank)
merge_linears1.append(ml1)
self.merge_linears0 = nn.ModuleList(merge_linears0)
self.merge_linears1 = nn.ModuleList(merge_linears1)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
bsize = x1.size(0)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
x0_chunks = get_chunks(x0, self.sizes_list)
x1_chunks = get_chunks(x1, self.sizes_list)
zs = []
for chunk_id, m0, m1 in zip(
range(len(self.sizes_list)), self.merge_linears0, self.merge_linears1
):
x0_c = x0_chunks[chunk_id]
x1_c = x1_chunks[chunk_id]
m = m0(x0_c) * m1(x1_c) # bsize x split_size*rank
m = m.view(bsize, self.rank, -1)
z = torch.sum(m, 1)
if self.pos_norm == "before_cat":
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
zs.append(z)
z = torch.cat(zs, 1)
if self.pos_norm == "after_cat":
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
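# Hedged usage sketch (illustrative only, not part of the original module):
# Block fuses two modality vectors (here 2048-d image features and 300-d text
# features) into one output vector; the two inputs are passed as a list. The
# helper name and dimensions are made up for the example.
def _block_fusion_usage_sketch():
    fusion = Block(input_dims=[2048, 300], output_dim=512)
    image_feat = torch.randn(8, 2048)
    text_feat = torch.randn(8, 300)
    fused = fusion([image_feat, text_feat])
    assert fused.shape == (8, 512)
    return fused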
@registry.register_fusion("block_tucker")
class BlockTucker(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1600,
chunks=20,
shared=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
pos_norm="before_cat",
):
super().__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.chunks = chunks
self.shared = shared
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
assert pos_norm in ["before_cat", "after_cat"]
self.pos_norm = pos_norm
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
if self.shared:
self.linear1 = self.linear0
else:
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.sizes_list = get_sizes_list(mm_dim, chunks)
bilinears = []
for size in self.sizes_list:
bilinears.append(nn.Bilinear(size, size, size))
self.bilinears = nn.ModuleList(bilinears)
self.linear_out = nn.Linear(self.mm_dim, self.output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.dropout_input:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
x0_chunks = get_chunks(x0, self.sizes_list)
x1_chunks = get_chunks(x1, self.sizes_list)
zs = []
for chunk_id, bilinear in enumerate(self.bilinears):
x0_c = x0_chunks[chunk_id]
x1_c = x1_chunks[chunk_id]
z = bilinear(x0_c, x1_c)
if self.pos_norm == "before_cat":
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
zs.append(z)
z = torch.cat(zs, 1)
if self.pos_norm == "after_cat":
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
@registry.register_fusion("mutan")
class Mutan(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1600,
rank=15,
shared=False,
normalize=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.shared = shared
self.mm_dim = mm_dim
self.rank = rank
self.output_dim = output_dim
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.normalize = normalize
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.merge_linear0 = nn.Linear(mm_dim, mm_dim * rank)
if self.shared:
self.linear1 = self.linear0
self.merge_linear1 = self.merge_linear0
else:
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.merge_linear1 = nn.Linear(mm_dim, mm_dim * rank)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
m0 = self.merge_linear0(x0)
m1 = self.merge_linear1(x1)
m = m0 * m1
m = m.view(-1, self.rank, self.mm_dim)
z = torch.sum(m, 1)
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
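# Illustrative usage sketch (added for clarity; not part of the original module).
# Mutan projects both inputs to `mm_dim`, expands each to `mm_dim * rank`,
# multiplies them element-wise and sums over the rank dimension before the
# output projection. All dimensions below are arbitrary placeholders.
def _example_mutan_usage():
    fusion = Mutan(input_dims=[2048, 768], output_dim=3000, mm_dim=512, rank=10)
    image_feat = torch.randn(4, 2048)  # pooled image features
    text_feat = torch.randn(4, 768)  # pooled text features
    return fusion([image_feat, text_feat])  # shape: [4, 3000]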
@registry.register_fusion("tucker")
class Tucker(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1600,
shared=False,
normalize=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.shared = shared
self.mm_dim = mm_dim
self.output_dim = output_dim
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
if shared:
self.linear1 = self.linear0
else:
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.bilinear = nn.Bilinear(mm_dim, mm_dim, mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = self.bilinear(x0, x1)
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
@registry.register_fusion("mlb")
class MLB(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1200,
activ_input="relu",
activ_output="relu",
normalize=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = x0 * x1
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
@registry.register_fusion("mfb")
class MFB(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1200,
factor=2,
activ_input="relu",
activ_output="relu",
normalize=False,
dropout_input=0.0,
dropout_pre_norm=0.0,
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.factor = factor
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_norm = dropout_pre_norm
self.dropout_output = dropout_output
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = x0 * x1
if self.dropout_pre_norm > 0:
z = F.dropout(z, p=self.dropout_pre_norm, training=self.training)
z = z.view(z.size(0), self.mm_dim, self.factor)
z = z.sum(2)
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
@registry.register_fusion("mfh")
class MFH(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1200,
factor=2,
activ_input="relu",
activ_output="relu",
normalize=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.factor = factor
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
# Modules
self.linear0_0 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1_0 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear0_1 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1_1 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear_out = nn.Linear(mm_dim * 2, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0_0(x[0])
x1 = self.linear1_0(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z_0_skip = x0 * x1
if self.dropout_pre_lin:
z_0_skip = F.dropout(
z_0_skip, p=self.dropout_pre_lin, training=self.training
)
z_0 = z_0_skip.view(z_0_skip.size(0), self.mm_dim, self.factor)
z_0 = z_0.sum(2)
if self.normalize:
z_0 = torch.sqrt(F.relu(z_0)) - torch.sqrt(F.relu(-z_0))
z_0 = F.normalize(z_0, p=2)
#
x0 = self.linear0_1(x[0])
x1 = self.linear1_1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z_1 = x0 * x1 * z_0_skip
if self.dropout_pre_lin > 0:
z_1 = F.dropout(z_1, p=self.dropout_pre_lin, training=self.training)
z_1 = z_1.view(z_1.size(0), self.mm_dim, self.factor)
z_1 = z_1.sum(2)
if self.normalize:
z_1 = torch.sqrt(F.relu(z_1)) - torch.sqrt(F.relu(-z_1))
z_1 = F.normalize(z_1, p=2)
#
cat_dim = z_0.dim() - 1
z = torch.cat([z_0, z_1], cat_dim)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
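# Illustrative usage sketch (added for clarity; not part of the original module).
# MFH cascades two MFB stages: the second stage reuses the first stage's
# expanded product (z_0_skip), and the two pooled outputs are concatenated
# (size mm_dim * 2) before the output projection. Dimensions are placeholders.
def _example_mfh_usage():
    fusion = MFH(input_dims=[2048, 768], output_dim=3000, mm_dim=1000, factor=2)
    image_feat = torch.randn(4, 2048)
    text_feat = torch.randn(4, 768)
    return fusion([image_feat, text_feat])  # shape: [4, 3000]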
@registry.register_fusion("mcb")
class MCB(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=16000,
activ_output="relu",
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.activ_output = activ_output
self.dropout_output = dropout_output
# Modules
self.mcb = CompactBilinearPooling(input_dims[0], input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
z = self.mcb(x[0], x[1])
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
@registry.register_fusion("linear_sum")
class LinearSum(nn.Module):
def __init__(
self,
input_dims,
output_dim,
mm_dim=1200,
activ_input="relu",
activ_output="relu",
normalize=False,
dropout_input=0.0,
dropout_pre_lin=0.0,
dropout_output=0.0,
):
super().__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = x0 + x1
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
@registry.register_fusion("concat_mlp")
class ConcatMLP(nn.Module):
def __init__(
self, input_dims, output_dim, dimensions=None, activation="relu", dropout=0.0
):
super().__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.input_dim = sum(input_dims)
if dimensions is None:
dimensions = [500, 500]
self.dimensions = dimensions + [output_dim]
self.activation = activation
self.dropout = dropout
# Modules
self.mlp = MLP(self.input_dim, self.dimensions, self.activation, self.dropout)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
log_class_usage("Fusion", self.__class__)
def forward(self, x):
if x[0].dim() == 3 and x[1].dim() == 2:
x[1] = x[1].unsqueeze(1).reshape_as(x[0])
if x[1].dim() == 3 and x[0].dim() == 2:
x[0] = x[0].unsqueeze(1).reshape_as(x[1])
z = torch.cat(x, dim=x[0].dim() - 1)
z = self.mlp(z)
return z
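# Illustrative usage sketch (added for clarity; not part of the original module).
# ConcatMLP concatenates the two inputs along the last dimension and feeds the
# result through an MLP with hidden sizes defaulting to [500, 500]. Dimensions
# below are arbitrary placeholders.
def _example_concat_mlp_usage():
    fusion = ConcatMLP(input_dims=[2048, 768], output_dim=3000)
    image_feat = torch.randn(4, 2048)
    text_feat = torch.randn(4, 768)
    return fusion([image_feat, text_feat])  # shape: [4, 3000]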
| EXA-1-master | exa/models/mmf-main/mmf/modules/fusions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Initial version was taken from https://github.com/ChenRocks/UNITER/
Licensed under the MIT license.
Wasserstein Distance (Optimal Transport)
"""
import torch
from torch import Tensor
from torch.nn import functional as F
def cost_matrix_cosine(x: Tensor, y: Tensor, eps: float = 1e-5) -> Tensor:
"""Compute cosine distance across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]"""
assert x.dim() == y.dim()
assert x.size(0) == y.size(0)
assert x.size(2) == y.size(2)
x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))
cosine_dist = 1 - cosine_sim
return cosine_dist
def trace(x: Tensor) -> Tensor:
"""Compute trace of input tensor (batched)"""
b, m, n = x.size()
assert m == n
mask = torch.eye(n, dtype=torch.bool, device=x.device).unsqueeze(0).expand_as(x)
trace = x.masked_select(mask).contiguous().view(b, n).sum(dim=-1, keepdim=False)
return trace
@torch.no_grad()
def ipot(
C: Tensor,
x_len: int,
x_pad: Tensor,
y_len: int,
y_pad: Tensor,
joint_pad: Tensor,
beta: float,
iteration: int,
k: int,
) -> Tensor:
"""[B, M, N], [B], [B, M], [B], [B, N], [B, M, N]"""
b, m, n = C.size()
sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
A = torch.exp(-C.transpose(1, 2) / beta)
# mask padded positions
sigma.masked_fill_(x_pad, 0)
joint_pad = joint_pad.transpose(1, 2)
T.masked_fill_(joint_pad, 0)
A.masked_fill_(joint_pad, 0)
# broadcastable lengths
x_len = x_len.unsqueeze(1).unsqueeze(2)
y_len = y_len.unsqueeze(1).unsqueeze(2)
# mask to zero out padding in delta and sigma
x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
for _ in range(iteration):
Q = A * T # bs * n * m
sigma = sigma.view(b, m, 1)
for _ in range(k):
delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
T = delta.view(b, n, 1) * Q * sigma
T.masked_fill_(joint_pad, 0)
return T
def optimal_transport_dist(
txt_emb: Tensor,
img_emb: Tensor,
txt_pad: Tensor,
img_pad: Tensor,
beta: float = 0.5,
iteration: int = 50,
k: int = 1,
) -> Tensor:
"""[B, M, D], [B, N, D], [B, M], [B, N]"""
cost = cost_matrix_cosine(txt_emb, img_emb)
# mask the padded inputs
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
T = ipot(
cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k
)
distance = trace(cost.matmul(T.detach()))
return distance
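# Illustrative usage sketch (added for clarity; not part of the original module).
# Computes the IPOT-approximated OT distance between batched text and image
# token embeddings; the padding masks are boolean with True marking padded
# positions. All shapes below are arbitrary placeholders.
def _example_optimal_transport_dist():
    txt_emb = torch.randn(2, 5, 16)  # [B, M, D]
    img_emb = torch.randn(2, 7, 16)  # [B, N, D]
    txt_pad = torch.zeros(2, 5, dtype=torch.bool)
    img_pad = torch.zeros(2, 7, dtype=torch.bool)
    txt_pad[1, 3:] = True  # second caption has only 3 real tokens
    return optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad)  # [B]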
| EXA-1-master | exa/models/mmf-main/mmf/modules/ot.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Losses module contains implementations for various losses used generally
in vision and language space. One can register custom losses to be detected by
MMF using the following example.
.. code::
from mmf.common.registry import registry
from torch import nn
@registry.register_loss("custom")
class CustomLoss(nn.Module):
...
Then in your model's config you can specify ``losses`` attribute to use this loss
in the following way:
.. code::
model_config:
some_model:
losses:
- type: custom
- params: {}
"""
import collections
import warnings
from dataclasses import dataclass
from typing import Any, Dict, List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmf.common.registry import registry
from mmf.utils.distributed import gather_tensor_along_batch_with_backward, get_rank
from mmf.utils.logger import log_class_usage
from omegaconf import MISSING
from packaging import version
from torch import Tensor
from torch.nn.utils.rnn import pack_padded_sequence
@dataclass
class LossConfig:
type: str = MISSING
params: Dict[str, Any] = MISSING
class Losses(nn.Module):
"""``Losses`` acts as an abstraction for instantiating and calculating
losses. ``BaseModel`` instantiates this class based on the `losses`
attribute in the model's configuration `model_config`. ``loss_list``
needs to be a list for each separate loss containing `type` and `params`
attributes.
Args:
loss_list (ListConfig): Description of parameter `loss_list`.
Example::
# losses:
# - type: logit_bce
# Can also contain `params` to specify that particular loss's init params
# - type: combined
config = [{"type": "logit_bce"}, {"type": "combined"}]
losses = Losses(config)
.. note::
        Since ``Losses`` is instantiated in the ``BaseModel``, a normal end user
        mostly doesn't need to use this class.
Attributes:
losses: List containing instantiations of each loss
passed in config
"""
# TODO: Union types are not supported in OmegaConf.
    # Later investigate for a workaround.
def __init__(self, loss_list: List[Union[str, LossConfig]]):
super().__init__()
self.losses = nn.ModuleList()
config = registry.get("config")
self._evaluation_predict = False
if config:
self._evaluation_predict = config.get("evaluation", {}).get(
"predict", False
)
for loss in loss_list:
self.losses.append(MMFLoss(loss))
def forward(self, sample_list: Dict[str, Tensor], model_output: Dict[str, Tensor]):
"""Takes in the original ``SampleList`` returned from DataLoader
        and `model_output` returned from the model and returns a Dict containing
loss for each of the losses in `losses`.
Args:
            sample_list (SampleList): SampleList given by the dataloader.
model_output (Dict): Dict returned from model as output.
Returns:
            Dict: Dictionary containing loss value for each of the losses.
"""
output = {}
if "targets" not in sample_list:
if not self._evaluation_predict:
warnings.warn(
"Sample list has not field 'targets', are you "
"sure that your ImDB has labels? you may have "
"wanted to run with evaluation.predict=true"
)
return output
for loss in self.losses:
output.update(loss(sample_list, model_output))
if not torch.jit.is_scripting():
registry_loss_key = "{}.{}.{}".format(
"losses", sample_list["dataset_name"], sample_list["dataset_type"]
)
# Register the losses to registry
registry.register(registry_loss_key, output)
return output
class MMFLoss(nn.Module):
"""Internal MMF helper and wrapper class for all Loss classes.
    It makes sure that the value returned from a Loss class is a dict and
    contains the proper dataset type in its keys, so that it is easy to figure
    out which one is the val loss and which one is the train loss.
For example: it will return ``{"val/vqa2/logit_bce": 27.4}``, in case
`logit_bce` is used and SampleList is from `val` set of dataset `vqa2`.
Args:
params (type): Description of parameter `params`.
.. note::
        Since ``MMFLoss`` is used by the ``Losses`` class, the end user
        doesn't need to worry about it.
"""
def __init__(self, params=None):
super().__init__()
if params is None:
params = {}
is_mapping = isinstance(params, collections.abc.MutableMapping)
if is_mapping:
if "type" not in params:
raise ValueError(
"Parameters to loss must have 'type' field to"
"specify type of loss to instantiate"
)
else:
loss_name = params["type"]
else:
assert isinstance(
params, str
), "loss must be a string or dictionary with 'type' key"
loss_name = params
self.name = loss_name
loss_class = registry.get_loss_class(loss_name)
log_class_usage("Loss", loss_class)
if loss_class is None:
raise ValueError(f"No loss named {loss_name} is registered to registry")
# Special case of multi as it requires an array
if loss_name.startswith("multi"):
assert is_mapping
self.loss_criterion = loss_class(params)
else:
if is_mapping:
loss_params = params.get("params", {})
else:
loss_params = {}
self.loss_criterion = loss_class(**loss_params)
def forward(self, sample_list: Dict[str, Tensor], model_output: Dict[str, Tensor]):
loss_dict = {}
if hasattr(self.loss_criterion, "datasets"):
datasets = self.loss_criterion.datasets
if (
isinstance(datasets, list)
and sample_list["dataset_name"] not in datasets
):
return loss_dict
loss_result = self.loss_criterion(sample_list, model_output)
if not isinstance(loss_result, collections.abc.Mapping):
loss_result = {"": loss_result}
for child_loss_name, child_loss_result in loss_result.items():
if not isinstance(child_loss_result, torch.Tensor):
child_loss_result = torch.tensor(child_loss_result, dtype=torch.float)
if child_loss_result.dim() == 0:
child_loss_result = child_loss_result.view(1)
if not torch.jit.is_scripting():
key = "{}/{}/{}".format(
sample_list.dataset_type, sample_list.dataset_name, self.name
)
else:
key = f"{self.name}"
key = f"{key}/{child_loss_name}" if child_loss_name else key
loss_dict[key] = child_loss_result
return loss_dict
@registry.register_loss("logit_bce")
class LogitBinaryCrossEntropy(nn.Module):
"""Returns Binary Cross Entropy for logits.
Attention:
`Key`: logit_bce
"""
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
"""Calculates and returns the binary cross entropy for logits
Args:
sample_list (SampleList): SampleList containing `targets` attribute.
model_output (Dict): Model output containing `scores` attribute.
Returns:
torch.FloatTensor: Float value for loss.
"""
scores = model_output["scores"]
targets = sample_list["targets"]
loss = F.binary_cross_entropy_with_logits(scores, targets, reduction="mean")
return loss * targets.size(1)
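# Illustrative usage sketch (added for clarity; not part of the original module).
# The loss expects raw logits in model_output["scores"] and (possibly soft)
# targets in sample_list["targets"]; the mean BCE is rescaled by the number of
# answer classes. Batch size and class count below are placeholders.
def _example_logit_bce_usage():
    loss_fn = LogitBinaryCrossEntropy()
    sample_list = {"targets": torch.rand(8, 3129)}  # soft targets in [0, 1]
    model_output = {"scores": torch.randn(8, 3129)}  # raw logits
    return loss_fn(sample_list, model_output)  # scalar tensor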
@registry.register_loss("triple_logit_bce")
class TripleLogitBinaryCrossEntropy(nn.Module):
"""
    This is used for three-branch fusion only. We predict scores and compute
    cross entropy loss for each of the branches.
"""
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
"""Calculates and returns the binary cross entropy for logits
Args:
sample_list (SampleList): SampleList containing `targets` attribute.
model_output (Dict): Model output containing `scores` attribute.
Returns:
torch.FloatTensor: Float value for loss.
"""
scores = model_output["scores"]
targets = sample_list["targets"]
if scores.dim() == 3:
loss = (
F.binary_cross_entropy_with_logits(
scores[:, 0], targets, reduction="mean"
)
+ F.binary_cross_entropy_with_logits(
scores[:, 1], targets, reduction="mean"
)
+ F.binary_cross_entropy_with_logits(
scores[:, 2], targets, reduction="mean"
)
)
else:
loss = F.binary_cross_entropy_with_logits(scores, targets, reduction="mean")
return loss * targets.size(-1)
@registry.register_loss("bce")
class BinaryCrossEntropyLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
"""Calculates and returns the binary cross entropy.
Args:
sample_list (SampleList): SampleList containing `targets` attribute.
model_output (Dict): Model output containing `scores` attribute.
Returns:
torch.FloatTensor: Float value for loss.
"""
scores = model_output["scores"]
targets = sample_list["targets"]
loss = F.binary_cross_entropy(scores, targets, reduction="mean")
return loss * targets.size(1)
@registry.register_loss("caption_cross_entropy")
class CaptionCrossEntropyLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
"""Calculates and returns the cross entropy loss for captions.
Args:
sample_list (SampleList): SampleList containing `targets` attribute.
model_output (Dict): Model output containing `scores` attribute.
Returns:
torch.FloatTensor: Float value for loss.
"""
scores = model_output["scores"]
targets = sample_list["targets"]
        # If no captions (test dataset), then assume decode lengths to be uniform
if hasattr(sample_list, "caption_len"):
caption_lengths, _ = sample_list.caption_len.sort(dim=0, descending=True)
decode_lengths = (caption_lengths - 1).tolist()
else:
decode_lengths = [targets.size(1)] * targets.size(0)
if version.parse(torch.__version__) >= version.parse("1.1"):
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True).data
targets = pack_padded_sequence(
targets, decode_lengths, batch_first=True
).data
else:
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
loss = F.cross_entropy(scores, targets)
return loss
@registry.register_loss("nll_loss")
class NLLLoss(nn.Module):
"""Negative log likelikehood loss."""
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
"""Calculates and returns the negative log likelihood.
Args:
sample_list (SampleList): SampleList containing `targets` attribute.
model_output (Dict): Model output containing `scores` attribute.
Returns:
torch.FloatTensor: Float value for loss.
"""
scores = model_output["scores"]
targets = sample_list["targets"]
_, idx = targets.max(dim=1)
loss = F.nll_loss(scores, idx, reduction="mean")
return loss * targets.size(1)
def kl_div(log_x, y):
y_is_0 = torch.eq(y.data, 0)
y.data.masked_fill_(y_is_0, 1)
log_y = torch.log(y)
y.data.masked_fill_(y_is_0, 0)
res = y * (log_y - log_x)
return torch.sum(res, dim=1, keepdim=True)
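# Illustrative sketch (added for clarity; not part of the original module).
# kl_div expects per-class log-probabilities and a target distribution and
# returns a per-sample KL divergence of shape [N, 1]; zero target entries are
# masked out so they contribute nothing.
def _example_kl_div_usage():
    log_x = F.log_softmax(torch.randn(2, 4), dim=1)
    y = torch.tensor([[0.0, 1.0, 0.0, 0.0], [0.5, 0.5, 0.0, 0.0]])
    return kl_div(log_x, y)  # shape: [2, 1]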
@registry.register_loss("multi")
class MultiLoss(nn.Module):
"""A loss for combining multiple losses with weights.
Args:
params (List(Dict)): A list containing parameters for each different loss
and their weights.
Example::
# MultiLoss works with config like below where each loss's params and
# weights are defined
losses:
- type: multi
params:
- type: logit_bce
weight: 0.3
params: {}
- type: attention_supervision
weight: 0.7
params: {}
"""
def __init__(self, params):
super().__init__()
self.losses = []
self.losses_weights = []
self.loss_names = []
for loss_params in params["params"]:
self.loss_names.append(loss_params["type"])
loss_fn = MMFLoss(loss_params)
loss_weight = loss_params.get("weight", {})
self.losses.append(loss_fn)
self.losses_weights.append(loss_weight)
def forward(self, sample_list, model_output, *args, **kwargs):
"""Calculates and returns the multi loss.
Args:
            sample_list (SampleList): SampleList as expected by each child loss.
            model_output (Dict): Model output as expected by each child loss.
Returns:
torch.FloatTensor: Float value for loss.
"""
loss = 0
for idx, loss_fn in enumerate(self.losses):
value = loss_fn(sample_list, model_output, *args, **kwargs)
loss += self.losses_weights[idx] * list(value.values())[0]
return loss
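# Illustrative usage sketch (added for clarity; not part of the original module).
# MultiLoss is normally built from the YAML config shown in the class
# docstring, but an equivalent plain dict works too. The loss types and
# weights below are placeholders.
def _example_multi_loss_usage():
    params = {
        "params": [
            {"type": "logit_bce", "weight": 0.3, "params": {}},
            {"type": "bce", "weight": 0.7, "params": {}},
        ]
    }
    return MultiLoss(params)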
@registry.register_loss("attention_supervision")
class AttentionSupervisionLoss(nn.Module):
"""Loss for attention supervision. Used in case you want to make attentions
similar to some particular values.
"""
def __init__(self):
super().__init__()
self.loss_fn = lambda *args, **kwargs: nn.functional.binary_cross_entropy(
*args, **kwargs
)
def forward(self, sample_list, model_output):
"""Calculates and returns the multi loss.
Args:
sample_list (SampleList): SampleList containing `targets` attribute.
model_output (Dict): Model output containing `scores` attribute.
Returns:
torch.FloatTensor: Float value for loss.
"""
context_attentions = model_output["attentions"]
attention_supervision = sample_list["info"]["attention_supervision"]
loss = self.loss_fn(
context_attentions[0],
attention_supervision.float(),
weight=attention_supervision.float(),
)
# Multiply average loss back with target size to get actual loss
return loss * attention_supervision.size(1)
@registry.register_loss("weighted_softmax")
class WeightedSoftmaxLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
pred_score = model_output["scores"]
target_score = sample_list["targets"]
tar_sum = torch.sum(target_score, dim=1, keepdim=True)
tar_sum_is_0 = torch.eq(tar_sum, 0)
tar_sum.masked_fill_(tar_sum_is_0, 1.0e-06)
tar = target_score / tar_sum
res = F.log_softmax(pred_score, dim=1)
loss = kl_div(res, tar)
loss = loss * tar_sum
loss = torch.sum(loss) / loss.size(0)
return loss
@registry.register_loss("softmax_kldiv")
class SoftmaxKlDivLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
pred_score = model_output["scores"]
target_score = sample_list["targets"]
tar_sum = torch.sum(target_score, dim=1, keepdim=True)
tar_sum_is_0 = torch.eq(tar_sum, 0)
tar_sum.masked_fill_(tar_sum_is_0, 1.0e-06)
tar = target_score / tar_sum
res = F.log_softmax(pred_score, dim=1)
loss = kl_div(res, tar)
loss = torch.sum(loss) / loss.size(0)
return loss
@registry.register_loss("wrong")
class WrongLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sample_list, model_output):
pred_score = model_output["scores"]
target_score = sample_list["targets"]
tar_sum = torch.sum(target_score, dim=1, keepdim=True)
tar_sum_is_0 = torch.eq(tar_sum, 0)
tar_sum.masked_fill_(tar_sum_is_0, 1.0e-06)
tar = target_score / tar_sum
res = F.log_softmax(pred_score, dim=1)
loss = F.kl_div(res, tar, reduction="mean")
loss *= target_score.size(1)
return loss
@registry.register_loss("bce_kl_combined")
class CombinedLoss(nn.Module):
def __init__(self, weight_softmax):
super().__init__()
self.weight_softmax = weight_softmax
def forward(self, sample_list, model_output):
pred_score = model_output["scores"]
target_score = sample_list["targets"]
tar_sum = torch.sum(target_score, dim=1, keepdim=True)
tar_sum_is_0 = torch.eq(tar_sum, 0)
tar_sum.masked_fill_(tar_sum_is_0, 1.0e-06)
tar = target_score / tar_sum
res = F.log_softmax(pred_score, dim=1)
loss1 = kl_div(res, tar)
loss1 = torch.sum(loss1) / loss1.size(0)
loss2 = F.binary_cross_entropy_with_logits(
pred_score, target_score, reduction="mean"
)
loss2 *= target_score.size(1)
loss = self.weight_softmax * loss1 + loss2
return loss
@registry.register_loss("m4c_decoding_bce_with_mask")
class M4CDecodingBCEWithMaskLoss(nn.Module):
def __init__(self):
super().__init__()
self.one = torch.Tensor([1.0])
def forward(self, sample_list, model_output):
scores = model_output["scores"]
targets = sample_list["targets"]
loss_mask = sample_list["train_loss_mask"]
assert scores.dim() == 3 and loss_mask.dim() == 2
losses = F.binary_cross_entropy_with_logits(scores, targets, reduction="none")
losses *= loss_mask.unsqueeze(-1)
count = torch.max(torch.sum(loss_mask), self.one.to(losses.device))
loss = torch.sum(losses) / count
return loss
@registry.register_loss("cross_entropy")
class CrossEntropyLoss(nn.Module):
def __init__(self, **params):
super().__init__()
self.loss_fn = nn.CrossEntropyLoss(**params)
def forward(self, sample_list, model_output):
return self.loss_fn(model_output["scores"], sample_list["targets"])
@registry.register_loss("soft_label_cross_entropy")
class SoftLabelCrossEntropyLoss(nn.Module):
def __init__(self, ignore_index=-100, reduction="mean", normalize_targets=True):
assert reduction in (
"mean",
"sum",
), "Argument `reduction` only supports `mean` and `sum`"
super().__init__()
self.ignore_index = ignore_index
self.reduction = reduction
self.normalize_targets = normalize_targets
self.eps = torch.finfo(torch.float32).eps
@staticmethod
def convert_to_one_hot(targets, n_classes):
one_hot_targets = torch.zeros(
(targets.size(0), n_classes), dtype=torch.long, device=targets.device
)
one_hot_targets.scatter_(1, targets.long().view(-1, 1), 1)
return one_hot_targets
def compute_loss(self, targets, scores):
"""for N examples and C classes
- scores: N x C these are raw outputs (without softmax/sigmoid)
- targets: N x C or N corresponding targets
Target elements set to ignore_index contribute 0 loss.
Samples where all entries are ignore_index do not contribute to the loss
reduction.
"""
assert targets.size(0) == scores.size(
0
), "`targets` and `scores` should have the same batch size"
if targets.dim() == 1:
targets = targets.unsqueeze(1)
mask = targets.ne(self.ignore_index).float() # mask out `ignore_index`
else:
mask = targets.sum(-1, keepdim=True).ne(0).float() # mask out zero rows
if targets.size(1) == 1:
targets = self.convert_to_one_hot(targets, scores.size(1))
targets = targets.float() * mask
if self.normalize_targets:
targets /= self.eps + targets.sum(dim=1, keepdim=True)
per_sample_per_target_loss = -targets * F.log_softmax(scores, dim=-1)
per_sample_loss = torch.sum(per_sample_per_target_loss, -1)
loss = per_sample_loss.sum()
# perform reduction
if self.reduction == "mean":
# normalize based on the number of samples with > 0 non-ignored targets
loss /= torch.sum(torch.sum(mask, -1) > 0).clamp(min=1)
return loss
def forward(self, sample_list, model_output):
return self.compute_loss(sample_list["targets"], model_output["scores"])
@registry.register_loss("label_smoothing_cross_entropy")
class LabelSmoothingCrossEntropyLoss(SoftLabelCrossEntropyLoss):
"""Cross-entropy loss with label smoothing. If `label_smoothing` = 0, then
it's canonical cross entropy.
The smoothed one-hot encoding is 1 - label_smoothing for true label and
label_smoothing / (num_classes - 1) for the rest.
Reference: https://stackoverflow.com/questions/55681502/label-smoothing-in-pytorch
"""
def __init__(self, label_smoothing=0.1, reduction="mean", ignore_index=-100):
assert (
0 <= label_smoothing < 1
), "value of argument `label_smoothing` must be in range [0, 1)."
super().__init__(ignore_index, reduction, False)
self.label_smoothing = label_smoothing
def smooth_targets(self, targets, n_classes):
if targets.dim() == 1:
targets = targets.unsqueeze(1)
mask = targets.ne(self.ignore_index)
smoothing_value = self.label_smoothing / (n_classes - 1)
one_hot = torch.full(
(n_classes,), smoothing_value, device=targets.device
).repeat(targets.size(0), 1)
# mask out target with `ignore_index` to avoid error `index out of bounds`
one_hot.scatter_(1, targets * mask.long(), 1 - self.label_smoothing)
return one_hot * mask.float()
def forward(self, sample_list, model_output):
scores = model_output["scores"]
one_hot = self.smooth_targets(sample_list["targets"], scores.size(1))
loss = self.compute_loss(one_hot, scores)
return loss
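# Illustrative usage sketch (added for clarity; not part of the original module).
# With label_smoothing=0.1 and 5 classes, each smoothed target row is 0.9 at
# the true class and 0.1 / 4 = 0.025 elsewhere before the cross entropy is
# computed. Shapes below are placeholders.
def _example_label_smoothing_usage():
    loss_fn = LabelSmoothingCrossEntropyLoss(label_smoothing=0.1)
    sample_list = {"targets": torch.tensor([0, 2, 4, 1])}  # integer class ids
    model_output = {"scores": torch.randn(4, 5)}  # raw logits
    return loss_fn(sample_list, model_output)  # scalar tensor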
@registry.register_loss("in_batch_hinge")
class InBatchHinge(nn.Module):
"""
Based on the code from https://github.com/fartashf/vsepp/blob/master/model.py
"""
def __init__(self, margin: float = 0.0, hard: bool = False):
super().__init__()
self.margin = margin
self.hard = hard
def _compute_loss(self, correlations: Tensor):
diagonal = correlations.diag()[:, None]
d1 = diagonal.expand_as(correlations)
d2 = diagonal.t().expand_as(correlations)
# compare every diagonal score to scores in its column
# caption retrieval
cost_s = (self.margin + correlations - d1).clamp(min=0)
# compare every diagonal score to scores in its row
# image retrieval
cost_im = (self.margin + correlations - d2).clamp(min=0)
# clear diagonals
mask = 1 - torch.eye(correlations.size(0), device=correlations.device)
cost_s = cost_s * mask
cost_im = cost_im * mask
if self.hard:
cost_s = cost_s.max(1)[0]
cost_im = cost_im.max(0)[0]
return cost_s.sum() + cost_im.sum()
def forward(self, sample_list: Dict[str, Tensor], model_output: Dict[str, Tensor]):
image_embeddings = model_output["scores"]
text_embeddings = model_output["targets"]
if image_embeddings.shape[0] == text_embeddings.shape[0]:
# Training/Single-GT loss
correlations = image_embeddings @ text_embeddings.t()
loss = self._compute_loss(correlations)
else:
# Evaluation/Multi-GT loss
assert text_embeddings.shape[0] % image_embeddings.shape[0] == 0
batch_size, dim_size = image_embeddings.shape
factor = text_embeddings.shape[0] // image_embeddings.shape[0]
text_embeddings = text_embeddings.reshape(batch_size, factor, dim_size)
correlations = image_embeddings @ text_embeddings.permute(1, 2, 0) # FxBxB
loss = 0
for corr in correlations:
loss += self._compute_loss(corr)
return loss
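# Illustrative usage sketch (added for clarity; not part of the original module).
# In the single-ground-truth case the image and text embeddings have the same
# batch size and the loss sums hinge terms over the in-batch similarity matrix.
# Batch size and embedding size below are placeholders.
def _example_in_batch_hinge_usage():
    loss_fn = InBatchHinge(margin=0.2, hard=False)
    image_emb = F.normalize(torch.randn(8, 256), dim=-1)
    text_emb = F.normalize(torch.randn(8, 256), dim=-1)
    model_output = {"scores": image_emb, "targets": text_emb}
    return loss_fn({}, model_output)  # scalar tensor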
@registry.register_loss("contrastive_loss")
class ContrastiveLoss(nn.Module):
"""
This is a generic contrastive loss typically used for pretraining. No modality
assumptions are made here.
"""
def __init__(self):
super().__init__()
def forward(self, sample_list: Dict[str, Tensor], model_output: Dict[str, Tensor]):
assert (
"embedding_1" in model_output and "embedding_2" in model_output
), "Embedding names must be available before loss calculation"
embedding_1 = model_output["embedding_1"]
embedding_2 = model_output["embedding_2"]
assert embedding_1.size(0) == embedding_2.size(0), "batch size must match"
per_gpu_batch_size = embedding_1.size(0)
embedding_1_all_gpus = gather_tensor_along_batch_with_backward(embedding_1)
embedding_2_all_gpus = gather_tensor_along_batch_with_backward(embedding_2)
temperature = model_output["temperature"]
logits_1 = (
torch.matmul(embedding_1, embedding_2_all_gpus.transpose(0, 1))
/ temperature
)
logits_2 = (
torch.matmul(embedding_2, embedding_1_all_gpus.transpose(0, 1))
/ temperature
)
labels = per_gpu_batch_size * get_rank() + torch.arange(
per_gpu_batch_size, device=temperature.device
)
loss_1 = F.cross_entropy(logits_1, labels)
loss_2 = F.cross_entropy(logits_2, labels)
return (loss_1 + loss_2) / 2
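# Illustrative usage sketch (added for clarity; not part of the original module),
# assuming a single-process run where the distributed gather helpers pass the
# tensors through unchanged and get_rank() is 0. The temperature value and
# embedding size below are placeholders.
def _example_contrastive_loss_usage():
    loss_fn = ContrastiveLoss()
    model_output = {
        "embedding_1": F.normalize(torch.randn(8, 256), dim=-1),
        "embedding_2": F.normalize(torch.randn(8, 256), dim=-1),
        "temperature": torch.tensor(0.07),
    }
    return loss_fn({}, model_output)  # scalar tensor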
@registry.register_loss("mse")
class MSELoss(nn.Module):
"""Mean Squared Error loss"""
def __init__(self):
super().__init__()
self.loss_fn = nn.MSELoss()
def forward(self, sample_list, model_output):
targets = sample_list["targets"]
scores = model_output["scores"]
loss = self.loss_fn(scores, targets)
return loss
@registry.register_loss("cos_emb_loss")
class CosineEmbeddingLoss(nn.Module):
"""Cosine embedding loss"""
def __init__(self):
super().__init__()
self.loss_fn = nn.CosineEmbeddingLoss()
def forward(self, sample_list, model_output):
targets = sample_list["targets"]
scores = model_output["scores"]
y = torch.ones(targets.size(0)).to(targets.device)
loss = self.loss_fn(scores, targets, y)
return loss
@registry.register_loss("bce_kl")
class BCEAndKLLoss(nn.Module):
"""binary_cross_entropy_with_logits and kl divergence loss.
Calculates both losses and returns a dict with string keys.
Similar to bce_kl_combined, but returns both losses.
"""
def __init__(self, weight_softmax):
super().__init__()
self.weight_softmax = weight_softmax
def forward(self, sample_list, model_output):
pred_score = model_output["scores"]
target_score = sample_list["targets"]
tar_sum = torch.sum(target_score, dim=1, keepdim=True)
tar_sum_is_0 = torch.eq(tar_sum, 0)
tar_sum.masked_fill_(tar_sum_is_0, 1.0e-06)
tar = target_score / tar_sum
res = F.log_softmax(pred_score, dim=1)
loss1 = kl_div(res, tar)
loss1 = torch.sum(loss1) / loss1.size(0)
loss2 = F.binary_cross_entropy_with_logits(
pred_score, target_score, reduction="mean"
)
loss2 *= target_score.size(1)
loss = {"kl": self.weight_softmax * loss1, "bce": loss2}
return loss
def calc_ms_loss(pair, base, param, multiplier):
return (
1.0
/ param
* torch.log(1 + torch.sum(torch.exp(multiplier * param * (pair - base))))
)
@registry.register_loss("refiner_ms")
class RefinerMSLoss(nn.Module):
"""
A Multi-Similarity loss between the decoder outputs of a given embedding size
and its targets
This loss pulls the decoded signal of a sample closer to its target,
while simultaneously pushing it away from other targets
References:
1) Wang et al., Multi-Similarity Loss With General Pair Weighting
for Deep Metric Learning, CVPR 2019
2) Sankaran, S., Yang, D. and Lim, S.N., "Multimodal Fusion Refiner Networks"
Parameters:
same as ms_loss (see below)
"""
def __init__(
self,
alpha: float = 50,
beta: float = 2,
base: float = 0.5,
margin: float = 0.1,
epsilon: float = 1e-16,
):
super().__init__()
self.alpha = alpha
self.beta = beta
self.margin = margin
self.base = base
self.epsilon = epsilon
def forward(self, sample_list, model_output):
targets = sample_list["targets"]
inputs = model_output["scores"]
n = inputs.size(0)
sim_mat = torch.matmul(inputs, targets.t())
loss = []
for i in range(n):
# pos pair is the similarity between the refiner output (input to forward)
# and target (original encoding)
pos_pair = sim_mat[i][i]
# neg pair is all the remaining pairs
neg_pairs_all = sim_mat[i]
            # remove the pos_pair from neg_pairs
neg_pairs_all = neg_pairs_all[abs(neg_pairs_all - pos_pair) > self.epsilon]
# All pairs whose similarity is within a margin of the pos_pair similarity
neg_pairs = neg_pairs_all[neg_pairs_all + self.margin > pos_pair]
            # nothing to do if there are no negative pairs left after filtering
if len(neg_pairs) < 1:
continue
pos_loss = calc_ms_loss(pos_pair, self.base, self.beta, -1)
neg_loss = calc_ms_loss(neg_pairs, self.base, self.alpha, 1)
loss.append(pos_loss + neg_loss)
if n > 0:
loss = sum(loss) / n
else:
loss = inputs.new_zeros(1, requires_grad=True)
return loss
@registry.register_loss("ms_loss")
class MSLoss(nn.Module):
"""
A Multi-Similarity loss between embeddings of similar and dissimilar
labels is implemented here.
Reference:
"Multi-similarity loss with general pair weighting for deep metric learning"
Args:
alpha, beta, margin: parameters used in loss function calculation
hard_mining: if true, select only the hardest examples (defined based on margin)
is_multilabel: True if there are more than two labels, false otherwise
"""
def __init__(
self, alpha=50, beta=2, margin=0.5, hard_mining=True, is_multilabel=False
):
super().__init__()
self.alpha = alpha
self.beta = beta
self.hard_mining = hard_mining
self.margin = margin
self.is_multilabel = is_multilabel
def get_positive_and_negative_pairs(self, sim_vec, targets, curr_target):
# given a sample with a similarity vec (embedding similarity to other samples)
# return pairs of samples which share the same targets/labels (positive pairs)
# and pairs of samples which have different labels (negative pairs)
if self.is_multilabel:
pos_pair_ = torch.masked_select(
                sim_vec, torch.matmul(targets, curr_target) > 0
)
else:
pos_pair_ = torch.masked_select(sim_vec, targets == curr_target)
# remove itself
pos_pair_ = torch.masked_select(pos_pair_, pos_pair_ < 1 - 1e-5)
pos_pair_ = torch.sort(pos_pair_)[0]
if self.is_multilabel:
neg_pair_ = torch.masked_select(
                sim_vec, torch.matmul(targets, curr_target) < 1e-5
)
else:
neg_pair_ = torch.masked_select(sim_vec, targets != curr_target)
neg_pair_ = torch.sort(neg_pair_)[0]
if len(pos_pair_) == 0 or len(neg_pair_) == 0:
return (pos_pair_, neg_pair_)
        if self.hard_mining:
neg_pair = torch.masked_select(neg_pair_, neg_pair_ + 0.1 > pos_pair_[0])
pos_pair = torch.masked_select(pos_pair_, pos_pair_ - 0.1 < neg_pair_[-1])
neg_pair_ = neg_pair
pos_pair_ = pos_pair
return (pos_pair_, neg_pair_)
def forward(self, sample_list, model_output):
# get the fused features and normalize
fusion_features = model_output["fused_embedding"]
inputs = F.normalize(fusion_features)
# get the targets
targets = sample_list["targets"]
batch_size = inputs.size(0)
# sim_mat(i,j) contains the similarity between fused_embeddings of ith
# and jth samples in a batch
sim_mat = torch.matmul(inputs, inputs.t())
# this is the margin allowed for multi-similarity loss
base = self.margin
loss = []
for i in range(batch_size):
(pos_pair_, neg_pair_) = self.get_positive_and_negative_pairs(
sim_mat[i], targets, targets[i]
)
# no compute needed when one of the pairs is not available
if len(pos_pair_) == 0 or len(neg_pair_) == 0:
continue
pos_loss = calc_ms_loss(pos_pair_, base, self.beta, -1)
neg_loss = calc_ms_loss(neg_pair_, base, self.alpha, 1)
loss.append(pos_loss + neg_loss)
if len(loss) == 0:
loss = inputs.new_zeros(1, requires_grad=True)
else:
loss = sum(loss) / batch_size
return loss
@registry.register_loss("refiner_contrastive_loss")
class RefinerContrastiveLoss(nn.Module):
"""
A contrastive loss between the decoder outputs of a given embedding size
and its targets
This loss can be used in lieu of a reconstruction loss, wherein the goal
is to get a decoded signal closer to its target than other targets. As long
as the reconstructed signal of a given input is closer to its target than
any other target, the loss will remain zero.
Reference:
Sankaran, S., Yang, D. and Lim, S.N., "Multimodal Fusion Refiner Networks"
Parameters:
        sim_thresh: similarity threshold used to consider only samples beyond
            this threshold
"""
def __init__(self, sim_thresh=0.1, epsilon=1e-16):
super().__init__()
self.similarity_threshold = sim_thresh
self.epsilon = epsilon
def forward(self, sample_list, model_output):
targets = sample_list["targets"]
inputs = model_output["scores"]
batch_size = inputs.size(0)
# normalize inputs and targets
inputs = F.normalize(inputs)
targets = F.normalize(targets)
# matrix containing the similarity between the inputs and targets
        # (i,j) contains similarity between the i^th decoder and j^th target
sim_mat = torch.matmul(inputs, targets.t())
loss = []
for i in range(batch_size):
sim_ij = sim_mat[i]
# pos_similarity contains the similarity between i^th decoder
# and i^th target
pos_similarity = sim_ij[i]
# neg_pair_ contains all the batch samples whose similarity with i^th
# decoder is better than a threshold corrected similarity between
# i^th decoder and i^th target
neg_pair_ = torch.masked_select(
sim_ij, sim_ij > pos_similarity - self.similarity_threshold
)
# remove the pos_pair from the neg_pair list
neg_pair_ = torch.masked_select(
neg_pair_, abs(neg_pair_ - pos_similarity) > self.epsilon
)
# The loss is non-zero only when there exists at least one sample whose
# target is closer to the decoded signal.
if neg_pair_.shape[0] > 0:
neg_loss = torch.mean(
self.similarity_threshold + neg_pair_ - pos_similarity
)
loss.append(neg_loss)
if len(loss) == 0:
loss = inputs.new_zeros(1, requires_grad=True)
else:
loss = sum(loss) / batch_size
return loss
| EXA-1-master | exa/models/mmf-main/mmf/modules/losses.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Optional
import torch
from mmf.common.registry import registry
from mmf.modules.decoders import LanguageDecoder
from torch import nn
from torch.nn.utils.weight_norm import weight_norm
class ConvNet(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
padding_size="same",
pool_stride=2,
batch_norm=True,
):
super().__init__()
if padding_size == "same":
padding_size = kernel_size // 2
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size, padding=padding_size
)
self.max_pool2d = nn.MaxPool2d(pool_stride, stride=pool_stride)
self.batch_norm = batch_norm
if self.batch_norm:
self.batch_norm_2d = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.max_pool2d(nn.functional.leaky_relu(self.conv(x)))
if self.batch_norm:
x = self.batch_norm_2d(x)
return x
class Flatten(nn.Module):
def forward(self, input):
if input.dim() > 1:
input = input.view(input.size(0), -1)
return input
class UnFlatten(nn.Module):
def forward(self, input, sizes=None):
if sizes is None:
sizes = []
return input.view(input.size(0), *sizes)
class GatedTanh(nn.Module):
"""
From: https://arxiv.org/pdf/1707.07998.pdf
    nonlinear_layer (f_a) : x \\in R^m => y \\in R^n
    \\tilde{y} = tanh(Wx + b)
    g = sigmoid(W'x + b')
    y = \\tilde{y} \\circ g
input: (N, *, in_dim)
output: (N, *, out_dim)
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.fc = nn.Linear(in_dim, out_dim)
self.gate_fc = nn.Linear(in_dim, out_dim)
def forward(self, x):
y_tilda = torch.tanh(self.fc(x))
gated = torch.sigmoid(self.gate_fc(x))
# Element wise multiplication
y = y_tilda * gated
return y
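# Illustrative usage sketch (added for clarity; not part of the original module).
# GatedTanh maps the last dimension from in_dim to out_dim; the dimensions
# below are arbitrary placeholders.
def _example_gated_tanh_usage():
    layer = GatedTanh(in_dim=512, out_dim=1024)
    x = torch.randn(8, 512)
    return layer(x)  # shape: [8, 1024]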
# TODO: Do clean implementation without Sequential
class ReLUWithWeightNormFC(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
layers = []
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ClassifierLayer(nn.Module):
def __init__(self, classifier_type, in_dim, out_dim, **kwargs):
super().__init__()
if classifier_type == "weight_norm":
self.module = WeightNormClassifier(in_dim, out_dim, **kwargs)
elif classifier_type == "logit":
self.module = LogitClassifier(in_dim, out_dim, **kwargs)
elif classifier_type == "language_decoder":
self.module = LanguageDecoder(in_dim, out_dim, **kwargs)
elif classifier_type == "bert":
self.module = BertClassifierHead(
in_dim, out_dim, kwargs.get("config", None)
).module
elif classifier_type == "mlp":
self.module = MLPClassifer(in_dim, out_dim, **kwargs)
elif classifier_type == "triple_linear":
self.module = TripleLinear(in_dim, out_dim)
elif classifier_type == "linear":
self.module = nn.Linear(in_dim, out_dim)
else:
raise NotImplementedError("Unknown classifier type: %s" % classifier_type)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
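# Illustrative usage sketch (added for clarity; not part of the original module).
# ClassifierLayer dispatches on classifier_type and forwards extra keyword
# arguments to the selected head; the "weight_norm" head defined later in this
# file needs hidden_dim and dropout. Dimensions below are placeholders.
def _example_classifier_layer_usage():
    classifier = ClassifierLayer(
        "weight_norm", in_dim=2048, out_dim=3129, hidden_dim=1024, dropout=0.5
    )
    joint_embedding = torch.randn(4, 2048)
    return classifier(joint_embedding)  # shape: [4, 3129]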
class BertClassifierHead(nn.Module):
def __init__(self, in_dim=768, out_dim=2, config=None, *args, **kwargs):
super().__init__()
try:
from transformers3.modeling_bert import BertPredictionHeadTransform
except ImportError:
from transformers.modeling_bert import BertPredictionHeadTransform
if config is None:
try:
from transformers3.configuration_bert import BertConfig
except ImportError:
from transformers.configuration_bert import BertConfig
config = BertConfig.from_pretrained("bert-base-uncased")
assert config.hidden_size == in_dim
self.module = nn.Sequential(
nn.Dropout(config.hidden_dropout_prob),
BertPredictionHeadTransform(config),
nn.Linear(in_dim, out_dim),
)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class MLPClassifer(nn.Module):
def __init__(
self,
in_dim,
out_dim,
hidden_dim=None,
num_layers=0,
dropout=0.5,
hidden_act="relu",
batch_norm=True,
**kwargs,
):
super().__init__()
from mmf.utils.modeling import ACT2FN
activation = ACT2FN[hidden_act]
self.layers = nn.ModuleList()
if hidden_dim is None:
hidden_dim = in_dim
for _ in range(num_layers):
self.layers.append(nn.Linear(in_dim, hidden_dim))
if batch_norm:
self.layers.append(nn.BatchNorm1d(hidden_dim))
self.layers.append(activation())
self.layers.append(nn.Dropout(dropout))
in_dim = hidden_dim
self.layers.append(nn.Linear(in_dim, out_dim))
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class LogitClassifier(nn.Module):
def __init__(self, in_dim, out_dim, **kwargs):
super().__init__()
input_dim = in_dim
num_ans_candidates = out_dim
text_non_linear_dim = kwargs["text_hidden_dim"]
image_non_linear_dim = kwargs["img_hidden_dim"]
self.f_o_text = ReLUWithWeightNormFC(input_dim, text_non_linear_dim)
self.f_o_image = ReLUWithWeightNormFC(input_dim, image_non_linear_dim)
self.linear_text = nn.Linear(text_non_linear_dim, num_ans_candidates)
self.linear_image = nn.Linear(image_non_linear_dim, num_ans_candidates)
if "pretrained_image" in kwargs and kwargs["pretrained_text"] is not None:
self.linear_text.weight.data.copy_(
torch.from_numpy(kwargs["pretrained_text"])
)
if "pretrained_image" in kwargs and kwargs["pretrained_image"] is not None:
self.linear_image.weight.data.copy_(
torch.from_numpy(kwargs["pretrained_image"])
)
def forward(self, joint_embedding):
text_val = self.linear_text(self.f_o_text(joint_embedding))
image_val = self.linear_image(self.f_o_image(joint_embedding))
logit_value = text_val + image_val
return logit_value
class WeightNormClassifier(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, dropout):
super().__init__()
layers = [
weight_norm(nn.Linear(in_dim, hidden_dim), dim=None),
nn.ReLU(),
nn.Dropout(dropout, inplace=True),
weight_norm(nn.Linear(hidden_dim, out_dim), dim=None),
]
self.main = nn.Sequential(*layers)
def forward(self, x):
logits = self.main(x)
return logits
class Identity(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def forward(self, x):
return x
class ModalCombineLayer(nn.Module):
def __init__(self, combine_type, img_feat_dim, txt_emb_dim, **kwargs):
super().__init__()
if combine_type == "MFH":
self.module = MFH(img_feat_dim, txt_emb_dim, **kwargs)
elif combine_type == "non_linear_element_multiply":
self.module = NonLinearElementMultiply(img_feat_dim, txt_emb_dim, **kwargs)
elif combine_type == "two_layer_element_multiply":
self.module = TwoLayerElementMultiply(img_feat_dim, txt_emb_dim, **kwargs)
elif combine_type == "top_down_attention_lstm":
self.module = TopDownAttentionLSTM(img_feat_dim, txt_emb_dim, **kwargs)
else:
raise NotImplementedError("Not implemented combine type: %s" % combine_type)
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class MfbExpand(nn.Module):
def __init__(self, img_feat_dim, txt_emb_dim, hidden_dim, dropout):
super().__init__()
self.lc_image = nn.Linear(in_features=img_feat_dim, out_features=hidden_dim)
self.lc_ques = nn.Linear(in_features=txt_emb_dim, out_features=hidden_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, image_feat, question_embed):
image1 = self.lc_image(image_feat)
ques1 = self.lc_ques(question_embed)
if len(image_feat.data.shape) == 3:
num_location = image_feat.data.size(1)
ques1_expand = torch.unsqueeze(ques1, 1).expand(-1, num_location, -1)
else:
ques1_expand = ques1
joint_feature = image1 * ques1_expand
joint_feature = self.dropout(joint_feature)
return joint_feature
class MFH(nn.Module):
def __init__(self, image_feat_dim, ques_emb_dim, **kwargs):
super().__init__()
self.mfb_expand_list = nn.ModuleList()
        self.mfb_sqz_list = []  # plain list: mfb_squeeze is a method, not a Module
self.relu = nn.ReLU()
hidden_sizes = kwargs["hidden_sizes"]
self.out_dim = int(sum(hidden_sizes) / kwargs["pool_size"])
self.order = kwargs["order"]
self.pool_size = kwargs["pool_size"]
for i in range(self.order):
mfb_exp_i = MfbExpand(
img_feat_dim=image_feat_dim,
txt_emb_dim=ques_emb_dim,
hidden_dim=hidden_sizes[i],
dropout=kwargs["dropout"],
)
self.mfb_expand_list.append(mfb_exp_i)
self.mfb_sqz_list.append(self.mfb_squeeze)
def forward(self, image_feat, question_embedding):
feature_list = []
prev_mfb_exp = 1
for i in range(self.order):
mfb_exp = self.mfb_expand_list[i]
mfb_sqz = self.mfb_sqz_list[i]
z_exp_i = mfb_exp(image_feat, question_embedding)
if i > 0:
z_exp_i = prev_mfb_exp * z_exp_i
prev_mfb_exp = z_exp_i
z = mfb_sqz(z_exp_i)
feature_list.append(z)
        # concatenate the per-order outputs along the last feature dimension
cat_dim = len(feature_list[0].size()) - 1
feature = torch.cat(feature_list, dim=cat_dim)
return feature
def mfb_squeeze(self, joint_feature):
# joint_feature dim: N x k x dim or N x dim
orig_feature_size = len(joint_feature.size())
if orig_feature_size == 2:
joint_feature = torch.unsqueeze(joint_feature, dim=1)
batch_size, num_loc, dim = joint_feature.size()
if dim % self.pool_size != 0:
exit(
"the dim %d is not multiply of \
pool_size %d"
% (dim, self.pool_size)
)
joint_feature_reshape = joint_feature.view(
batch_size, num_loc, int(dim / self.pool_size), self.pool_size
)
# N x 100 x 1000 x 1
iatt_iq_sumpool = torch.sum(joint_feature_reshape, 3)
iatt_iq_sqrt = torch.sqrt(self.relu(iatt_iq_sumpool)) - torch.sqrt(
self.relu(-iatt_iq_sumpool)
)
iatt_iq_sqrt = iatt_iq_sqrt.view(batch_size, -1) # N x 100000
iatt_iq_l2 = nn.functional.normalize(iatt_iq_sqrt)
iatt_iq_l2 = iatt_iq_l2.view(batch_size, num_loc, int(dim / self.pool_size))
if orig_feature_size == 2:
iatt_iq_l2 = torch.squeeze(iatt_iq_l2, dim=1)
return iatt_iq_l2
# need to handle two situations,
# first: image (N, K, i_dim), question (N, q_dim);
# second: image (N, i_dim), question (N, q_dim);
class NonLinearElementMultiply(nn.Module):
def __init__(self, image_feat_dim, ques_emb_dim, **kwargs):
super().__init__()
self.fa_image = ReLUWithWeightNormFC(image_feat_dim, kwargs["hidden_dim"])
self.fa_txt = ReLUWithWeightNormFC(ques_emb_dim, kwargs["hidden_dim"])
context_dim = kwargs.get("context_dim", None)
if context_dim is not None:
self.fa_context = ReLUWithWeightNormFC(context_dim, kwargs["hidden_dim"])
self.dropout = nn.Dropout(kwargs["dropout"])
self.out_dim = kwargs["hidden_dim"]
def forward(self, image_feat, question_embedding, context_embedding=None):
image_fa = self.fa_image(image_feat)
question_fa = self.fa_txt(question_embedding)
if len(image_feat.size()) == 3 and len(question_fa.size()) != 3:
question_fa_expand = question_fa.unsqueeze(1)
else:
question_fa_expand = question_fa
joint_feature = image_fa * question_fa_expand
if context_embedding is not None:
context_fa = self.fa_context(context_embedding)
            context_text_joint_feature = context_fa * question_fa_expand
            joint_feature = torch.cat(
                [joint_feature, context_text_joint_feature], dim=1
            )
joint_feature = self.dropout(joint_feature)
return joint_feature
class TopDownAttentionLSTM(nn.Module):
def __init__(self, image_feat_dim, embed_dim, **kwargs):
super().__init__()
self.fa_image = weight_norm(nn.Linear(image_feat_dim, kwargs["attention_dim"]))
self.fa_hidden = weight_norm(
nn.Linear(kwargs["hidden_dim"], kwargs["attention_dim"])
)
self.top_down_lstm = nn.LSTMCell(
embed_dim + image_feat_dim + kwargs["hidden_dim"],
kwargs["hidden_dim"],
bias=True,
)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(kwargs["dropout"])
self.out_dim = kwargs["attention_dim"]
def forward(self, image_feat, embedding):
image_feat_mean = image_feat.mean(1)
# Get LSTM state
state = registry.get(f"{image_feat.device}_lstm_state")
h1, c1 = state["td_hidden"]
h2, c2 = state["lm_hidden"]
h1, c1 = self.top_down_lstm(
torch.cat([h2, image_feat_mean, embedding], dim=1), (h1, c1)
)
state["td_hidden"] = (h1, c1)
image_fa = self.fa_image(image_feat)
hidden_fa = self.fa_hidden(h1)
joint_feature = self.relu(image_fa + hidden_fa.unsqueeze(1))
joint_feature = self.dropout(joint_feature)
return joint_feature
class TwoLayerElementMultiply(nn.Module):
def __init__(self, image_feat_dim, ques_emb_dim, **kwargs):
super().__init__()
self.fa_image1 = ReLUWithWeightNormFC(image_feat_dim, kwargs["hidden_dim"])
self.fa_image2 = ReLUWithWeightNormFC(
kwargs["hidden_dim"], kwargs["hidden_dim"]
)
self.fa_txt1 = ReLUWithWeightNormFC(ques_emb_dim, kwargs["hidden_dim"])
self.fa_txt2 = ReLUWithWeightNormFC(kwargs["hidden_dim"], kwargs["hidden_dim"])
self.dropout = nn.Dropout(kwargs["dropout"])
self.out_dim = kwargs["hidden_dim"]
def forward(self, image_feat, question_embedding):
image_fa = self.fa_image2(self.fa_image1(image_feat))
question_fa = self.fa_txt2(self.fa_txt1(question_embedding))
if len(image_feat.size()) == 3:
num_location = image_feat.size(1)
question_fa_expand = torch.unsqueeze(question_fa, 1).expand(
-1, num_location, -1
)
else:
question_fa_expand = question_fa
joint_feature = image_fa * question_fa_expand
joint_feature = self.dropout(joint_feature)
return joint_feature
class TransformLayer(nn.Module):
def __init__(self, transform_type, in_dim, out_dim, hidden_dim=None):
super().__init__()
if transform_type == "linear":
self.module = LinearTransform(in_dim, out_dim)
elif transform_type == "conv":
self.module = ConvTransform(in_dim, out_dim, hidden_dim)
else:
raise NotImplementedError(
"Unknown post combine transform type: %s" % transform_type
)
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
class LinearTransform(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.lc = weight_norm(
nn.Linear(in_features=in_dim, out_features=out_dim), dim=None
)
self.out_dim = out_dim
def forward(self, x):
return self.lc(x)
class ConvTransform(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=in_dim, out_channels=hidden_dim, kernel_size=1
)
self.conv2 = nn.Conv2d(
in_channels=hidden_dim, out_channels=out_dim, kernel_size=1
)
self.out_dim = out_dim
def forward(self, x):
        if len(x.size()) == 3:  # N x k x dim
# N x dim x k x 1
x_reshape = torch.unsqueeze(x.permute(0, 2, 1), 3)
elif len(x.size()) == 2: # N x dim
# N x dim x 1 x 1
x_reshape = torch.unsqueeze(torch.unsqueeze(x, 2), 3)
iatt_conv1 = self.conv1(x_reshape) # N x hidden_dim x * x 1
iatt_relu = nn.functional.relu(iatt_conv1)
iatt_conv2 = self.conv2(iatt_relu) # N x out_dim x * x 1
if len(x.size()) == 3:
iatt_conv3 = torch.squeeze(iatt_conv2, 3).permute(0, 2, 1)
elif len(x.size()) == 2:
iatt_conv3 = torch.squeeze(torch.squeeze(iatt_conv2, 3), 2)
return iatt_conv3
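# Illustrative usage sketch: TransformLayer dispatches to either LinearTransform or
# ConvTransform. The dimensions below are assumptions picked for the example only.
def _example_transform_layer():
    import torch

    transform = TransformLayer("conv", in_dim=512, out_dim=256, hidden_dim=1024)
    x = torch.randn(2, 36, 512)  # N x K x in_dim
    out = transform(x)  # two 1x1 convolutions applied over the K locations
    return out.shape  # torch.Size([2, 36, 256])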
class BCNet(nn.Module):
"""
Simple class for non-linear bilinear connect network
"""
def __init__(self, v_dim, q_dim, h_dim, h_out, act="ReLU", dropout=None, k=3):
super().__init__()
self.c = 32
self.k = k
self.v_dim = v_dim
self.q_dim = q_dim
self.h_dim = h_dim
self.h_out = h_out
if dropout is None:
dropout = [0.2, 0.5]
self.v_net = FCNet([v_dim, h_dim * self.k], act=act, dropout=dropout[0])
self.q_net = FCNet([q_dim, h_dim * self.k], act=act, dropout=dropout[0])
self.dropout = nn.Dropout(dropout[1])
if k > 1:
self.p_net = nn.AvgPool1d(self.k, stride=self.k)
if h_out is None:
pass
elif h_out <= self.c:
self.h_mat = nn.Parameter(
torch.Tensor(1, h_out, 1, h_dim * self.k).normal_()
)
self.h_bias = nn.Parameter(torch.Tensor(1, h_out, 1, 1).normal_())
else:
self.h_net = weight_norm(nn.Linear(h_dim * self.k, h_out), dim=None)
def forward(self, v, q):
if self.h_out is None:
v_ = self.v_net(v).transpose(1, 2).unsqueeze(3)
q_ = self.q_net(q).transpose(1, 2).unsqueeze(2)
d_ = torch.matmul(v_, q_)
logits = d_.transpose(1, 2).transpose(2, 3)
return logits
# broadcast Hadamard product, matrix-matrix production
# fast computation but memory inefficient
elif self.h_out <= self.c:
v_ = self.dropout(self.v_net(v)).unsqueeze(1)
q_ = self.q_net(q)
h_ = v_ * self.h_mat
logits = torch.matmul(h_, q_.unsqueeze(1).transpose(2, 3))
logits = logits + self.h_bias
return logits
# batch outer product, linear projection
# memory efficient but slow computation
else:
v_ = self.dropout(self.v_net(v)).transpose(1, 2).unsqueeze(3)
q_ = self.q_net(q).transpose(1, 2).unsqueeze(2)
d_ = torch.matmul(v_, q_)
logits = self.h_net(d_.transpose(1, 2).transpose(2, 3))
return logits.transpose(2, 3).transpose(1, 2)
def forward_with_weights(self, v, q, w):
v_ = self.v_net(v).transpose(1, 2).unsqueeze(2)
q_ = self.q_net(q).transpose(1, 2).unsqueeze(3)
logits = torch.matmul(torch.matmul(v_, w.unsqueeze(1)), q_)
logits = logits.squeeze(3).squeeze(2)
if self.k > 1:
logits = logits.unsqueeze(1)
logits = self.p_net(logits).squeeze(1) * self.k
return logits
class FCNet(nn.Module):
"""
Simple class for non-linear fully connect network
"""
def __init__(self, dims, act="ReLU", dropout=0):
super().__init__()
layers = []
for i in range(len(dims) - 2):
in_dim = dims[i]
out_dim = dims[i + 1]
if dropout > 0:
layers.append(nn.Dropout(dropout))
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
if act is not None:
layers.append(getattr(nn, act)())
if dropout > 0:
layers.append(nn.Dropout(dropout))
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
if act is not None:
layers.append(getattr(nn, act)())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
class BiAttention(nn.Module):
def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=None):
super().__init__()
if dropout is None:
dropout = [0.2, 0.5]
self.glimpse = glimpse
self.logits = weight_norm(
BCNet(x_dim, y_dim, z_dim, glimpse, dropout=dropout, k=3),
name="h_mat",
dim=None,
)
def forward(self, v, q, v_mask=True):
p, logits = self.forward_all(v, q, v_mask)
return p, logits
def forward_all(self, v, q, v_mask=True):
v_num = v.size(1)
q_num = q.size(1)
logits = self.logits(v, q)
if v_mask:
v_abs_sum = v.abs().sum(2)
mask = (v_abs_sum == 0).unsqueeze(1).unsqueeze(3)
mask = mask.expand(logits.size())
logits.masked_fill_(mask, -float("inf"))
expanded_logits = logits.view(-1, self.glimpse, v_num * q_num)
p = nn.functional.softmax(expanded_logits, 2)
return p.view(-1, self.glimpse, v_num, q_num), logits
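# Illustrative usage sketch: BiAttention produces glimpse-wise attention maps between image
# regions and question tokens via BCNet. All sizes below are assumptions for the example.
def _example_biattention():
    import torch

    attn = BiAttention(x_dim=2048, y_dim=1024, z_dim=512, glimpse=2)
    v = torch.randn(2, 36, 2048)  # image region features
    q = torch.randn(2, 14, 1024)  # question token embeddings
    p, logits = attn(v, q, v_mask=False)
    return p.shape, logits.shape  # (2, 2, 36, 14) for both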
class TripleLinear(nn.Module):
"""
The three-branch classifier in https://arxiv.org/abs/2004.11883:
During training, all three branches will produce the prediction on its own.
During inference, only the fused branch is used to predict the answers.
"""
def __init__(self, in_dim: int, out_dim: int):
super().__init__()
self.linears = nn.ModuleList([nn.Linear(in_dim, out_dim) for _ in range(3)])
def forward(self, joint_embedding: torch.Tensor) -> torch.Tensor:
if self.training:
feat = [self.linears[i](joint_embedding[:, i]) for i in range(3)]
return torch.stack(feat, dim=1)
return self.linears[0](joint_embedding)
class BranchCombineLayer(nn.Module):
"""Three-branch fusion module used for fusing MoVie and MCAN in
https://arxiv.org/abs/2004.11883
"""
def __init__(self, img_dim: int, ques_dim: int):
super().__init__()
self.out_dim = img_dim * 2
self.linear_cga = nn.ModuleList(
[nn.Linear(img_dim, self.out_dim) for _ in range(2)]
)
self.linear_cbn = nn.ModuleList(
[nn.Linear(img_dim, self.out_dim) for _ in range(2)]
)
self.linear_ques = nn.ModuleList(
[nn.Linear(ques_dim, self.out_dim) for _ in range(2)]
)
self.layer_norm = nn.ModuleList([nn.LayerNorm(self.out_dim) for _ in range(3)])
def forward(
self, v_cga: torch.Tensor, v_cbn: torch.Tensor, q: torch.Tensor
) -> torch.Tensor:
feat = [
self.layer_norm[0](
self.linear_ques[0](q)
+ self.linear_cbn[0](v_cbn)
+ self.linear_cga[0](v_cga)
),
self.layer_norm[1](self.linear_cbn[1](v_cbn)),
self.layer_norm[2](self.linear_ques[1](q) + self.linear_cga[1](v_cga)),
]
if self.training:
return torch.stack(feat, dim=1)
return feat[0]
class AttnPool1d(nn.Module):
"""An attention pooling layer that learns weights using an mlp"""
def __init__(self, num_features: int, num_attn: int = 1, dropout: float = 0.1):
super().__init__()
self.linear = nn.Sequential(
nn.Linear(num_features, num_features // 2),
nn.ReLU(),
nn.Dropout(p=dropout),
nn.Linear(num_features // 2, num_attn),
)
self.p_attn = torch.tensor(float("nan"))
self.num_attn = num_attn
def forward(
self,
query: torch.Tensor,
value: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
b = query.size(0)
score = self.linear(query).transpose(-2, -1)
if mask is not None:
score.data.masked_fill_(mask.unsqueeze(1), -10000.0)
p_attn = nn.functional.softmax(score, dim=-1)
if self.training:
self.p_attn = p_attn
return torch.matmul(p_attn, value).view(b, self.num_attn, -1)
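# Illustrative usage sketch: AttnPool1d learns per-token weights with a small MLP and returns
# one pooled vector per attention head. The token count and feature size are assumptions.
def _example_attn_pool_1d():
    import torch

    pool = AttnPool1d(num_features=768, num_attn=1)
    tokens = torch.randn(2, 50, 768)
    pooled = pool(tokens, tokens)  # query and value are the same sequence here
    return pooled.shape  # torch.Size([2, 1, 768])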
class AttnPool2d(nn.Module):
"""An attention pooling layer in 2D with multiheaded attention"""
def __init__(
self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1
) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = nn.functional.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False,
)
return x[0]
| EXA-1-master | exa/models/mmf-main/mmf/modules/layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from bisect import bisect_right
from mmf.common.registry import registry
from torch.optim.lr_scheduler import LambdaLR
try:
from transformers3.optimization import (
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
except ImportError:
from transformers.optimization import (
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
@registry.register_scheduler("pythia")
class PythiaScheduler(LambdaLR):
def __init__(self, optimizer, *args, **kwargs):
from mmf.utils.general import lr_lambda_update
self._lambda_func = lr_lambda_update
self._global_config = registry.get("config")
super().__init__(optimizer, self.lr_lambda, *args, **kwargs)
def lr_lambda(self, step):
return self._lambda_func(step, self._global_config)
@registry.register_scheduler("warmup_linear")
class WarmupLinearScheduler(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_linear_schedule_with_warmup(optimizer, *args, **kwargs)
@registry.register_scheduler("warmup_cosine")
class WarmupCosineScheduler(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_cosine_schedule_with_warmup(optimizer, *args, **kwargs)
@registry.register_scheduler("multi_step")
class MultiStepScheduler(PythiaScheduler):
def __init__(self, optimizer, *args, **kwargs):
self.use_warmup = kwargs["use_warmup"]
self.lr_steps = kwargs["lr_steps"]
self.lr_ratio = kwargs["lr_ratio"]
self.warmup_iterations = kwargs["warmup_iterations"] if self.use_warmup else 0
self.warmup_factor = kwargs["warmup_factor"]
assert self.warmup_iterations < self.lr_steps[0]
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch <= self.warmup_iterations and self.use_warmup is True:
alpha = float(self.last_epoch) / float(self.warmup_iterations)
lr_ratio = self.warmup_factor * (1.0 - alpha) + alpha
return [base_lr * lr_ratio for base_lr in self.base_lrs]
else:
return [
base_lr * self.lr_ratio ** bisect_right(self.lr_steps, self.last_epoch)
for base_lr in self.base_lrs
]
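# Illustrative usage sketch: in MMF these schedulers are normally built from the training
# config via the registry; here MultiStepScheduler is constructed directly, and the optimizer
# and step values are assumptions chosen for the example.
def _example_multi_step_scheduler():
    import torch

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = MultiStepScheduler(
        optimizer,
        use_warmup=True,
        warmup_iterations=100,
        warmup_factor=0.25,
        lr_steps=[1000, 2000],
        lr_ratio=0.1,
    )
    for _ in range(5):
        optimizer.step()
        scheduler.step()  # linear warmup for 100 iterations, then step decay at lr_steps
    return scheduler.get_last_lr()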
| EXA-1-master | exa/models/mmf-main/mmf/modules/schedulers.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import logging
import random
import typing
import torch
from mmf.common.registry import registry
from mmf.utils.build import build_config, build_trainer
from mmf.utils.configuration import Configuration
from mmf.utils.distributed import distributed_init, get_rank, infer_init_method, is_xla
from mmf.utils.env import set_seed, setup_imports
from mmf.utils.flags import flags
from mmf.utils.general import log_device_names
from mmf.utils.logger import setup_logger, setup_very_basic_config
setup_very_basic_config()
def main(configuration, init_distributed=False, predict=False):
# A reload might be needed for imports
setup_imports()
configuration.import_user_dir()
config = configuration.get_config()
if torch.cuda.is_available():
torch.cuda.set_device(config.device_id)
torch.cuda.init()
if init_distributed:
distributed_init(config)
seed = config.training.seed
config.training.seed = set_seed(seed if seed == -1 else seed + get_rank())
registry.register("seed", config.training.seed)
config = build_config(configuration)
setup_logger(
color=config.training.colored_logs, disable=config.training.should_not_log
)
logger = logging.getLogger("mmf_cli.run")
# Log args for debugging purposes
logger.info(configuration.args)
logger.info(f"Torch version: {torch.__version__}")
log_device_names()
logger.info(f"Using seed {config.training.seed}")
trainer = build_trainer(config)
trainer.load()
if predict:
trainer.inference()
else:
trainer.train()
def distributed_main(device_id, configuration, predict=False):
config = configuration.get_config()
config.device_id = device_id
if config.distributed.rank is None:
config.distributed.rank = config.start_rank + device_id
main(configuration, init_distributed=True, predict=predict)
def run(opts: typing.Optional[typing.List[str]] = None, predict: bool = False):
"""Run starts a job based on the command passed from the command line.
You can optionally run the mmf job programmatically by passing an optlist as opts.
Args:
        opts (typing.Optional[typing.List[str]], optional): Optlist which can be used
            to override opts programmatically. For example, if you pass
opts = ["training.batch_size=64", "checkpoint.resume=True"], this will
set the batch size to 64 and resume from the checkpoint if present.
Defaults to None.
predict (bool, optional): If predict is passed True, then the program runs in
prediction mode. Defaults to False.
"""
setup_imports()
if opts is None:
parser = flags.get_parser()
args = parser.parse_args()
else:
args = argparse.Namespace(config_override=None)
args.opts = opts
configuration = Configuration(args)
# Do set runtime args which can be changed by MMF
configuration.args = args
config = configuration.get_config()
config.start_rank = 0
if config.distributed.init_method is None:
infer_init_method(config)
if config.distributed.init_method is not None:
if torch.cuda.device_count() > 1 and not config.distributed.no_spawn:
config.start_rank = config.distributed.rank
config.distributed.rank = None
torch.multiprocessing.spawn(
fn=distributed_main,
args=(configuration, predict),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(0, configuration, predict)
elif config.distributed.world_size > 1:
if is_xla():
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy("file_system")
xmp.spawn(
fn=distributed_main,
args=(configuration, predict),
nprocs=8, # use all 8 TPU cores
start_method="fork",
)
else:
assert config.distributed.world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
config.distributed.init_method = f"tcp://localhost:{port}"
config.distributed.rank = None
torch.multiprocessing.spawn(
fn=distributed_main,
args=(configuration, predict),
nprocs=config.distributed.world_size,
)
else:
config.device_id = 0
main(configuration, predict=predict)
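# Illustrative sketch of launching a job programmatically with option overrides, as described
# in the docstring above. The config path, model, and dataset names are placeholders for the
# example, not values shipped as defaults.
def _example_programmatic_run():
    run(
        opts=[
            "config=projects/hateful_memes/configs/mmbt/defaults.yaml",
            "model=mmbt",
            "dataset=hateful_memes",
            "training.batch_size=32",
        ]
    )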
if __name__ == "__main__":
run()
| EXA-1-master | exa/models/mmf-main/mmf_cli/run.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
import sys
from mmf_cli.run import run
def predict(opts=None):
if opts is None:
sys.argv.extend(["evaluation.predict=true"])
else:
opts.extend(["evaluation.predict=true"])
run(predict=True)
if __name__ == "__main__":
predict()
| EXA-1-master | exa/models/mmf-main/mmf_cli/predict.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/mmf_cli/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Entrypoint script used by TorchX to start the training run in each process
"""
from mmf_cli.fb_run import fb_scheduler_run
if __name__ == "__main__":
fb_scheduler_run()
| EXA-1-master | exa/models/mmf-main/mmf_cli/torchx_entryscript.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import logging
import typing
from mmf.utils.configuration import _merge_with_dotlist
from mmf.utils.flags import flags
from mmf.utils.inference import Inference
from mmf.utils.logger import setup_logger
from omegaconf import OmegaConf
def construct_config(opts: typing.List[str]):
config = OmegaConf.create({"checkpoint_path": ""})
return _merge_with_dotlist(config, opts)
def interactive(opts: typing.Optional[typing.List[str]] = None):
"""Inference runs inference on an image and text provided by the user.
You can optionally run inference programmatically by passing an optlist as opts.
Args:
        opts (typing.Optional[typing.List[str]], optional): Optlist which can be used
            to override opts programmatically. For example, if you pass
            opts = ["checkpoint_path=my/directory"], this will set the checkpoint path.
"""
if opts is None:
parser = flags.get_parser()
args = parser.parse_args()
else:
args = argparse.Namespace(config_override=None)
args.opts = opts
setup_logger()
logger = logging.getLogger("mmf_cli.interactive")
config = construct_config(args.opts)
inference = Inference(checkpoint_path=config.checkpoint_path)
logger.info("Enter 'exit' at any point to terminate.")
logger.info("Enter an image URL:")
image_url = input()
logger.info("Got image URL.")
logger.info("Enter text:")
text = input()
logger.info("Got text input.")
while text != "exit":
logger.info("Running inference on image and text input.")
answer = inference.forward(image_url, {"text": text}, image_format="url")
logger.info("Model response: " + answer)
logger.info(
f"Enter another image URL or leave it blank to continue using {image_url}:"
)
new_image_url = input()
if new_image_url != "":
image_url = new_image_url
if new_image_url == "exit":
break
logger.info("Enter another text input:")
text = input()
if __name__ == "__main__":
interactive()
| EXA-1-master | exa/models/mmf-main/mmf_cli/interactive.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import hashlib
import os
import subprocess
import warnings
import zipfile
from mmf.utils.configuration import Configuration
from mmf.utils.download import copy, decompress, move
from mmf.utils.file_io import PathManager
class HMConverter:
IMAGE_FILES = ["img.tar.gz", "img"]
JSONL_PHASE_ONE_FILES = ["train.jsonl", "dev.jsonl", "test.jsonl"]
JSONL_PHASE_TWO_FILES = [
"train.jsonl",
"dev_seen.jsonl",
"test_seen.jsonl",
"dev_unseen.jsonl",
"test_unseen.jsonl",
]
POSSIBLE_CHECKSUMS = [
"d8f1073f5fbf1b08a541cc2325fc8645619ab8ed768091fb1317d5c3a6653a77",
"a424c003b7d4ea3f3b089168b5f5ea73b90a3ff043df4b8ff4d7ed87c51cb572",
"6e609b8c230faff02426cf462f0c9528957b7884d68c60ebc26ff83846e5f80f",
"c1363aae9649c79ae4abfdb151b56d3d170187db77757f3daa80856558ac367c",
]
def __init__(self):
self.parser = self.get_parser()
self.args = self.parser.parse_args()
self.configuration = Configuration()
def assert_files(self, folder):
files_needed = self.JSONL_PHASE_ONE_FILES
phase_one = True
for file in files_needed:
try:
assert PathManager.exists(
os.path.join(folder, "data", file)
), f"{file} doesn't exist in {folder}"
except AssertionError:
phase_one = False
if not phase_one:
files_needed = self.JSONL_PHASE_TWO_FILES
for file in files_needed:
assert PathManager.exists(
os.path.join(folder, "data", file)
), f"{file} doesn't exist in {folder}"
else:
warnings.warn(
"You are on Phase 1 of the Hateful Memes Challenge. "
"Please update to Phase 2"
)
files_needed = self.IMAGE_FILES
exists = False
for file in files_needed:
exists = exists or PathManager.exists(os.path.join(folder, "data", file))
if not exists:
raise AssertionError("Neither img or img.tar.gz exists in current zip")
return phase_one
def get_parser(self):
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--zip_file",
required=True,
type=str,
help="Zip file downloaded from the DrivenData",
)
parser.add_argument(
"--password", required=True, type=str, help="Password for the zip file"
)
parser.add_argument(
"--move", required=None, type=int, help="Move data dir to mmf cache dir"
)
parser.add_argument(
"--mmf_data_folder", required=None, type=str, help="MMF Data folder"
)
parser.add_argument(
"--bypass_checksum",
required=None,
type=int,
help="Pass 1 if you want to skip checksum",
)
return parser
def convert(self):
config = self.configuration.get_config()
data_dir = config.env.data_dir
if self.args.mmf_data_folder:
data_dir = self.args.mmf_data_folder
bypass_checksum = False
if self.args.bypass_checksum:
bypass_checksum = bool(self.args.bypass_checksum)
print(f"Data folder is {data_dir}")
print(f"Zip path is {self.args.zip_file}")
base_path = os.path.join(data_dir, "datasets", "hateful_memes", "defaults")
images_path = os.path.join(base_path, "images")
PathManager.mkdirs(images_path)
move_dir = False
if self.args.move:
move_dir = bool(self.args.move)
if not bypass_checksum:
self.checksum(self.args.zip_file, self.POSSIBLE_CHECKSUMS)
src = self.args.zip_file
dest = images_path
if move_dir:
print(f"Moving {src}")
move(src, dest)
else:
print(f"Copying {src}")
copy(src, dest)
print(f"Unzipping {src}")
self.decompress_zip(
dest, fname=os.path.basename(src), password=self.args.password
)
phase_one = self.assert_files(images_path)
annotations_path = os.path.join(base_path, "annotations")
PathManager.mkdirs(annotations_path)
annotations = (
self.JSONL_PHASE_ONE_FILES
if phase_one is True
else self.JSONL_PHASE_TWO_FILES
)
for annotation in annotations:
print(f"Moving {annotation}")
src = os.path.join(images_path, "data", annotation)
dest = os.path.join(annotations_path, annotation)
move(src, dest)
images = self.IMAGE_FILES
for image_file in images:
src = os.path.join(images_path, "data", image_file)
if PathManager.exists(src):
print(f"Moving {image_file}")
else:
continue
dest = os.path.join(images_path, image_file)
move(src, dest)
if src.endswith(".tar.gz"):
decompress(dest, fname=image_file, delete_original=False)
def checksum(self, file, hashes):
sha256_hash = hashlib.sha256()
destination = file
with PathManager.open(destination, "rb") as f:
print("Starting checksum for {}".format(os.path.basename(file)))
for byte_block in iter(lambda: f.read(65536), b""):
sha256_hash.update(byte_block)
if sha256_hash.hexdigest() not in hashes:
# remove_dir(download_path)
                raise AssertionError(
                    "Checksum of downloaded file does not match the expected "
                    "checksum. Please try again."
                )
else:
print("Checksum successful")
def decompress_zip(self, dest, fname, password=None):
path = os.path.join(dest, fname)
print("Extracting the zip can take time. Sit back and relax.")
try:
# Python's zip file module is very slow with password encrypted files
# Try command line
command = ["unzip", "-o", "-q", "-d", dest]
if password:
command += ["-P", password]
command += [path]
subprocess.run(command, check=True)
except Exception:
obj = zipfile.ZipFile(path, "r")
if password:
obj.setpassword(password.encode("utf-8"))
obj.extractall(path=dest)
obj.close()
def main():
converter = HMConverter()
converter.convert()
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/mmf-main/mmf_cli/hm_convert.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import re
from typing import List, Tuple
from setuptools import setup, find_packages
NAME = "dinov2"
DESCRIPTION = "PyTorch code and models for the DINOv2 self-supervised learning method."
URL = "https://github.com/facebookresearch/dinov2"
AUTHOR = "FAIR"
REQUIRES_PYTHON = ">=3.9.0"
HERE = Path(__file__).parent
try:
with open(HERE / "README.md", encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
def get_requirements(path: str = HERE / "requirements.txt") -> Tuple[List[str], List[str]]:
requirements = []
extra_indices = []
with open(path) as f:
for line in f.readlines():
line = line.rstrip("\r\n")
if line.startswith("--extra-index-url "):
extra_indices.append(line[18:])
continue
requirements.append(line)
return requirements, extra_indices
def get_package_version() -> str:
with open(HERE / "dinov2/__init__.py") as f:
result = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M)
if result:
return result.group(1)
raise RuntimeError("Can't get package version")
requirements, extra_indices = get_requirements()
version = get_package_version()
dev_requirements, _ = get_requirements(HERE / "requirements-dev.txt")
setup(
name=NAME,
version=version,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(),
package_data={
"": ["*.yaml"],
},
install_requires=requirements,
dependency_links=extra_indices,
extras_require={
"dev": dev_requirements,
},
    include_package_data=True,
license="CC-BY-NC",
license_files=("LICENSE",),
classifiers=[
# Trove classifiers: https://github.com/pypa/trove-classifiers/blob/main/src/trove_classifiers/__init__.py
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: Other/Proprietary License",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| EXA-1-master | exa/models/dinov2/setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
dependencies = ["torch"]
_DINOV2_BASE_URL = "https://dl.fbaipublicfiles.com/dinov2"
def _make_dinov2_model_name(arch_name: str, patch_size: int) -> str:
compact_arch_name = arch_name.replace("_", "")[:4]
return f"dinov2_{compact_arch_name}{patch_size}"
def _make_dinov2_model(
*,
arch_name: str = "vit_large",
img_size: int = 518,
patch_size: int = 14,
init_values: float = 1.0,
ffn_layer: str = "mlp",
block_chunks: int = 0,
pretrained: bool = True,
**kwargs,
):
from dinov2.models import vision_transformer as vits
model_name = _make_dinov2_model_name(arch_name, patch_size)
vit_kwargs = dict(
img_size=img_size,
patch_size=patch_size,
init_values=init_values,
ffn_layer=ffn_layer,
block_chunks=block_chunks,
)
vit_kwargs.update(**kwargs)
model = vits.__dict__[arch_name](**vit_kwargs)
if pretrained:
url = _DINOV2_BASE_URL + f"/{model_name}/{model_name}_pretrain.pth"
state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
model.load_state_dict(state_dict, strict=False)
return model
def dinov2_vits14(*, pretrained: bool = True, **kwargs):
"""
DINOv2 ViT-S/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_small", pretrained=pretrained, **kwargs)
def dinov2_vitb14(*, pretrained: bool = True, **kwargs):
"""
DINOv2 ViT-B/14 model pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_base", pretrained=pretrained, **kwargs)
def dinov2_vitl14(*, pretrained: bool = True, **kwargs):
"""
DINOv2 ViT-L/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_large", pretrained=pretrained, **kwargs)
def dinov2_vitg14(*, pretrained: bool = True, **kwargs):
"""
DINOv2 ViT-g/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_giant2", ffn_layer="swiglufused", pretrained=pretrained, **kwargs)
def _make_dinov2_linear_head(
*,
model_name: str = "dinov2_vitl14",
embed_dim: int = 1024,
layers: int = 4,
pretrained: bool = True,
**kwargs,
):
assert layers in (1, 4), f"Unsupported number of layers: {layers}"
linear_head = nn.Linear((1 + layers) * embed_dim, 1_000)
if pretrained:
layers_str = str(layers) if layers == 4 else ""
url = _DINOV2_BASE_URL + f"/{model_name}/{model_name}_linear{layers_str}_head.pth"
state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
linear_head.load_state_dict(state_dict, strict=False)
return linear_head
class _LinearClassifierWrapper(nn.Module):
def __init__(self, *, backbone: nn.Module, linear_head: nn.Module, layers: int = 4):
super().__init__()
self.backbone = backbone
self.linear_head = linear_head
self.layers = layers
def forward(self, x):
if self.layers == 1:
x = self.backbone.forward_features(x)
cls_token = x["x_norm_clstoken"].squeeze(0)
patch_tokens = x["x_norm_patchtokens"].squeeze(0)
linear_input = torch.cat([
cls_token,
patch_tokens.mean(0)
])
elif self.layers == 4:
x = self.backbone.get_intermediate_layers(x, n=4, return_class_token=True)
linear_input = torch.cat([
x[0][1].squeeze(0),
x[1][1].squeeze(0),
x[2][1].squeeze(0),
x[3][1].squeeze(0),
x[3][0].squeeze(0).mean(0)
])
else:
assert False, f"Unsupported number of layers: {self.layers}"
return self.linear_head(linear_input)
def _make_dinov2_linear_classifier(
*,
arch_name: str = "vit_large",
layers: int = 4,
pretrained: bool = True,
**kwargs,
):
backbone = _make_dinov2_model(arch_name=arch_name, pretrained=pretrained, **kwargs)
embed_dim = backbone.embed_dim
patch_size = backbone.patch_size
model_name = _make_dinov2_model_name(arch_name, patch_size)
linear_head = _make_dinov2_linear_head(model_name=model_name, embed_dim=embed_dim, layers=layers, pretrained=pretrained)
return _LinearClassifierWrapper(backbone=backbone, linear_head=linear_head, layers=layers)
def dinov2_vits14_lc(*, layers: int = 4, pretrained: bool = True, **kwargs):
"""
Linear classifier (1 or 4 layers) on top of a DINOv2 ViT-S/14 backbone (optionally) pretrained on the LVD-142M dataset and trained on ImageNet-1k.
"""
return _make_dinov2_linear_classifier(arch_name="vit_small", layers=layers, pretrained=pretrained, **kwargs)
def dinov2_vitb14_lc(*, pretrained: bool = True, **kwargs):
"""
Linear classifier (1 or 4 layers) on top of a DINOv2 ViT-B/14 backbone (optionally) pretrained on the LVD-142M dataset and trained on ImageNet-1k.
"""
return _make_dinov2_linear_classifier(arch_name="vit_base", pretrained=pretrained, **kwargs)
def dinov2_vitl14_lc(*, pretrained: bool = True, **kwargs):
"""
Linear classifier (1 or 4 layers) on top of a DINOv2 ViT-L/14 backbone (optionally) pretrained on the LVD-142M dataset and trained on ImageNet-1k.
"""
return _make_dinov2_linear_classifier(arch_name="vit_large", pretrained=pretrained, **kwargs)
def dinov2_vitg14_lc(*, pretrained: bool = True, **kwargs):
"""
Linear classifier (1 or 4 layers) on top of a DINOv2 ViT-g/14 backbone (optionally) pretrained on the LVD-142M dataset and trained on ImageNet-1k.
"""
return _make_dinov2_linear_classifier(arch_name="vit_giant2", ffn_layer="swiglufused", pretrained=pretrained, **kwargs)
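# Illustrative usage sketch: loading a pretrained backbone through torch.hub using the
# entrypoints defined above. The input resolution is an assumption for the example (any
# multiple of the 14-pixel patch size works).
def _example_torch_hub_usage():
    import torch

    backbone = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
    backbone.eval()
    with torch.inference_mode():
        images = torch.randn(1, 3, 518, 518)
        features = backbone(images)  # (1, 384) CLS embedding for ViT-S/14
    return features.shape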
| EXA-1-master | exa/models/dinov2/hubconf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.0.1"
| EXA-1-master | exa/models/dinov2/dinov2/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
import logging
from torch import Tensor
from torch import nn
logger = logging.getLogger("dinov2")
try:
from xformers.ops import memory_efficient_attention, unbind, fmha
XFORMERS_AVAILABLE = True
except ImportError:
logger.warning("xFormers not available")
XFORMERS_AVAILABLE = False
class Attention(nn.Module):
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
proj_bias: bool = True,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim, bias=proj_bias)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: Tensor) -> Tensor:
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class MemEffAttention(Attention):
def forward(self, x: Tensor, attn_bias=None) -> Tensor:
if not XFORMERS_AVAILABLE:
assert attn_bias is None, "xFormers is required for nested tensors usage"
return super().forward(x)
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
q, k, v = unbind(qkv, 2)
if attn_bias is not None:
self_att_op = fmha.MemoryEfficientAttentionFlashAttentionOp
else:
self_att_op = None
x = memory_efficient_attention(q, k, v, attn_bias=attn_bias, op=self_att_op)
x = x.reshape([B, N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x
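# Illustrative usage sketch: both attention variants map a (B, N, C) token sequence to a
# tensor of the same shape. The sizes below are assumptions for the example; MemEffAttention
# additionally needs xFormers at run time when an attention bias is passed.
def _example_attention():
    import torch

    attn = Attention(dim=384, num_heads=6, qkv_bias=True)
    tokens = torch.randn(2, 257, 384)  # e.g. 256 patch tokens + 1 class token
    out = attn(tokens)
    return out.shape  # torch.Size([2, 257, 384])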
| EXA-1-master | exa/models/dinov2/dinov2/layers/attention.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.nn.init import trunc_normal_
from torch.nn.utils import weight_norm
class DINOHead(nn.Module):
def __init__(
self,
in_dim,
out_dim,
use_bn=False,
nlayers=3,
hidden_dim=2048,
bottleneck_dim=256,
mlp_bias=True,
):
super().__init__()
nlayers = max(nlayers, 1)
self.mlp = _build_mlp(nlayers, in_dim, bottleneck_dim, hidden_dim=hidden_dim, use_bn=use_bn, bias=mlp_bias)
self.apply(self._init_weights)
self.last_layer = weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
eps = 1e-6 if x.dtype == torch.float16 else 1e-12
x = nn.functional.normalize(x, dim=-1, p=2, eps=eps)
x = self.last_layer(x)
return x
def _build_mlp(nlayers, in_dim, bottleneck_dim, hidden_dim=None, use_bn=False, bias=True):
if nlayers == 1:
return nn.Linear(in_dim, bottleneck_dim, bias=bias)
else:
layers = [nn.Linear(in_dim, hidden_dim, bias=bias)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim, bias=bias))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim, bias=bias))
return nn.Sequential(*layers)
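# Illustrative usage sketch: the DINO head projects backbone embeddings onto prototype scores.
# The dimensions below are small assumptions for the example; the training recipes use a much
# larger out_dim.
def _example_dino_head():
    import torch

    head = DINOHead(in_dim=384, out_dim=4096, hidden_dim=512, bottleneck_dim=128)
    cls_tokens = torch.randn(8, 384)
    logits = head(cls_tokens)
    return logits.shape  # torch.Size([8, 4096])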
| EXA-1-master | exa/models/dinov2/dinov2/layers/dino_head.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
from torch import Tensor, nn
import torch.nn.functional as F
class SwiGLUFFN(nn.Module):
def __init__(
self,
in_features: int,
hidden_features: Optional[int] = None,
out_features: Optional[int] = None,
act_layer: Callable[..., nn.Module] = None,
drop: float = 0.0,
bias: bool = True,
) -> None:
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
self.w3 = nn.Linear(hidden_features, out_features, bias=bias)
def forward(self, x: Tensor) -> Tensor:
x12 = self.w12(x)
x1, x2 = x12.chunk(2, dim=-1)
hidden = F.silu(x1) * x2
return self.w3(hidden)
try:
from xformers.ops import SwiGLU
XFORMERS_AVAILABLE = True
except ImportError:
SwiGLU = SwiGLUFFN
XFORMERS_AVAILABLE = False
class SwiGLUFFNFused(SwiGLU):
def __init__(
self,
in_features: int,
hidden_features: Optional[int] = None,
out_features: Optional[int] = None,
act_layer: Callable[..., nn.Module] = None,
drop: float = 0.0,
bias: bool = True,
) -> None:
out_features = out_features or in_features
hidden_features = hidden_features or in_features
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
super().__init__(
in_features=in_features,
hidden_features=hidden_features,
out_features=out_features,
bias=bias,
)
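# Illustrative usage sketch: SwiGLUFFN is a gated feed-forward block that preserves the token
# shape. The sizes are assumptions for the example; SwiGLUFFNFused behaves the same but routes
# through the xFormers SwiGLU implementation when it is available.
def _example_swiglu_ffn():
    import torch

    ffn = SwiGLUFFN(in_features=384, hidden_features=1024)
    tokens = torch.randn(2, 10, 384)
    out = ffn(tokens)
    return out.shape  # torch.Size([2, 10, 384])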
| EXA-1-master | exa/models/dinov2/dinov2/layers/swiglu_ffn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .dino_head import DINOHead
from .mlp import Mlp
from .patch_embed import PatchEmbed
from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused
from .block import NestedTensorBlock
from .attention import MemEffAttention
| EXA-1-master | exa/models/dinov2/dinov2/layers/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110
from typing import Union
import torch
from torch import Tensor
from torch import nn
class LayerScale(nn.Module):
def __init__(
self,
dim: int,
init_values: Union[float, Tensor] = 1e-5,
inplace: bool = False,
) -> None:
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: Tensor) -> Tensor:
return x.mul_(self.gamma) if self.inplace else x * self.gamma
| EXA-1-master | exa/models/dinov2/dinov2/layers/layer_scale.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py
from typing import Callable, Optional
from torch import Tensor, nn
class Mlp(nn.Module):
def __init__(
self,
in_features: int,
hidden_features: Optional[int] = None,
out_features: Optional[int] = None,
act_layer: Callable[..., nn.Module] = nn.GELU,
drop: float = 0.0,
bias: bool = True,
) -> None:
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
self.drop = nn.Dropout(drop)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
| EXA-1-master | exa/models/dinov2/dinov2/layers/mlp.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
from typing import Callable, Optional, Tuple, Union
from torch import Tensor
import torch.nn as nn
def make_2tuple(x):
if isinstance(x, tuple):
assert len(x) == 2
return x
assert isinstance(x, int)
return (x, x)
class PatchEmbed(nn.Module):
"""
2D image to patch embedding: (B,C,H,W) -> (B,N,D)
Args:
img_size: Image size.
patch_size: Patch token size.
in_chans: Number of input image channels.
embed_dim: Number of linear projection output channels.
norm_layer: Normalization layer.
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
embed_dim: int = 768,
norm_layer: Optional[Callable] = None,
flatten_embedding: bool = True,
) -> None:
super().__init__()
image_HW = make_2tuple(img_size)
patch_HW = make_2tuple(patch_size)
patch_grid_size = (
image_HW[0] // patch_HW[0],
image_HW[1] // patch_HW[1],
)
self.img_size = image_HW
self.patch_size = patch_HW
self.patches_resolution = patch_grid_size
self.num_patches = patch_grid_size[0] * patch_grid_size[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.flatten_embedding = flatten_embedding
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x: Tensor) -> Tensor:
_, _, H, W = x.shape
patch_H, patch_W = self.patch_size
assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
        assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width {patch_W}"
x = self.proj(x) # B C H W
H, W = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2) # B HW C
x = self.norm(x)
if not self.flatten_embedding:
x = x.reshape(-1, H, W, self.embed_dim) # B H W C
return x
def flops(self) -> float:
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
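# Illustrative usage sketch: PatchEmbed turns an image batch into a flattened token sequence.
# The sizes below match this class's defaults, not any particular DINOv2 checkpoint.
def _example_patch_embed():
    import torch

    patch_embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    images = torch.randn(2, 3, 224, 224)
    tokens = patch_embed(images)  # 14 x 14 patch grid flattened to 196 tokens
    return tokens.shape  # torch.Size([2, 196, 768])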
| EXA-1-master | exa/models/dinov2/dinov2/layers/patch_embed.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
import logging
from typing import Callable, List, Any, Tuple, Dict
import torch
from torch import nn, Tensor
from .attention import Attention, MemEffAttention
from .drop_path import DropPath
from .layer_scale import LayerScale
from .mlp import Mlp
logger = logging.getLogger("dinov2")
try:
from xformers.ops import fmha
from xformers.ops import scaled_index_add, index_select_cat
XFORMERS_AVAILABLE = True
except ImportError:
logger.warning("xFormers not available")
XFORMERS_AVAILABLE = False
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = False,
proj_bias: bool = True,
ffn_bias: bool = True,
drop: float = 0.0,
attn_drop: float = 0.0,
init_values=None,
drop_path: float = 0.0,
act_layer: Callable[..., nn.Module] = nn.GELU,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_class: Callable[..., nn.Module] = Attention,
ffn_layer: Callable[..., nn.Module] = Mlp,
) -> None:
super().__init__()
# print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
self.norm1 = norm_layer(dim)
self.attn = attn_class(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
proj_bias=proj_bias,
attn_drop=attn_drop,
proj_drop=drop,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = ffn_layer(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
bias=ffn_bias,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.sample_drop_ratio = drop_path
def forward(self, x: Tensor) -> Tensor:
def attn_residual_func(x: Tensor) -> Tensor:
return self.ls1(self.attn(self.norm1(x)))
def ffn_residual_func(x: Tensor) -> Tensor:
return self.ls2(self.mlp(self.norm2(x)))
if self.training and self.sample_drop_ratio > 0.1:
# the overhead is compensated only for a drop path rate larger than 0.1
x = drop_add_residual_stochastic_depth(
x,
residual_func=attn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
)
x = drop_add_residual_stochastic_depth(
x,
residual_func=ffn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
)
elif self.training and self.sample_drop_ratio > 0.0:
x = x + self.drop_path1(attn_residual_func(x))
x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
else:
x = x + attn_residual_func(x)
x = x + ffn_residual_func(x)
return x
def drop_add_residual_stochastic_depth(
x: Tensor,
residual_func: Callable[[Tensor], Tensor],
sample_drop_ratio: float = 0.0,
) -> Tensor:
# 1) extract subset using permutation
b, n, d = x.shape
sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
x_subset = x[brange]
# 2) apply residual_func to get residual
residual = residual_func(x_subset)
x_flat = x.flatten(1)
residual = residual.flatten(1)
residual_scale_factor = b / sample_subset_size
# 3) add the residual
x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
return x_plus_residual.view_as(x)
def get_branges_scales(x, sample_drop_ratio=0.0):
b, n, d = x.shape
sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
residual_scale_factor = b / sample_subset_size
return brange, residual_scale_factor
def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
if scaling_vector is None:
x_flat = x.flatten(1)
residual = residual.flatten(1)
x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
else:
x_plus_residual = scaled_index_add(
x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
)
return x_plus_residual
attn_bias_cache: Dict[Tuple, Any] = {}
def get_attn_bias_and_cat(x_list, branges=None):
"""
this will perform the index select, cat the tensors, and provide the attn_bias from cache
"""
batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
if all_shapes not in attn_bias_cache.keys():
seqlens = []
for b, x in zip(batch_sizes, x_list):
for _ in range(b):
seqlens.append(x.shape[1])
attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
attn_bias._batch_sizes = batch_sizes
attn_bias_cache[all_shapes] = attn_bias
if branges is not None:
cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
else:
tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
cat_tensors = torch.cat(tensors_bs1, dim=1)
return attn_bias_cache[all_shapes], cat_tensors
def drop_add_residual_stochastic_depth_list(
x_list: List[Tensor],
residual_func: Callable[[Tensor, Any], Tensor],
sample_drop_ratio: float = 0.0,
scaling_vector=None,
) -> Tensor:
# 1) generate random set of indices for dropping samples in the batch
branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
branges = [s[0] for s in branges_scales]
residual_scale_factors = [s[1] for s in branges_scales]
# 2) get attention bias and index+concat the tensors
attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
# 3) apply residual_func to get residual, and split the result
residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
outputs = []
for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
return outputs
class NestedTensorBlock(Block):
def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
"""
x_list contains a list of tensors to nest together and run
"""
assert isinstance(self.attn, MemEffAttention)
if self.training and self.sample_drop_ratio > 0.0:
def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
return self.attn(self.norm1(x), attn_bias=attn_bias)
def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
return self.mlp(self.norm2(x))
x_list = drop_add_residual_stochastic_depth_list(
x_list,
residual_func=attn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
)
x_list = drop_add_residual_stochastic_depth_list(
x_list,
residual_func=ffn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,
)
return x_list
else:
def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))
def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
return self.ls2(self.mlp(self.norm2(x)))
attn_bias, x = get_attn_bias_and_cat(x_list)
x = x + attn_residual_func(x, attn_bias=attn_bias)
x = x + ffn_residual_func(x)
return attn_bias.split(x)
def forward(self, x_or_x_list):
if isinstance(x_or_x_list, Tensor):
return super().forward(x_or_x_list)
elif isinstance(x_or_x_list, list):
assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage"
return self.forward_nested(x_or_x_list)
else:
raise AssertionError
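# Illustrative usage sketch: with a plain tensor input NestedTensorBlock falls back to the
# standard Block forward; the list-of-tensors path needs xFormers and a MemEffAttention
# attn_class. The sizes below are assumptions for the example.
def _example_block():
    import torch

    block = NestedTensorBlock(dim=384, num_heads=6, mlp_ratio=4.0)
    tokens = torch.randn(2, 257, 384)
    out = block(tokens)
    return out.shape  # torch.Size([2, 257, 384])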
| EXA-1-master | exa/models/dinov2/dinov2/layers/block.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py
from torch import nn
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0:
random_tensor.div_(keep_prob)
output = x * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
| EXA-1-master | exa/models/dinov2/dinov2/layers/drop_path.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
import logging
logger = logging.getLogger("dinov2")
try:
from xformers.ops import cross_entropy
def lossfunc(t, s, temp):
s = s.float()
t = t.float()
if s.ndim == 2:
return -cross_entropy(s.unsqueeze(0), t.unsqueeze(0), temp, bw_inplace=True).squeeze(0)
elif s.ndim == 3:
return -cross_entropy(s, t, temp, bw_inplace=True)
except ImportError:
def lossfunc(t, s, temp):
return torch.sum(t * F.log_softmax(s / temp, dim=-1), dim=-1)
class iBOTPatchLoss(nn.Module):
def __init__(self, patch_out_dim, student_temp=0.1, center_momentum=0.9):
super().__init__()
self.student_temp = student_temp
self.center_momentum = center_momentum
self.register_buffer("center", torch.zeros(1, 1, patch_out_dim))
self.updated = True
self.reduce_handle = None
self.len_teacher_patch_tokens = None
self.async_batch_center = None
@torch.no_grad()
def softmax_center_teacher(self, teacher_patch_tokens, teacher_temp):
self.apply_center_update()
# teacher centering and sharpening
#
# WARNING:
# as self.center is a float32, everything gets casted to float32 afterwards
#
# teacher_patch_tokens = teacher_patch_tokens.float()
# return F.softmax((teacher_patch_tokens.sub_(self.center.to(teacher_patch_tokens.dtype))).mul_(1 / teacher_temp), dim=-1)
return F.softmax((teacher_patch_tokens - self.center) / teacher_temp, dim=-1)
# this is experimental, keep everything in float16 and let's see what happens:
# return F.softmax((teacher_patch_tokens.sub_(self.center)) / teacher_temp, dim=-1)
@torch.no_grad()
def sinkhorn_knopp_teacher(self, teacher_output, teacher_temp, n_masked_patches_tensor, n_iterations=3):
teacher_output = teacher_output.float()
# world_size = dist.get_world_size() if dist.is_initialized() else 1
Q = torch.exp(teacher_output / teacher_temp).t() # Q is K-by-B for consistency with notations from our paper
# B = Q.shape[1] * world_size # number of samples to assign
B = n_masked_patches_tensor
dist.all_reduce(B)
K = Q.shape[0] # how many prototypes
# make the matrix sums to 1
sum_Q = torch.sum(Q)
if dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for it in range(n_iterations):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
            Q *= B  # the columns must sum to 1 so that Q is an assignment
return Q.t()
def forward(self, student_patch_tokens, teacher_patch_tokens, student_masks_flat):
"""
Cross-entropy between softmax outputs of the teacher and student networks.
student_patch_tokens: (B, N, D) tensor
teacher_patch_tokens: (B, N, D) tensor
student_masks_flat: (B, N) tensor
"""
t = teacher_patch_tokens
s = student_patch_tokens
loss = torch.sum(t * F.log_softmax(s / self.student_temp, dim=-1), dim=-1)
loss = torch.sum(loss * student_masks_flat.float(), dim=-1) / student_masks_flat.sum(dim=-1).clamp(min=1.0)
return -loss.mean()
def forward_masked(
self,
student_patch_tokens_masked,
teacher_patch_tokens_masked,
student_masks_flat,
n_masked_patches=None,
masks_weight=None,
):
t = teacher_patch_tokens_masked
s = student_patch_tokens_masked
# loss = torch.sum(t * F.log_softmax(s / self.student_temp, dim=-1), dim=-1)
loss = lossfunc(t, s, self.student_temp)
if masks_weight is None:
masks_weight = (
(1 / student_masks_flat.sum(-1).clamp(min=1.0))
.unsqueeze(-1)
.expand_as(student_masks_flat)[student_masks_flat]
)
if n_masked_patches is not None:
loss = loss[:n_masked_patches]
loss = loss * masks_weight
return -loss.sum() / student_masks_flat.shape[0]
@torch.no_grad()
def update_center(self, teacher_patch_tokens):
self.reduce_center_update(teacher_patch_tokens)
@torch.no_grad()
def reduce_center_update(self, teacher_patch_tokens):
self.updated = False
self.len_teacher_patch_tokens = len(teacher_patch_tokens)
self.async_batch_center = torch.sum(teacher_patch_tokens.mean(1), dim=0, keepdim=True)
if dist.is_initialized():
self.reduce_handle = dist.all_reduce(self.async_batch_center, async_op=True)
@torch.no_grad()
def apply_center_update(self):
if self.updated is False:
world_size = dist.get_world_size() if dist.is_initialized() else 1
if self.reduce_handle is not None:
self.reduce_handle.wait()
_t = self.async_batch_center / (self.len_teacher_patch_tokens * world_size)
self.center = self.center * self.center_momentum + _t * (1 - self.center_momentum)
self.updated = True
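# Illustrative usage sketch: the plain forward averages the per-patch cross-entropy over the
# masked positions. The shapes, prototype count, and 0/1 mask layout are assumptions for the
# example.
def _example_ibot_patch_loss():
    import torch

    criterion = iBOTPatchLoss(patch_out_dim=1024)
    student_patch_tokens = torch.randn(2, 16, 1024)
    teacher_patch_tokens = torch.softmax(torch.randn(2, 16, 1024), dim=-1)
    masks = torch.zeros(2, 16)
    masks[:, :4] = 1.0  # only the first four patches of each image were masked
    loss = criterion(student_patch_tokens, teacher_patch_tokens, masks)
    return loss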
| EXA-1-master | exa/models/dinov2/dinov2/loss/ibot_patch_loss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.distributed as dist
logger = logging.getLogger("dinov2")
class KoLeoLoss(nn.Module):
"""Kozachenko-Leonenko entropic loss regularizer from Sablayrolles et al. - 2018 - Spreading vectors for similarity search"""
def __init__(self):
super().__init__()
self.pdist = nn.PairwiseDistance(2, eps=1e-8)
def pairwise_NNs_inner(self, x):
"""
Pairwise nearest neighbors for L2-normalized vectors.
Uses Torch rather than Faiss to remain on GPU.
"""
        # pairwise dot products (= inverse distance)
dots = torch.mm(x, x.t())
n = x.shape[0]
dots.view(-1)[:: (n + 1)].fill_(-1) # Trick to fill diagonal with -1
# max inner prod -> min distance
_, I = torch.max(dots, dim=1) # noqa: E741
return I
def forward(self, student_output, eps=1e-8):
"""
Args:
student_output (BxD): backbone output of student
"""
with torch.cuda.amp.autocast(enabled=False):
student_output = F.normalize(student_output, eps=eps, p=2, dim=-1)
I = self.pairwise_NNs_inner(student_output) # noqa: E741
distances = self.pdist(student_output, student_output[I]) # BxD, BxD -> B
loss = -torch.log(distances + eps).mean()
return loss
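# --- Illustrative usage sketch (added by the editor, not part of the original file) ---
# KoLeoLoss spreads the embeddings of a batch apart by penalizing small nearest-neighbor
# distances; a near-duplicate batch should therefore score a much larger loss than a
# well-spread random batch.
if __name__ == "__main__":
    torch.manual_seed(0)
    koleo = KoLeoLoss()
    spread = torch.randn(32, 128)  # well-spread random embeddings
    clumped = torch.randn(1, 128).repeat(32, 1) + 1e-3 * torch.randn(32, 128)  # near-duplicates
    print(koleo(spread).item(), koleo(clumped).item())  # the clumped batch yields the larger loss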
| EXA-1-master | exa/models/dinov2/dinov2/loss/koleo_loss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
class DINOLoss(nn.Module):
def __init__(
self,
out_dim,
student_temp=0.1,
center_momentum=0.9,
):
super().__init__()
self.student_temp = student_temp
self.center_momentum = center_momentum
self.register_buffer("center", torch.zeros(1, out_dim))
self.updated = True
self.reduce_handle = None
self.len_teacher_output = None
self.async_batch_center = None
@torch.no_grad()
def softmax_center_teacher(self, teacher_output, teacher_temp):
self.apply_center_update()
# teacher centering and sharpening
return F.softmax((teacher_output - self.center) / teacher_temp, dim=-1)
@torch.no_grad()
def sinkhorn_knopp_teacher(self, teacher_output, teacher_temp, n_iterations=3):
teacher_output = teacher_output.float()
world_size = dist.get_world_size() if dist.is_initialized() else 1
Q = torch.exp(teacher_output / teacher_temp).t() # Q is K-by-B for consistency with notations from our paper
B = Q.shape[1] * world_size # number of samples to assign
K = Q.shape[0] # how many prototypes
# make the matrix sum to 1
sum_Q = torch.sum(Q)
if dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for it in range(n_iterations):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B  # the columns must sum to 1 so that Q is an assignment
return Q.t()
def forward(self, student_output_list, teacher_out_softmaxed_centered_list):
"""
Cross-entropy between softmax outputs of the teacher and student networks.
"""
# TODO: Use cross_entropy_distribution here
total_loss = 0
for s in student_output_list:
lsm = F.log_softmax(s / self.student_temp, dim=-1)
for t in teacher_out_softmaxed_centered_list:
loss = torch.sum(t * lsm, dim=-1)
total_loss -= loss.mean()
return total_loss
@torch.no_grad()
def update_center(self, teacher_output):
self.reduce_center_update(teacher_output)
@torch.no_grad()
def reduce_center_update(self, teacher_output):
self.updated = False
self.len_teacher_output = len(teacher_output)
self.async_batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
if dist.is_initialized():
self.reduce_handle = dist.all_reduce(self.async_batch_center, async_op=True)
@torch.no_grad()
def apply_center_update(self):
if self.updated is False:
world_size = dist.get_world_size() if dist.is_initialized() else 1
if self.reduce_handle is not None:
self.reduce_handle.wait()
_t = self.async_batch_center / (self.len_teacher_output * world_size)
self.center = self.center * self.center_momentum + _t * (1 - self.center_momentum)
self.updated = True
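# --- Illustrative usage sketch (added by the editor, not part of the original file) ---
# Runs the two teacher post-processings defined above on random single-process data
# (the dist.* branches are skipped): centering + sharpening, and Sinkhorn-Knopp,
# whose output is a soft assignment in which every row (one sample) sums to ~1.
if __name__ == "__main__":
    torch.manual_seed(0)
    K, B = 16, 8  # toy sizes: prototypes, local batch size
    dino_loss = DINOLoss(out_dim=K)
    teacher_out = torch.randn(B, K)
    student_out = torch.randn(B, K)
    q = dino_loss.sinkhorn_knopp_teacher(teacher_out, teacher_temp=0.07)
    print(q.shape, q.sum(dim=1))  # torch.Size([8, 16]); each row sums to ~1
    centered = dino_loss.softmax_center_teacher(teacher_out, teacher_temp=0.07)
    print(dino_loss([student_out], [centered]))  # scalar cross-entropy loss
    dino_loss.update_center(teacher_out)  # queue the EMA center update for the next step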
| EXA-1-master | exa/models/dinov2/dinov2/loss/dino_clstoken_loss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .dino_clstoken_loss import DINOLoss
from .ibot_patch_loss import iBOTPatchLoss
from .koleo_loss import KoLeoLoss
| EXA-1-master | exa/models/dinov2/dinov2/loss/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import re
import socket
from typing import Dict, List
import torch
import torch.distributed as dist
_LOCAL_RANK = -1
_LOCAL_WORLD_SIZE = -1
def is_enabled() -> bool:
"""
Returns:
True if distributed training is enabled
"""
return dist.is_available() and dist.is_initialized()
def get_global_size() -> int:
"""
Returns:
The number of processes in the process group
"""
return dist.get_world_size() if is_enabled() else 1
def get_global_rank() -> int:
"""
Returns:
The rank of the current process within the global process group.
"""
return dist.get_rank() if is_enabled() else 0
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not is_enabled():
return 0
assert 0 <= _LOCAL_RANK < _LOCAL_WORLD_SIZE
return _LOCAL_RANK
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not is_enabled():
return 1
assert 0 <= _LOCAL_RANK < _LOCAL_WORLD_SIZE
return _LOCAL_WORLD_SIZE
def is_main_process() -> bool:
"""
Returns:
True if the current process is the main one.
"""
return get_global_rank() == 0
def _restrict_print_to_main_process() -> None:
"""
This function disables printing when not in the main process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_main_process() or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
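# Illustrative note (added by the editor): once the override above is installed,
# print("msg") is silent on non-main ranks, while print("msg", force=True)
# prints on every rank.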
def _get_master_port(seed: int = 0) -> int:
MIN_MASTER_PORT, MAX_MASTER_PORT = (20_000, 60_000)
master_port_str = os.environ.get("MASTER_PORT")
if master_port_str is None:
rng = random.Random(seed)
return rng.randint(MIN_MASTER_PORT, MAX_MASTER_PORT)
return int(master_port_str)
def _get_available_port() -> int:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# A "" host address means INADDR_ANY i.e. binding to all interfaces.
# Note this is not compatible with IPv6.
s.bind(("", 0))
port = s.getsockname()[1]
return port
_TORCH_DISTRIBUTED_ENV_VARS = (
"MASTER_ADDR",
"MASTER_PORT",
"RANK",
"WORLD_SIZE",
"LOCAL_RANK",
"LOCAL_WORLD_SIZE",
)
def _collect_env_vars() -> Dict[str, str]:
return {env_var: os.environ[env_var] for env_var in _TORCH_DISTRIBUTED_ENV_VARS if env_var in os.environ}
def _is_slurm_job_process() -> bool:
return "SLURM_JOB_ID" in os.environ
def _parse_slurm_node_list(s: str) -> List[str]:
nodes = []
# Extract "hostname", "hostname[1-2,3,4-5]," substrings
p = re.compile(r"(([^\[]+)(?:\[([^\]]+)\])?),?")
for m in p.finditer(s):
prefix, suffixes = s[m.start(2) : m.end(2)], s[m.start(3) : m.end(3)]
for suffix in suffixes.split(","):
span = suffix.split("-")
if len(span) == 1:
nodes.append(prefix + suffix)
else:
width = len(span[0])
start, end = int(span[0]), int(span[1]) + 1
nodes.extend([prefix + f"{i:0{width}}" for i in range(start, end)])
return nodes
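# Illustrative example (added by the editor): the parser above expands compressed
# Slurm hostlists, e.g.
#   _parse_slurm_node_list("node[001-003],gpu17") -> ["node001", "node002", "node003", "gpu17"]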
def _check_env_variable(key: str, new_value: str):
# Only check for difference with preset environment variables
if key in os.environ and os.environ[key] != new_value:
raise RuntimeError(f"Cannot export environment variables as {key} is already set")
class _TorchDistributedEnvironment:
def __init__(self):
self.master_addr = "127.0.0.1"
self.master_port = 0
self.rank = -1
self.world_size = -1
self.local_rank = -1
self.local_world_size = -1
if _is_slurm_job_process():
return self._set_from_slurm_env()
env_vars = _collect_env_vars()
if not env_vars:
# Environment is not set
pass
elif len(env_vars) == len(_TORCH_DISTRIBUTED_ENV_VARS):
# Environment is fully set
return self._set_from_preset_env()
else:
# Environment is partially set
collected_env_vars = ", ".join(env_vars.keys())
raise RuntimeError(f"Partially set environment: {collected_env_vars}")
if torch.cuda.device_count() > 0:
return self._set_from_local()
raise RuntimeError("Can't initialize PyTorch distributed environment")
# Slurm job created with sbatch, submitit, etc...
def _set_from_slurm_env(self):
# logger.info("Initialization from Slurm environment")
job_id = int(os.environ["SLURM_JOB_ID"])
node_count = int(os.environ["SLURM_JOB_NUM_NODES"])
nodes = _parse_slurm_node_list(os.environ["SLURM_JOB_NODELIST"])
assert len(nodes) == node_count
self.master_addr = nodes[0]
self.master_port = _get_master_port(seed=job_id)
self.rank = int(os.environ["SLURM_PROCID"])
self.world_size = int(os.environ["SLURM_NTASKS"])
assert self.rank < self.world_size
self.local_rank = int(os.environ["SLURM_LOCALID"])
self.local_world_size = self.world_size // node_count
assert self.local_rank < self.local_world_size
# Single node job with preset environment (i.e. torchrun)
def _set_from_preset_env(self):
# logger.info("Initialization from preset environment")
self.master_addr = os.environ["MASTER_ADDR"]
self.master_port = os.environ["MASTER_PORT"]
self.rank = int(os.environ["RANK"])
self.world_size = int(os.environ["WORLD_SIZE"])
assert self.rank < self.world_size
self.local_rank = int(os.environ["LOCAL_RANK"])
self.local_world_size = int(os.environ["LOCAL_WORLD_SIZE"])
assert self.local_rank < self.local_world_size
# Single node and GPU job (i.e. local script run)
def _set_from_local(self):
# logger.info("Initialization from local")
self.master_addr = "127.0.0.1"
self.master_port = _get_available_port()
self.rank = 0
self.world_size = 1
self.local_rank = 0
self.local_world_size = 1
def export(self, *, overwrite: bool) -> "_TorchDistributedEnvironment":
# See the "Environment variable initialization" section from
# https://pytorch.org/docs/stable/distributed.html for the complete list of
# environment variables required for the env:// initialization method.
env_vars = {
"MASTER_ADDR": self.master_addr,
"MASTER_PORT": str(self.master_port),
"RANK": str(self.rank),
"WORLD_SIZE": str(self.world_size),
"LOCAL_RANK": str(self.local_rank),
"LOCAL_WORLD_SIZE": str(self.local_world_size),
}
if not overwrite:
for k, v in env_vars.items():
_check_env_variable(k, v)
os.environ.update(env_vars)
return self
def enable(*, set_cuda_current_device: bool = True, overwrite: bool = False, allow_nccl_timeout: bool = False):
"""Enable distributed mode
Args:
set_cuda_current_device: If True, call torch.cuda.set_device() to set the
current PyTorch CUDA device to the one matching the local rank.
overwrite: If True, overwrites already set variables. Else fails.
allow_nccl_timeout: If True, set NCCL_ASYNC_ERROR_HANDLING=1 so that
    torch.distributed timeouts are honored with the NCCL backend.
"""
global _LOCAL_RANK, _LOCAL_WORLD_SIZE
if _LOCAL_RANK >= 0 or _LOCAL_WORLD_SIZE >= 0:
raise RuntimeError("Distributed mode has already been enabled")
torch_env = _TorchDistributedEnvironment()
torch_env.export(overwrite=overwrite)
if set_cuda_current_device:
torch.cuda.set_device(torch_env.local_rank)
if allow_nccl_timeout:
# This allows using torch.distributed timeouts with the NCCL backend
key, value = "NCCL_ASYNC_ERROR_HANDLING", "1"
if not overwrite:
_check_env_variable(key, value)
os.environ[key] = value
dist.init_process_group(backend="nccl")
dist.barrier()
# Finalize setup
_LOCAL_RANK = torch_env.local_rank
_LOCAL_WORLD_SIZE = torch_env.local_world_size
_restrict_print_to_main_process()
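# --- Illustrative usage sketch (added by the editor, not part of the original file) ---
# Typical entry-point usage, assuming the package is importable as `dinov2`: call
# enable() once before building models so torch.distributed is initialized (from a
# Slurm allocation, a torchrun launch, or a plain local run) and the rank helpers
# above return meaningful values. Requires at least one CUDA device, since enable()
# selects the NCCL backend and sets the current CUDA device.
#
#   import dinov2.distributed as distributed
#
#   distributed.enable(overwrite=True)
#   if distributed.is_main_process():
#       print(f"running on {distributed.get_global_size()} process(es)")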
| EXA-1-master | exa/models/dinov2/dinov2/distributed/__init__.py |