python_code | repo_name | file_path
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchmultimodal.modules.losses.mdetr import (
box_losses,
BoxLosses,
soft_token_prediction_loss,
)
def contrastive_alignment_loss(
projected_queries: Tensor,
projected_tokens: Tensor,
target_tokens: List[List[List[int]]],
indices: List[Tuple[Tensor, Tensor]],
num_boxes: int,
tokenized: Any,
temperature: float = 0.07,
) -> Tensor:
"""Contrastive alignment loss.
Enforces alignment between the text representations after cross encoder and the
object representations after the decoder.
projected_queries (Tensor): Tensor containing object representations
projected to query dimension.
Size: (batch_size, num_queries, contrastive_dim)
projected_tokens (Tensor): Tensor containing text representations projected
to token dimension.
Size: (batch_size, num_tokens, contrastive_dim)
target_tokens (List[List[List[int]]]): A very nested list of tokens
that correspond to each target. From outermost to innermost:
batch, object, list of disjoint (start, end) tokens
indices (List[Tuple[Tensor, Tensor]]): A list of size batch_size,
containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
num_boxes (int): Normalization factor. Should equal the average number of
boxes per local batch.
tokenized (Any): Tokenized output from a transformers fast tokenizer.
Used for token lookup based on character positions.
temperature (float): Scaling factor used in calculating the logits.
Default: 0.07
"""
logits = (
torch.matmul(projected_queries, projected_tokens.transpose(-1, -2))
/ temperature
) # BS x (num_queries) x (num_tokens)
positive_map = construct_positive_map(logits, target_tokens, indices, tokenized)
positive_logits = -logits.masked_fill(~positive_map, 0)
negative_logits = logits
# Calculate the contrastive loss for all objects
boxes_with_pos = positive_map.any(2)
pos_term = positive_logits.sum(2)
neg_term = negative_logits.logsumexp(2)
nb_pos = positive_map.sum(2) + 1e-6
box_to_token_loss = (
((pos_term / nb_pos + neg_term)).masked_fill(~boxes_with_pos, 0).sum()
)
# Calculate the contrastive loss for all tokens
tokens_with_pos = positive_map.any(1)
pos_term = positive_logits.sum(1)
neg_term = negative_logits.logsumexp(1)
nb_pos = positive_map.sum(1) + 1e-6
tokens_to_boxes_loss = (
((pos_term / nb_pos + neg_term)).masked_fill(~tokens_with_pos, 0).sum()
)
tot_loss = (box_to_token_loss + tokens_to_boxes_loss) / 2
return tot_loss / num_boxes
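# Illustrative sketch (added for exposition, not part of the original module):
# calling contrastive_alignment_loss on toy inputs. The caption, character
# spans, tensor shapes, and matcher indices below are assumptions chosen only
# to exercise the function; they do not come from a real MDETR batch.
def _example_contrastive_alignment_loss() -> Tensor:
    from transformers import RobertaTokenizerFast

    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    tokenized = tokenizer.batch_encode_plus(
        ["A man on the phone ."], padding="longest", return_tensors="pt"
    )
    num_tokens = tokenized["input_ids"].shape[1]
    projected_queries = torch.randn(1, 4, 8)  # (batch_size, num_queries, contrastive_dim)
    projected_tokens = torch.randn(1, num_tokens, 8)
    # One target object grounded to the characters of "man", i.e. span [2, 5).
    target_tokens = [[[[2, 5]]]]
    # Query 1 was matched to target 0 (as a Hungarian matcher might decide).
    indices = [(torch.tensor([1]), torch.tensor([0]))]
    return contrastive_alignment_loss(
        projected_queries, projected_tokens, target_tokens, indices, 1, tokenized
    )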
def char_to_token(
encodings,
batch_or_char_index: int,
char_index: Optional[int] = None,
sequence_index: int = 0,
):
if char_index is not None:
batch_index = batch_or_char_index
else:
batch_index = 0
char_index = batch_or_char_index
return encodings[batch_index].char_to_token(char_index, sequence_index)
def construct_positive_map(
logits: Tensor,
target_tokens: List[List[List[int]]],
indices: List[Tuple[Tensor, Tensor]],
tokenized: Any,
):
# construct a map such that positive_map[k, i,j] = True iff query i is associated to token j in batch item k
# For efficiency, the construction happens on CPU, then the whole matrix is transferred to GPU in one go.
positive_map = torch.zeros(logits.shape, dtype=torch.bool)
for i, ((idx_src, idx_tgt), tgt) in enumerate(zip(indices, target_tokens)):
cur_tokens = [tgt[j] for j in idx_tgt]
for j, tok_list in enumerate(cur_tokens):
for (beg, end) in tok_list:
beg_pos = char_to_token(tokenized, i, beg)
end_pos = char_to_token(tokenized, i, end - 1)
if beg_pos is None and end_pos is None:
raise ValueError(
"At least one of beg_pos and end_pos must not be None"
)
positive_map[i, idx_src[j], beg_pos : end_pos + 1].fill_(True)
return positive_map.to(logits.device)
def masked_dict_accuracy(
pred_dict: Optional[Dict[str, Tensor]] = None,
label_dict: Optional[Dict[str, Tensor]] = None,
mask_dict: Optional[Dict[str, Tensor]] = None,
answer_type_key: Optional[str] = "answer_type",
) -> Dict[str, Tensor]:
accuracies = OrderedDict()
for k in pred_dict.keys():
if mask_dict is None or mask_dict[k] is None:
mask = torch.ones_like(pred_dict[k])
else:
mask = mask_dict[k]
accuracies[f"{k}_accuracy"] = (
(pred_dict[k][mask].argmax(-1) == label_dict[k][mask]).sum() / mask.sum()
if mask.any()
else torch.as_tensor(1.0, device=mask.device)
)
weighted = sum(
[
accuracies[f"{k}_accuracy"] * mask_dict[k].sum()
for k in pred_dict.keys()
if k != answer_type_key
]
)
accuracies["answer_total_accuracy"] = (
accuracies[f"{answer_type_key}_accuracy"]
* weighted
/ label_dict[answer_type_key].numel()
)
return accuracies
def masked_dict_cross_entropy(
pred_dict: Optional[Dict[str, Tensor]] = None,
label_dict: Optional[Dict[str, Tensor]] = None,
mask_dict: Optional[Dict[str, Tensor]] = None,
) -> Dict[str, Tensor]:
losses = OrderedDict()
if pred_dict.keys() != label_dict.keys():
raise ValueError("Keys of pred_dict and label_dict must match")
for k in pred_dict.keys():
if mask_dict is None or mask_dict[k] is None:
mask = torch.ones_like(pred_dict[k])
else:
mask = mask_dict[k]
norm_factor = mask.sum() if mask.any() else 1.0
losses[f"{k}_loss"] = (
F.cross_entropy(pred_dict[k], label_dict[k]).masked_fill(~mask, 0).sum()
/ norm_factor
)
return losses
class MDETRLoss(nn.Module):
def __init__(
self,
soft_token_loss: Callable[..., Tensor],
box_losses: Callable[..., BoxLosses],
contrastive_alignment_loss: Optional[nn.Module] = None,
vqa_losses: Optional[Iterable[Callable[..., Dict[str, Tensor]]]] = None,
):
super().__init__()
self.soft_token_loss = soft_token_loss
self.box_losses = box_losses
self.contrastive_alignment_loss = contrastive_alignment_loss
self.vqa_losses = vqa_losses
def get_average_num_boxes_across_workers(self, num_boxes: Tensor):
# Compute the average number of target boxes across all workers for normalization purposes
if not (
torch.distributed.is_available() and torch.distributed.is_initialized()
):
return torch.clamp(num_boxes, min=1).item()
torch.distributed.all_reduce(num_boxes)
num_boxes_all_workers = torch.clamp(
num_boxes / torch.distributed.get_world_size(), min=1
).item()
return num_boxes_all_workers
def total_losses_with_weights(
self,
loss_dict: Dict[str, Tensor],
weight_dict: Optional[Dict[str, float]] = None,
) -> torch.Tensor:
for k in weight_dict.keys():
if k not in loss_dict.keys():
raise ValueError(f"Weight dict contains invalid key {k}")
return sum([weight_dict[k] * loss_dict[k] for k in weight_dict.keys()])
def forward(
self,
pred_logits: Tensor,
pred_boxes: Tensor,
targets: List[Dict[str, Any]],
positive_map,
indices: List[Tuple[Tensor, Tensor]],
contrastive_query_embeddings: Optional[Tensor] = None,
contrastive_token_embeddings: Optional[Tensor] = None,
tokenized: Optional[Any] = None,
vqa_preds: Optional[Dict[str, Tensor]] = None,
vqa_labels: Optional[Dict[str, Tensor]] = None,
vqa_masks: Optional[Dict[str, Tensor]] = None,
weight_dict: Optional[Dict[str, float]] = None,
) -> Dict[str, Tensor]:
target_boxes = [t["boxes"] for t in targets]
target_tokens = [t["tokens_positive"] for t in targets]
n_target_boxes = [len(t) for t in target_boxes]
num_boxes = sum(n_target_boxes)
num_boxes = torch.as_tensor(
[num_boxes], dtype=torch.float, device=pred_logits.device
)
num_boxes_all_workers = self.get_average_num_boxes_across_workers(num_boxes)
self.pred_logits = pred_logits
self.n_target_boxes = n_target_boxes
self.positive_map = positive_map
self.indices = indices
self.num_boxes_all_workers = num_boxes_all_workers
soft_token_loss = self.soft_token_loss(
pred_logits, n_target_boxes, positive_map, indices, num_boxes_all_workers
)
box_losses = self.box_losses(
pred_boxes, target_boxes, indices, num_boxes_all_workers
)
loss_dict = {
"soft_token_loss": soft_token_loss,
"l1_loss": box_losses.l1_loss,
"giou_loss": box_losses.giou_loss,
}
if self.contrastive_alignment_loss is not None:
if (
contrastive_query_embeddings is None
or contrastive_token_embeddings is None
or tokenized is None
):
raise ValueError(
"For contrastive alignment loss must pass contrastive query/token embeddings and tokenized text"
)
contrastive_alignment_loss = self.contrastive_alignment_loss(
contrastive_query_embeddings,
contrastive_token_embeddings,
target_tokens,
indices,
num_boxes_all_workers,
tokenized,
)
loss_dict.update(contrastive_alignment_loss=contrastive_alignment_loss)
if self.vqa_losses is not None:
if vqa_preds is None or vqa_labels is None:
raise ValueError("For QA loss qa_preds and qa_labels must not be None")
for vqa_loss in self.vqa_losses:
loss_dict.update(vqa_loss(vqa_preds, vqa_labels, vqa_masks))
if weight_dict is not None:
total_loss = self.total_losses_with_weights(loss_dict, weight_dict)
loss_dict.update(total_loss=total_loss)
return loss_dict
def build_mdetr_loss(
do_qa: bool = False,
no_object_weight: float = 0.1,
temperature: Optional[float] = None,
) -> MDETRLoss:
soft_token_loss = partial(
soft_token_prediction_loss, no_object_weight=no_object_weight
)
if temperature is not None:
contrastive_loss = partial(contrastive_alignment_loss, temperature=temperature)
else:
contrastive_loss = None
if do_qa:
vqa_losses = [masked_dict_cross_entropy, masked_dict_accuracy]
else:
vqa_losses = None
loss = MDETRLoss(
soft_token_loss=soft_token_loss,
box_losses=box_losses,
contrastive_alignment_loss=contrastive_loss,
vqa_losses=vqa_losses,
)
return loss
def build_weight_dict(
args,
vqa_keys: Optional[Iterable[str]] = None,
include_contrastive_loss: bool = True,
):
weight_dict = {
"soft_token_loss": args.ce_loss_coef,
"l1_loss": args.bbox_loss_coef,
"giou_loss": args.giou_loss_coef,
}
if vqa_keys is not None:
for k in vqa_keys:
weight_dict.update({f"{k}_loss": args.qa_loss_coef})
if include_contrastive_loss:
weight_dict.update(contrastive_alignment_loss=args.contrastive_align_loss_coef)
return weight_dict
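# Illustrative sketch (added for exposition, not part of the original module):
# build_weight_dict only reads loss coefficients off the args object, so a bare
# Namespace with the default coefficients from utils/args_parse.py is enough.
# The single VQA key below is an assumption for demonstration only.
def _example_build_weight_dict() -> Dict[str, float]:
    from argparse import Namespace

    args = Namespace(
        ce_loss_coef=1,
        bbox_loss_coef=5,
        giou_loss_coef=2,
        qa_loss_coef=1,
        contrastive_align_loss_coef=1,
    )
    # Returns weights keyed by loss name, e.g. {"soft_token_loss": 1, "l1_loss": 5,
    # "giou_loss": 2, "answer_type_loss": 1, "contrastive_alignment_loss": 1}.
    return build_weight_dict(args, vqa_keys=["answer_type"], include_contrastive_loss=True)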
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/loss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Collections of utilities related to optimization."""
from bisect import bisect_right
import torch
def update_ema(model, model_ema, decay):
"""Apply exponential moving average update.
Based on the implementation in the MDETR repo: https://bit.ly/3J5fspI.
The weights are updated in-place as follows:
w_ema = w_ema * decay + (1 - decay) * w
Args:
model: active model that is being optimized
model_ema: running average model
decay: exponential decay parameter
"""
with torch.no_grad():
if hasattr(model, "module"):
# unwrapping DDP
model = model.module
msd = model.state_dict()
for k, ema_v in model_ema.state_dict().items():
model_v = msd[k].detach()
ema_v.copy_(ema_v * decay + (1.0 - decay) * model_v)
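# Illustrative sketch (added for exposition, not part of the original module):
# one EMA step on two tiny linear layers, checking the update rule above.
def _example_update_ema():
    model = torch.nn.Linear(2, 2)
    model_ema = torch.nn.Linear(2, 2)
    with torch.no_grad():
        model_ema.weight.zero_()
    update_ema(model, model_ema, decay=0.9)
    # With the EMA weight starting at zero: w_ema = 0.9 * 0 + 0.1 * w.
    assert torch.allclose(model_ema.weight, 0.1 * model.weight)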
def adjust_learning_rate(
optimizer,
epoch: int,
curr_step: int,
num_training_steps: int,
args,
):
"""Adjust the lr according to the schedule.
Based on the implementation in the MDETR repo: https://bit.ly/3J5fspI.
Args:
optimizer: torch optimizer to update.
epoch(int): number of the current epoch.
curr_step(int): number of optimization steps taken so far.
num_training_steps(int): total number of optimization steps.
args: additional training dependent args:
- lr_drop(int): number of epochs before dropping the learning rate.
- fraction_warmup_steps(float): fraction of steps over which the lr will be increased to its peak.
- lr(float): base learning rate
- lr_backbone(float): learning rate of the backbone
- text_encoder_lr(float): learning rate of the text encoder
- schedule(str): the requested learning rate schedule:
"step": all lrs divided by 10 after lr_drop epochs
"multistep": divided by 2 after lr_drop epochs, then by 2 after every 50 epochs
"linear_with_warmup": same as "step" for backbone + transformer, but for the text encoder, linearly
increase for a fraction of the training, then linearly decrease back to 0.
"all_linear_with_warmup": same as "linear_with_warmup" for all learning rates involved.
"""
num_warmup_steps: int = round(args.fraction_warmup_steps * num_training_steps)
if args.schedule == "step":
gamma = 0.1 ** (epoch // args.lr_drop)
text_encoder_gamma = gamma
elif args.schedule == "multistep":
milestones = list(range(args.lr_drop, args.epochs, 50))
gamma = 0.5 ** bisect_right(milestones, epoch)
text_encoder_gamma = gamma
elif args.schedule == "linear_with_warmup":
gamma = 0.1 ** (epoch // args.lr_drop)
if curr_step < num_warmup_steps:
text_encoder_gamma = float(curr_step) / float(max(1, num_warmup_steps))
else:
text_encoder_gamma = max(
0.0,
float(num_training_steps - curr_step)
/ float(max(1, num_training_steps - num_warmup_steps)),
)
elif args.schedule == "all_linear_with_warmup":
if curr_step < num_warmup_steps:
text_encoder_gamma = float(curr_step) / float(max(1, num_warmup_steps))
else:
text_encoder_gamma = max(
0.0,
float(num_training_steps - curr_step)
/ float(max(1, num_training_steps - num_warmup_steps)),
)
gamma = text_encoder_gamma
else:
raise NotImplementedError
base_lrs = [args.lr, args.lr_backbone, args.text_encoder_lr]
gammas = [gamma, gamma, text_encoder_gamma]
assert len(optimizer.param_groups) == len(base_lrs)
for param_group, lr, gamma_group in zip(optimizer.param_groups, base_lrs, gammas):
param_group["lr"] = lr * gamma_group
def build_optimizer(model, args):
param_dicts = [
{
"params": [
p
for n, p in model.named_parameters()
if "backbone" not in n and "text_encoder" not in n and p.requires_grad
]
},
{
"params": [
p
for n, p in model.named_parameters()
if "backbone" in n and p.requires_grad
],
"lr": args.lr_backbone,
},
{
"params": [
p
for n, p in model.named_parameters()
if "text_encoder" in n and p.requires_grad
],
"lr": args.text_encoder_lr,
},
]
optimizer = torch.optim.AdamW(
param_dicts, lr=args.lr, weight_decay=args.weight_decay
)
return optimizer
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/optimizer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import random
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import utils.dist as dist
from data.datamodule import GQADataModule
from loss import build_mdetr_loss, build_weight_dict
from matcher import HungarianMatcher
from torchmultimodal.models.mdetr.model import mdetr_for_vqa
from utils.args_parse import get_args_parser
from utils.metrics import MetricLogger
from utils.misc import targets_to
@torch.no_grad()
def evaluate(
model,
matcher,
loss,
data_loader,
device,
weight_dict,
):
model.eval()
metric_logger = MetricLogger(delimiter=" ")
header = "Test:"
for batch_dict in metric_logger.log_every(data_loader, 10, header):
samples = [x.to(device) for x in batch_dict["samples"]]
targets = batch_dict["targets"]
text = [t["tokenized"].to(device) for t in targets]
tokenized = batch_dict["batch_encoding"]
targets = targets_to(targets, device)
target_boxes = [t["boxes"] for t in targets]
answers = {k: v.to(device) for k, v in batch_dict["answers"].items()}
answer_types = {
k: v.to(device) for k, v in batch_dict["answer_type_mask"].items()
}
positive_map = (
batch_dict["positive_map"].to(device)
if "positive_map" in batch_dict
else None
)
outputs = model(
samples,
text,
)
indices = matcher(
outputs.model_output.pred_logits,
outputs.model_output.pred_boxes,
target_boxes,
positive_map,
)
loss_dict = loss(
outputs.model_output.pred_logits,
outputs.model_output.pred_boxes,
targets,
positive_map,
indices,
outputs.contrastive_embeddings.query_embeddings,
outputs.contrastive_embeddings.token_embeddings,
tokenized,
outputs.vqa_preds,
answers,
answer_types,
weight_dict,
)
loss_dict_reduced = dist.reduce_dict(loss_dict)
metric_logger.update(**loss_dict_reduced)
loss_dict_reduced_scaled = {
k: v * weight_dict[k]
for k, v in loss_dict_reduced.items()
if k in weight_dict
}
loss_dict_reduced_unscaled = {
f"{k}_unscaled": v for k, v in loss_dict_reduced.items()
}
metric_logger.update(
loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled,
)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
return stats
def main(args):
# Init distributed mode
dist.init_distributed_mode(args)
# Update dataset specific configs
if args.dataset_config is not None:
# https://stackoverflow.com/a/16878364
d = vars(args)
with open(args.dataset_config, "r") as f:
cfg = json.load(f)
d.update(cfg)
device = torch.device(args.device)
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
# fix the seed for reproducibility
seed = args.seed + rank
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.use_deterministic_algorithms(True)
# Set up datamodule
datamodule = GQADataModule(args)
datamodule.setup("val")
val_loader = datamodule.val_dataloader()
# Build the model, matcher, and losses
model = mdetr_for_vqa()
matcher = HungarianMatcher(
args.matcher_cost_class, args.matcher_cost_bbox, args.matcher_cost_giou
)
loss = build_mdetr_loss(True, args.no_object_weight, args.temperature)
model.to(device)
# Loss weights
weight_dict = build_weight_dict(
args, model.vqa_heads.keys(), include_contrastive_loss=False
)
model_ema = deepcopy(model) if args.ema else None
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=True
)
model_without_ddp = model.module
if args.resume.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location="cpu", check_hash=True
)
else:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"], strict=False)
# Load EMA model
if "model_ema" not in checkpoint:
print("WARNING: ema model not found in checkpoint, resetting to current model")
model_ema = deepcopy(model_without_ddp)
else:
model_ema.load_state_dict(checkpoint["model_ema"], strict=False)
test_model = model_ema if model_ema is not None else model
test_stats = evaluate(
model=test_model,
matcher=matcher,
loss=loss,
data_loader=val_loader,
device=device,
weight_dict=weight_dict,
)
print(test_stats)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"DETR training and evaluation script", parents=[get_args_parser()]
)
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/vqa_eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from examples.mdetr.matcher import HungarianMatcher
from tests.test_utils import assert_expected, set_rng_seed
from torchvision.ops.boxes import box_convert
@pytest.fixture(autouse=True)
def rng():
set_rng_seed(0)
class TestMatcher:
@pytest.fixture()
def batch_size(self):
return 2
@pytest.fixture()
def num_classes(self):
return 7
@pytest.fixture()
def max_slice_len(self):
return 3
@pytest.fixture()
def n_boxes_per_sample(self):
return [3, 8]
@pytest.fixture()
def total_boxes(self, n_boxes_per_sample):
return sum(n_boxes_per_sample)
@pytest.fixture()
def positive_map(self, max_slice_len, total_boxes, num_classes):
positive_map = torch.zeros(total_boxes, num_classes)
for i in range(total_boxes):
start_idx = random.randint(0, num_classes - max_slice_len)
increment = random.randint(2, max_slice_len)
positive_map[i, start_idx : start_idx + increment] = 1
return positive_map
@pytest.fixture()
def construct_valid_boxes(self):
def _construct_valid_boxes(n_boxes):
boxes = []
for _ in range(n_boxes):
x1, y1 = torch.rand(2).unbind(-1)
x2 = random.uniform(x1.item(), 1)
y2 = random.uniform(y1.item(), 1)
box = box_convert(
torch.Tensor([x1, y1, x2, y2]), in_fmt="xyxy", out_fmt="cxcywh"
)
boxes.append(box)
return torch.stack(boxes)
return _construct_valid_boxes
@pytest.fixture()
def target_boxes(self, construct_valid_boxes, n_boxes_per_sample):
return [construct_valid_boxes(n_boxes) for n_boxes in n_boxes_per_sample]
@pytest.fixture()
def matcher(self):
return HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
@pytest.mark.parametrize(
"num_queries,expected",
[
(
12,
[
(torch.LongTensor([7, 10, 11]), torch.LongTensor([0, 1, 2])),
(
torch.LongTensor([0, 1, 3, 4, 5, 6, 9, 10]),
torch.LongTensor([0, 7, 3, 6, 5, 4, 2, 1]),
),
],
),
(
5,
[
(torch.LongTensor([0, 1, 4]), torch.LongTensor([2, 1, 0])),
(
torch.LongTensor([0, 1, 2, 3, 4]),
torch.LongTensor([1, 5, 4, 6, 2]),
),
],
),
],
)
def test_matcher(
self,
batch_size,
num_classes,
construct_valid_boxes,
target_boxes,
positive_map,
matcher,
num_queries,
expected,
):
pred_logits = torch.randn(batch_size, num_queries, num_classes)
pred_boxes = construct_valid_boxes(batch_size * num_queries).reshape(
batch_size, num_queries, -1
)
actual = matcher(pred_logits, pred_boxes, target_boxes, positive_map)
for actual_sample, expected_sample in zip(actual, expected):
assert_expected(actual_sample[0], expected_sample[0])
assert_expected(actual_sample[1], expected_sample[1])
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/tests/test_matcher.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import pytest
import torch
from examples.mdetr.data.postprocessors import PostProcessFlickr
from tests.test_utils import assert_expected, set_rng_seed
@pytest.fixture(scope="class")
def random():
set_rng_seed(0)
class TestFlickrPostProcessor:
@pytest.fixture(scope="class")
def batch_size(self):
return 2
@pytest.fixture(scope="class")
def n_queries(self):
return 5
@pytest.fixture(scope="class")
def n_classes(self):
return 7
@pytest.fixture(scope="class")
def max_seq_len(self):
return 8
@pytest.fixture(scope="class")
def pred_logits(self, random, batch_size, n_queries, n_classes):
return torch.randn((batch_size, n_queries, n_classes + 1))
@pytest.fixture(scope="class")
def pred_boxes(self, random, batch_size, n_queries):
return torch.rand((batch_size, n_queries, 4))
@pytest.fixture(scope="class")
def target_sizes(self):
return torch.Tensor([[100, 100], [50, 50]])
@pytest.fixture(scope="class")
def n_tokens(self):
return [[2, 3, 4], [2, 2, 2, 3]]
@pytest.fixture(scope="class")
def phrases_per_sample(self, n_tokens):
return [len(x) for x in n_tokens]
@pytest.fixture(scope="class")
def starting_indices(self, random, n_classes, n_tokens):
return [
torch.randint(0, n_classes + 1 - max(tok), (len(tok),)) for tok in n_tokens
]
@pytest.fixture(scope="class")
def pos_map(self, n_tokens, starting_indices, n_classes):
def _construct_test_pos_map_for_sample(n_toks, starting_indices, max_length):
assert len(n_toks) == len(
starting_indices
), "n_toks and starting_indices must have same length"
out = torch.zeros((len(n_toks), max_length))
idx_list = []
for i, (n_tok, starting_idx) in enumerate(zip(n_toks, starting_indices)):
r = torch.arange(starting_idx, starting_idx + n_tok).unsqueeze(-1)
idx_list.append(torch.cat([i * torch.ones_like(r), r], dim=-1))
indices = torch.cat(idx_list)
out.index_put_(tuple(indices.t()), torch.Tensor([1]))
out = out / out.sum(axis=1).unsqueeze(-1)
return out
assert len(n_tokens) == len(
starting_indices
), "n_toks and starting_indices must have same length"
bs = len(n_tokens)
pos_map = [
_construct_test_pos_map_for_sample(
n_tokens[i], starting_indices[i], n_classes + 1
)
for i in range(bs)
]
return pos_map
@pytest.fixture(scope="class")
def batched_pos_map(self, n_tokens, n_classes, pos_map):
n_boxes = sum([len(x) for x in n_tokens])
batched_pos_map = torch.zeros((n_boxes, n_classes + 1), dtype=torch.bool)
cur_count = 0
for sample in pos_map:
batched_pos_map[
cur_count : cur_count + len(sample), : sample.shape[1]
] = sample
cur_count += len(sample)
assert cur_count == len(batched_pos_map)
return batched_pos_map
@pytest.fixture(scope="class")
def transform(self):
transform = PostProcessFlickr()
return transform
def test_invalid_inputs(
self,
transform,
pred_logits,
pred_boxes,
target_sizes,
pos_map,
batched_pos_map,
phrases_per_sample,
):
with pytest.raises(TypeError):
_ = transform(
output_logits=pred_logits,
output_bbox=pred_boxes,
target_sizes=target_sizes,
positive_map=pos_map,
phrases_per_sample=phrases_per_sample,
)
with pytest.raises(AssertionError):
incorrect_phrases_per_sample = deepcopy(phrases_per_sample)
incorrect_phrases_per_sample[-1] -= 1
_ = transform(
output_logits=pred_logits,
output_bbox=pred_boxes,
target_sizes=target_sizes,
positive_map=batched_pos_map,
phrases_per_sample=incorrect_phrases_per_sample,
)
def test_valid_inputs(
self,
transform,
pred_logits,
pred_boxes,
target_sizes,
batched_pos_map,
phrases_per_sample,
batch_size,
n_queries,
):
actual = transform(
output_logits=pred_logits,
output_bbox=pred_boxes,
target_sizes=target_sizes,
positive_map=batched_pos_map,
phrases_per_sample=phrases_per_sample,
)
assert len(actual) == batch_size
assert len(actual[0]) == phrases_per_sample[0]
assert len(actual[1]) == phrases_per_sample[1]
assert len(actual[0][0]) == n_queries
assert len(actual[0][0][0]) == 4
# Corresponds to out[0][1][1]
expected_first_sample_val = torch.Tensor([85.535, 59.915, 86.045, 77.485])
# Corresponds to out[1][2][1]
expected_second_sample_val = torch.Tensor([41.47, 32.49, 55.57, 51.2])
assert_expected(
torch.Tensor(actual[0][1][1]),
expected_first_sample_val,
rtol=0.0,
atol=1e-2,
)
assert_expected(
torch.Tensor(actual[1][2][1]),
expected_second_sample_val,
rtol=0.0,
atol=1e-2,
)
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/tests/test_postprocessors.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.mdetr.loss import construct_positive_map, contrastive_alignment_loss
from tests.test_utils import assert_expected, set_rng_seed
from transformers import RobertaTokenizerFast
@pytest.fixture(autouse=True)
def rng():
set_rng_seed(0)
class TestContrastiveAlignmentLoss:
@pytest.fixture()
def batch_size(self):
return 2
@pytest.fixture()
def num_queries(self):
return 20
@pytest.fixture()
def num_tokens(self):
return 255
@pytest.fixture()
def contrastive_dim(self):
return 8
@pytest.fixture()
def projected_tokens(self, batch_size, num_tokens, contrastive_dim):
return torch.randn(batch_size, num_tokens, contrastive_dim)
@pytest.fixture()
def projected_queries(self, batch_size, num_queries, contrastive_dim):
return torch.randn(batch_size, num_queries, contrastive_dim)
@pytest.fixture()
def target_tokens(self):
return [
[
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[39, 44]],
[[48, 57]],
[[39, 44]],
[[15, 22]],
[[39, 44]],
[[39, 44]],
[[0, 3]],
[[39, 44]],
],
[
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[33, 48]],
[[9, 18]],
[[33, 48]],
[[33, 48]],
[[0, 5]],
[[33, 48]],
],
]
@pytest.fixture()
def indices(self):
indices = [
(torch.Tensor([5, 7, 9]), torch.Tensor([2, 1, 0])),
(
torch.Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
torch.Tensor([9, 8, 4, 6, 0, 2, 3, 1, 5, 7]),
),
]
return [(x[0].to(dtype=torch.int), x[1].to(dtype=torch.int)) for x in indices]
@pytest.fixture()
def num_boxes(self):
return 25
@pytest.fixture()
def tokenized(self):
captions = [
"Man talking on a phone , surrounded by books in an office .",
"A man on the phone surrounded by stacks of books .",
]
tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
tokenized = tokenizer.batch_encode_plus(
captions, padding="longest", return_tensors="pt"
)
return tokenized
def test_contrastive_alignment_loss(
self,
projected_queries,
projected_tokens,
target_tokens,
indices,
num_boxes,
tokenized,
):
expected = torch.tensor(30.3021)
actual = contrastive_alignment_loss(
projected_queries,
projected_tokens,
target_tokens,
indices,
num_boxes,
tokenized,
)
assert_expected(expected, actual, rtol=0, atol=1e-3)
def test_construct_positive_map(
self, batch_size, num_queries, num_tokens, target_tokens, indices, tokenized
):
logits = torch.ones(batch_size, num_queries, num_tokens)
actual = construct_positive_map(logits, target_tokens, indices, tokenized)
actual_nonzero_entries = torch.nonzero(actual)
expected_size = (batch_size, num_queries, num_tokens)
expected_nonzero_entries = torch.LongTensor(
[
[0, 5, 9],
[0, 7, 9],
[0, 9, 9],
[1, 0, 8],
[1, 0, 9],
[1, 0, 10],
[1, 1, 8],
[1, 1, 9],
[1, 1, 10],
[1, 2, 8],
[1, 2, 9],
[1, 2, 10],
[1, 3, 8],
[1, 3, 9],
[1, 3, 10],
[1, 4, 8],
[1, 4, 9],
[1, 4, 10],
[1, 5, 8],
[1, 5, 9],
[1, 5, 10],
[1, 6, 8],
[1, 6, 9],
[1, 6, 10],
[1, 7, 8],
[1, 7, 9],
[1, 7, 10],
[1, 8, 8],
[1, 8, 9],
[1, 8, 10],
[1, 9, 8],
[1, 9, 9],
[1, 9, 10],
]
)
assert actual.size() == expected_size
assert_expected(actual_nonzero_entries, expected_nonzero_entries)
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/tests/test_loss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Various utilities related to track and report metrics
"""
import datetime
import time
from collections import defaultdict, deque
from typing import Dict, Sequence
import torch
import torch.distributed as dist
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, num=1):
self.deque.append(value)
self.count += num
self.total += value * num
def synchronize_between_processes(self):
"""
Distributed synchronization of the metric
Warning: does not synchronize the deque!
"""
if not dist.is_available() or not dist.is_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
)
mb = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / mb,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
class RecallTracker:
"""Utility class to track recall@k for various k, split by categories"""
def __init__(self, topk: Sequence[int]):
"""
Parameters:
- topk : tuple of ints corresponding to the recalls being tracked (eg, recall@1, recall@10, ...)
"""
self.total_byk_bycat: Dict[int, Dict[str, int]] = {
k: defaultdict(int) for k in topk
}
self.positives_byk_bycat: Dict[int, Dict[str, int]] = {
k: defaultdict(int) for k in topk
}
def add_positive(self, k: int, category: str):
"""Log a positive hit @k for given category"""
if k not in self.total_byk_bycat:
raise RuntimeError(f"{k} is not a valid recall threshold")
self.total_byk_bycat[k][category] += 1
self.positives_byk_bycat[k][category] += 1
def add_negative(self, k: int, category: str):
"""Log a negative hit @k for given category"""
if k not in self.total_byk_bycat:
raise RuntimeError(f"{k} is not a valid recall threshold")
self.total_byk_bycat[k][category] += 1
def report(self) -> Dict[int, Dict[str, float]]:
"""Return a condensed report of the results as a dict of dict.
report[k][cat] is the recall@k for the given category
"""
report: Dict[int, Dict[str, float]] = {}
for k in self.total_byk_bycat:
assert k in self.positives_byk_bycat
report[k] = {
cat: self.positives_byk_bycat[k][cat] / self.total_byk_bycat[k][cat]
for cat in self.total_byk_bycat[k]
}
return report
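# Illustrative sketch (added for exposition, not part of the original module):
# two hits and one miss at recall@1 for an arbitrary "animals" category.
def _example_recall_tracker():
    tracker = RecallTracker(topk=(1, 10))
    tracker.add_positive(1, "animals")
    tracker.add_positive(1, "animals")
    tracker.add_negative(1, "animals")
    return tracker.report()  # {1: {"animals": 0.666...}, 10: {}}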
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/utils/metrics.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
def interpolate(
input: Tensor,
size: Optional[List[int]] = None,
scale_factor: Optional[float] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
) -> Tensor:
"""
Equivalent to nn.functional.interpolate, but with support for empty channel sizes.
"""
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
assert (
input.shape[0] != 0 or input.shape[1] != 0
), "At least one of the two first dimensions must be non zero"
if input.shape[1] == 0:
# PyTorch doesn't support a zero-sized channel dimension here, so we transpose to fake a null batch dim
return torch.nn.functional.interpolate(
input.transpose(0, 1), size, scale_factor, mode, align_corners
).transpose(0, 1)
# empty batch dimension is now supported in pytorch
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
def targets_to(targets: List[Dict[str, Any]], device):
"""Moves the target dicts to the given device."""
excluded_keys = [
"questionId",
"tokens_positive",
"tokens",
"dataset_name",
"sentence_id",
"original_img_id",
"nb_eval",
"task_id",
"original_id",
]
return [
{
k: v.to(device) if k not in excluded_keys else v
for k, v in t.items()
if k != "caption" and k != "answer_type_mask"
}
for t in targets
]
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/utils/misc.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def get_args_parser():
parser = argparse.ArgumentParser("MDETR", add_help=False)
parser.add_argument("--dataset_config", default=None, required=True)
# Transformer
parser.add_argument(
"--enc_layers",
default=6,
type=int,
help="Number of encoding layers in the transformer",
)
parser.add_argument(
"--dec_layers",
default=6,
type=int,
help="Number of decoding layers in the transformer",
)
parser.add_argument(
"--dim_feedforward",
default=2048,
type=int,
help="Intermediate size of the feedforward layers in the transformer blocks",
)
parser.add_argument(
"--hidden_dim",
default=256,
type=int,
help="Size of the embeddings (dimension of the transformer)",
)
parser.add_argument(
"--dropout", default=0.1, type=float, help="Dropout applied in the transformer"
)
parser.add_argument(
"--nheads",
default=8,
type=int,
help="Number of attention heads inside the transformer's attentions",
)
parser.add_argument(
"--num_classes", default=255, type=int, help="Number of classes"
)
parser.add_argument(
"--num_queries", default=100, type=int, help="Number of query slots"
)
parser.add_argument("--pre_norm", action="store_true")
parser.add_argument("--output_dir", default="test")
parser.add_argument(
"--freeze_text_encoder",
action="store_true",
help="Whether to freeze the weights of the text encoder",
)
parser.add_argument("--tokenizer_type", type=str, default="roberta-base")
parser.add_argument("--seed", default=42, type=int)
parser.add_argument(
"--test",
action="store_true",
help="Whether to run evaluation on val or test set",
)
parser.add_argument(
"--backbone",
default="resnet101",
type=str,
help="Name of the convolutional backbone to use such as resnet50 resnet101 timm_tf_efficientnet_b3_ns",
)
parser.add_argument(
"--no_contrastive_align_loss",
dest="contrastive_align_loss",
action="store_false",
help="Whether to add contrastive alignment loss",
)
parser.add_argument(
"--contrastive_loss_hdim",
type=int,
default=64,
help="Projection head output size before computing the contrastive alignment loss",
)
parser.add_argument(
"--temperature",
type=float,
default=0.07,
help="Temperature in the contrastive alignment loss",
)
# Matcher
parser.add_argument(
"--matcher_cost_class",
default=1,
type=float,
help="Class coefficient in the matching cost",
)
parser.add_argument(
"--matcher_cost_bbox",
default=5,
type=float,
help="L1 box coefficient in the matching cost",
)
parser.add_argument(
"--matcher_cost_giou",
default=2,
type=float,
help="giou box coefficient in the matching cost",
)
# Loss coefficients
parser.add_argument("--ce_loss_coef", default=1, type=float)
parser.add_argument("--bbox_loss_coef", default=5, type=float)
parser.add_argument("--giou_loss_coef", default=2, type=float)
parser.add_argument("--qa_loss_coef", default=1, type=float)
parser.add_argument(
"--no_object_weight",
default=0.1,
type=float,
help="Relative classification weight of the no-object class",
)
parser.add_argument("--contrastive_align_loss_coef", default=1, type=float)
parser.add_argument("--lr", default=1e-4, type=float)
parser.add_argument("--lr_backbone", default=1e-5, type=float)
parser.add_argument("--text_encoder_lr", default=5e-5, type=float)
parser.add_argument("--batch_size", default=2, type=int)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument("--epochs", default=40, type=int)
parser.add_argument("--lr_drop", default=35, type=int)
parser.add_argument(
"--epoch_chunks",
default=-1,
type=int,
help="If greater than 0, will split the training set into chunks and validate/checkpoint after each chunk",
)
parser.add_argument("--optimizer", default="adam", type=str)
parser.add_argument(
"--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm"
)
parser.add_argument(
"--eval_skip",
default=1,
type=int,
help='do evaluation every "eval_skip" frames',
)
parser.add_argument("--resume", default="", help="resume from checkpoint")
parser.add_argument("--load", default="", help="resume from checkpoint")
parser.add_argument(
"--start-epoch", default=0, type=int, metavar="N", help="start epoch"
)
parser.add_argument("--eval", action="store_true", help="Only run evaluation")
parser.add_argument(
"--schedule",
default="linear_with_warmup",
type=str,
choices=("step", "multistep", "linear_with_warmup", "all_linear_with_warmup"),
)
parser.add_argument("--ema", action="store_true")
parser.add_argument("--ema_decay", type=float, default=0.9998)
parser.add_argument(
"--fraction_warmup_steps",
default=0.01,
type=float,
help="Fraction of total number of steps",
)
parser.add_argument(
"--device", default="cuda", help="device to use for training / testing"
)
parser.add_argument(
"--world-size", default=1, type=int, help="number of distributed processes"
)
parser.add_argument(
"--dist-url", default="env://", help="url used to set up distributed training"
)
return parser
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/utils/args_parse.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import io
import os
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
return dist.group.WORLD
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
if (
not torch.distributed.is_available()
or not torch.distributed.is_initialized()
or torch.distributed.get_world_size() == 1
):
return [data]
world_size = torch.distributed.get_world_size()
cpu_group = None
if os.getenv("MDETR_CPU_REDUCE") == "1":
cpu_group = _get_global_gloo_group()
buffer = io.BytesIO()
torch.save(data, buffer)
data_view = buffer.getbuffer()
device = "cuda" if cpu_group is None else "cpu"
tensor = torch.ByteTensor(data_view).to(device)
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
size_list = [
torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)
]
if cpu_group is None:
dist.all_gather(size_list, local_size)
else:
print("gathering on cpu")
dist.all_gather(size_list, local_size, group=cpu_group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
assert isinstance(local_size.item(), int)
local_size = int(local_size.item())
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
if local_size != max_size:
padding = torch.empty(
size=(max_size - local_size,), dtype=torch.uint8, device=device
)
tensor = torch.cat((tensor, padding), dim=0)
if cpu_group is None:
dist.all_gather(tensor_list, tensor)
else:
dist.all_gather(tensor_list, tensor, group=cpu_group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
tensor = torch.split(tensor, [size, max_size - size], dim=0)[0]
buffer = io.BytesIO(tensor.cpu().numpy())
obj = torch.load(buffer)
data_list.append(obj)
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return input_dict
world_size = torch.distributed.get_world_size()
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
"""Initialize distributed training, if appropriate"""
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(
"| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True
)
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
dist.barrier()
setup_for_distributed(args.rank == 0)
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/utils/dist.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Any, Callable, Dict, Iterable, Union
import PIL
import torch
import torchvision.transforms.functional as F
from PIL.Image import Image
from torchvision import transforms as T
from torchvision.ops.boxes import box_convert
from utils.misc import interpolate
def crop(image, target, region):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd", "positive_map", "isfinal"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
target["masks"] = target["masks"][:, i : i + h, j : j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target["boxes"].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target["masks"].flatten(1).any(1)
for field in fields:
if field in target:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
[-1, 1, -1, 1]
) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
if "caption" in target:
caption = (
target["caption"]
.replace("left", "[TMP]")
.replace("right", "left")
.replace("[TMP]", "right")
)
target["caption"] = caption
return flipped_image, target
def resize(image, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(
float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)
)
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height]
)
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target["masks"] = (
interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0]
> 0.5
)
return rescaled_image, target
def pad(image, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
target["size"] = torch.tensor(padded_image[::-1])
if "masks" in target:
target["masks"] = torch.nn.functional.pad(
target["masks"], (0, padding[0], 0, padding[1])
)
return padded_image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: Dict[str, Any]):
init_boxes = len(target["boxes"])
max_patience = 100
for i in range(max_patience):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
result_img, result_target = crop(img, target, region)
if len(result_target["boxes"]) == init_boxes or i == max_patience - 1:
return result_img, result_target
return result_img, result_target
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
class ToTensor(object):
def __call__(self, img, target):
return F.to_tensor(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_convert(boxes, "xyxy", "cxcywh")
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class MDETRTransform:
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
def __init__(self, tokenizer: Callable, is_train: bool):
normalize = Compose(
[ToTensor(), Normalize(self.IMAGENET_MEAN, self.IMAGENET_STD)]
)
self.tokenizer = tokenizer
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
max_size = 1333
if is_train:
self.image_transform = Compose(
[
RandomSelect(
RandomResize(scales, max_size=max_size),
Compose(
[
RandomResize([400, 500, 600]),
RandomSizeCrop(384, max_size),
RandomResize(scales, max_size=max_size),
]
),
),
normalize,
]
)
else:
self.image_transform = Compose(
[
RandomResize([800], max_size=max_size),
normalize,
]
)
def __call__(
self, image: Union[Iterable[Image], Image], target: Dict[str, Any]
) -> torch.Tensor:
image, target = self.image_transform(image, target)
target["tokenized"] = self.tokenizer(target["caption"], return_tensors="pt")[
"input_ids"
][0]
return image, target
def create_positive_map(tokenized, tokens_positive):
"""construct a map such that positive_map[i,j] = True iff box i is associated to token j"""
positive_map = torch.zeros((len(tokens_positive), 256), dtype=torch.float)
for j, tok_list in enumerate(tokens_positive):
for (beg, end) in tok_list:
beg_pos = tokenized.char_to_token(beg)
end_pos = tokenized.char_to_token(end - 1)
if beg_pos is None:
try:
beg_pos = tokenized.char_to_token(beg + 1)
if beg_pos is None:
beg_pos = tokenized.char_to_token(beg + 2)
except Exception:
beg_pos = None
if end_pos is None:
try:
end_pos = tokenized.char_to_token(end - 2)
if end_pos is None:
end_pos = tokenized.char_to_token(end - 3)
except Exception:
end_pos = None
if beg_pos is None or end_pos is None:
continue
assert beg_pos is not None and end_pos is not None
positive_map[j, beg_pos : end_pos + 1].fill_(1)
return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
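# Illustrative sketch (added for exposition, not part of the original file):
# a positive map for a single box grounded to the word "cat" (characters
# [2, 5) of the caption). The tokenizer choice is an assumption for
# demonstration only.
def _example_create_positive_map():
    from transformers import RobertaTokenizerFast

    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    tokenized = tokenizer("a cat on a mat", return_tensors="pt")
    # Row 0 puts uniform mass on the token position(s) covering "cat".
    return create_positive_map(tokenized, [[(2, 5)]])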
class ConvertCocoPolysToMask:
def __init__(self, return_tokens=False, tokenizer=None):
self.return_tokens = return_tokens
self.tokenizer = tokenizer
def __call__(self, image: Image, target: Dict[str, Any]):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
caption = target["caption"] if "caption" in target else None
anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
isfinal = None
if anno and "isfinal" in anno[0]:
isfinal = torch.as_tensor(
[obj["isfinal"] for obj in anno], dtype=torch.float
)
tokens_positive = [] if self.return_tokens else None
if self.return_tokens and anno and "tokens" in anno[0]:
tokens_positive = [obj["tokens"] for obj in anno]
elif self.return_tokens and anno and "tokens_positive" in anno[0]:
tokens_positive = [obj["tokens_positive"] for obj in anno]
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if caption is not None:
target["caption"] = caption
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
if tokens_positive is not None:
target["tokens_positive"] = []
for i, k in enumerate(keep):
if k:
target["tokens_positive"].append(tokens_positive[i])
if isfinal is not None:
target["isfinal"] = isfinal
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor(
[obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]
)
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
if self.return_tokens and self.tokenizer is not None:
assert len(target["boxes"]) == len(target["tokens_positive"])
tokenized = self.tokenizer(caption, return_tensors="pt")
target["positive_map"] = create_positive_map(
tokenized, target["tokens_positive"]
)
return image, target
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/data/transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/data/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
from pathlib import Path
import torch
from data.transforms import ConvertCocoPolysToMask, create_positive_map
from torchvision.datasets import CocoDetection
class ModulatedDetection(CocoDetection):
"""
The base dataset class for most MDETR datasets.
Follows the API for the COCO dataset. In addition to the usual image and captions,
this class returns bounding boxes and their relationship to tokens in the caption
as part of the target.
"""
def __init__(
self,
img_folder,
ann_file,
transforms,
return_tokens,
tokenizer,
is_train=False,
):
super().__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_tokens, tokenizer=tokenizer)
self.is_train = is_train
def __getitem__(self, idx):
img, target = super().__getitem__(idx)
image_id = self.ids[idx]
coco_img = self.coco.loadImgs(image_id)[0]
caption = coco_img["caption"]
dataset_name = coco_img["dataset_name"] if "dataset_name" in coco_img else None
target = {"image_id": image_id, "annotations": target, "caption": caption}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
target["dataset_name"] = dataset_name
for extra_key in ["sentence_id", "original_img_id", "original_id", "task_id"]:
if extra_key in coco_img:
target[extra_key] = coco_img[extra_key]
if "tokens_positive_eval" in coco_img and not self.is_train:
tokenized = self.prepare.tokenizer(caption, return_tensors="pt")
target["positive_map_eval"] = create_positive_map(
tokenized, coco_img["tokens_positive_eval"]
)
target["nb_eval"] = len(target["positive_map_eval"])
return img, target
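# Hedged usage sketch for ModulatedDetection. The folder and annotation paths are
# placeholders (assumptions); running it requires the Flickr30k images and an
# MDETR-style COCO annotation file to exist locally.
def _example_modulated_detection(
    img_folder="path/to/flickr30k/images", ann_file="path/to/final_flickr_separateGT_val.json"
):
    from data.transforms import MDETRTransform
    from transformers import RobertaTokenizerFast
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    dataset = ModulatedDetection(
        img_folder,
        ann_file,
        transforms=MDETRTransform(tokenizer, is_train=False),
        return_tokens=True,
        tokenizer=tokenizer,
    )
    img, target = dataset[0]
    # target holds normalized cxcywh boxes, the caption and its tokenized ids, and a
    # (num_boxes, 256) positive_map aligning each box to tokens of the caption.
    return img, target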
GQA_TYPE_TO_ID = {"obj": 0, "attr": 1, "rel": 2, "global": 3, "cat": 4}
class GQADataset(CocoDetection):
def __init__(
self, img_folder, ann_file, transforms, return_tokens, tokenizer, ann_folder
):
super(GQADataset, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_tokens, tokenizer=tokenizer)
with open(ann_folder / "gqa_answer2id.json", "r") as f:
self.answer2id = json.load(f)
with open(ann_folder / "gqa_answer2id_by_type.json", "r") as f:
self.answer2id_by_type = json.load(f)
self.type_to_id = GQA_TYPE_TO_ID
def __getitem__(self, idx):
img, target = super(GQADataset, self).__getitem__(idx)
image_id = self.ids[idx]
coco_img = self.coco.loadImgs(image_id)[0]
caption = coco_img["caption"]
dataset_name = coco_img["dataset_name"]
question_id = coco_img["questionId"]
target = {"image_id": image_id, "annotations": target, "caption": caption}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
target["dataset_name"] = dataset_name
target["questionId"] = question_id
if coco_img["answer"] not in self.answer2id:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)
target["answer_type"] = torch.as_tensor(
self.type_to_id[coco_img["question_type"]], dtype=torch.long
)
target["answer_type_mask"] = {
f"answer_{k}": torch.BoolTensor([True])
if coco_img["question_type"] == k
else torch.BoolTensor([False])
for k in self.type_to_id.keys()
}
target["answer_type_mask"]["answer_type"] = torch.BoolTensor([True])
if coco_img["answer"] not in self.answer2id_by_type["answer_attr"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_attr"] = torch.as_tensor(
self.answer2id_by_type["answer_attr"][answer]
if coco_img["question_type"] == "attr"
else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["answer_global"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_global"] = torch.as_tensor(
self.answer2id_by_type["answer_global"][answer]
if coco_img["question_type"] == "global"
else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["answer_rel"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_rel"] = torch.as_tensor(
self.answer2id_by_type["answer_rel"][answer]
if coco_img["question_type"] == "rel"
else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["answer_cat"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_cat"] = torch.as_tensor(
self.answer2id_by_type["answer_cat"][answer]
if coco_img["question_type"] == "cat"
else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["answer_obj"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_obj"] = torch.as_tensor(
self.answer2id_by_type["answer_obj"][answer]
if coco_img["question_type"] == "obj"
else -100,
dtype=torch.long,
)
return img, target
def collate_fn(tokenizer, batch):
batch = list(zip(*batch))
final_batch = {}
final_batch["samples"] = batch[0]
final_batch["targets"] = batch[1]
if "positive_map" in batch[1][0]:
        # we batch the positive maps here
        # Since in general each batch element will have a different number of boxes,
        # we stack the boxes from all batch elements along a single dimension to avoid
        # padding. This is sufficient for our purposes.
max_len = max([v["positive_map"].shape[1] for v in batch[1]])
nb_boxes = sum([v["positive_map"].shape[0] for v in batch[1]])
batched_pos_map = torch.zeros((nb_boxes, max_len), dtype=torch.bool)
cur_count = 0
for v in batch[1]:
cur_pos = v["positive_map"]
batched_pos_map[
cur_count : cur_count + len(cur_pos), : cur_pos.shape[1]
] = cur_pos
cur_count += len(cur_pos)
assert cur_count == len(batched_pos_map)
final_batch["positive_map"] = batched_pos_map.float()
if "positive_map_eval" in batch[1][0]:
        # we batch the positive maps here
        # Since in general each batch element will have a different number of boxes,
        # we stack the boxes from all batch elements along a single dimension to avoid
        # padding. This is sufficient for our purposes.
max_len = max([v["positive_map_eval"].shape[1] for v in batch[1]])
nb_boxes = sum([v["positive_map_eval"].shape[0] for v in batch[1]])
batched_pos_map = torch.zeros((nb_boxes, max_len), dtype=torch.bool)
cur_count = 0
for v in batch[1]:
cur_pos = v["positive_map_eval"]
batched_pos_map[
cur_count : cur_count + len(cur_pos), : cur_pos.shape[1]
] = cur_pos
cur_count += len(cur_pos)
assert cur_count == len(batched_pos_map)
final_batch["positive_map_eval"] = batched_pos_map.float()
if "answer_type_mask" in batch[1][0]:
answer_types = {
k: torch.cat([b["answer_type_mask"][k] for b in batch[1]])
for k in batch[1][0]["answer_type_mask"].keys()
}
final_batch["answer_type_mask"] = answer_types
if "answer" in batch[1][0]:
answers = {}
for f in batch[1][0].keys():
if (
"answer" not in f or f == "answer" or f == "answer_type_mask"
): # We only use split_qa_heads = True
continue
answers[f] = torch.stack([b[f] for b in batch[1]])
final_batch["answers"] = answers
batch_encoding = tokenizer.batch_encode_plus(
[v["caption"] for v in batch[1]], padding="longest", return_tensors="pt"
).to(batched_pos_map.device)
final_batch["batch_encoding"] = batch_encoding._encodings
return final_batch
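# Hedged sketch of how collate_fn is plugged into a DataLoader through
# functools.partial; `dataset` is assumed to be a ModulatedDetection instance
# built with return_tokens=True, as in the example above.
def _example_collate(dataset, tokenizer, batch_size=2):
    from functools import partial
    from torch.utils.data import DataLoader
    loader = DataLoader(
        dataset, batch_size=batch_size, collate_fn=partial(collate_fn, tokenizer)
    )
    batch = next(iter(loader))
    # "positive_map" stacks every box from every sample along dim 0 (no padding),
    # so its first dimension equals the total number of boxes in the batch.
    return batch["positive_map"].shape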
def build_flickr(image_set, tokenizer, transform, args):
img_dir = Path(args.flickr_img_path) / f"{image_set}"
if args.GT_type == "merged":
identifier = "mergedGT"
elif args.GT_type == "separate":
identifier = "separateGT"
else:
raise ValueError(f"{args.GT_type} is not a valid type of annotation for flickr")
if args.test:
ann_file = Path(args.flickr_ann_path) / f"final_flickr_{identifier}_test.json"
else:
ann_file = (
Path(args.flickr_ann_path) / f"final_flickr_{identifier}_{image_set}.json"
)
is_train = image_set == "train"
dataset = ModulatedDetection(
img_dir,
ann_file,
transforms=transform,
return_tokens=True,
tokenizer=tokenizer,
is_train=is_train,
)
return dataset
def build_gqa(image_set, tokenizer, transform, args):
img_dir = Path(args.vg_img_path)
assert img_dir.exists(), f"provided VG img path {img_dir} does not exist"
assert args.gqa_split_type is not None
if image_set == "train":
datasets = []
for imset in ["train", "val"]:
ann_file = (
Path(args.gqa_ann_path)
/ f"finetune_gqa_{imset}_{args.gqa_split_type}.json"
)
datasets.append(
GQADataset(
img_dir,
ann_file,
transforms=transform,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.gqa_ann_path),
)
)
return torch.utils.data.ConcatDataset(datasets)
elif image_set == "val":
ann_file = Path(args.gqa_ann_path) / "finetune_gqa_testdev_balanced.json"
return GQADataset(
img_dir,
ann_file,
transforms=transform,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.gqa_ann_path),
)
elif image_set in ["test", "challenge", "testdev", "submission"]:
ann_file = (
Path(args.gqa_ann_path)
/ f"finetune_gqa_{image_set}_{args.gqa_split_type}.json"
)
return GQADataset(
img_dir,
ann_file,
transforms=transform,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.gqa_ann_path),
)
else:
raise ValueError(f"Unknown image set {image_set}")
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/data/dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from typing import Callable, Optional
import torch
from data.dataset import build_flickr, build_gqa, collate_fn
from data.transforms import MDETRTransform
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, DistributedSampler
from transformers import RobertaTokenizerFast
class FlickrDataModule(LightningDataModule):
def __init__(self, dataset_config, tokenizer: Optional[Callable] = None):
super().__init__()
self.dataset_config = dataset_config
self.distributed = dataset_config.distributed
self.batch_size = dataset_config.batch_size
self.tokenizer = tokenizer
def setup(self, stage: Optional[str] = None):
if self.tokenizer is None:
self.tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
self.transform = MDETRTransform(self.tokenizer, is_train=False)
self.val = build_flickr(
"val", self.tokenizer, self.transform, self.dataset_config
)
def val_dataloader(self):
if self.distributed:
sampler = DistributedSampler(self.val, shuffle=False)
else:
sampler = torch.utils.data.SequentialSampler(self.val)
data_loader_val = DataLoader(
self.val,
batch_size=self.batch_size,
sampler=sampler,
drop_last=False,
collate_fn=partial(collate_fn, self.tokenizer),
)
return data_loader_val
class GQADataModule(LightningDataModule):
def __init__(self, dataset_config, tokenizer: Optional[Callable] = None):
super().__init__()
self.dataset_config = dataset_config
self.distributed = dataset_config.distributed
self.batch_size = dataset_config.batch_size
self.epoch_chunks = dataset_config.epoch_chunks
self.tokenizer = tokenizer
def setup(self, stage: Optional[str] = None):
if self.tokenizer is None:
self.tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
self.train_transform = MDETRTransform(self.tokenizer, is_train=True)
self.val_transform = MDETRTransform(self.tokenizer, is_train=False)
if stage == "train":
self.train = build_gqa(
stage, self.tokenizer, self.train_transform, self.dataset_config
)
if stage == "val":
self.val = build_gqa(
stage, self.tokenizer, self.val_transform, self.dataset_config
)
def train_dataloader(self):
# To handle very big datasets, we chunk it into smaller parts.
if self.epoch_chunks > 0:
            print(
                f"Splitting the training set into {self.epoch_chunks} chunks of size approximately "
                f"{len(self.train) // self.epoch_chunks}"
            )
chunks = torch.chunk(torch.arange(len(self.train)), self.epoch_chunks)
datasets = [
torch.utils.data.Subset(self.train, chunk.tolist()) for chunk in chunks
]
if self.distributed:
self.samplers_train = [
DistributedSampler(ds, shuffle=True) for ds in datasets
]
else:
self.samplers_train = [
torch.utils.data.RandomSampler(ds) for ds in datasets
]
batch_samplers_train = [
torch.utils.data.BatchSampler(
sampler_train, self.batch_size, drop_last=True
)
for sampler_train in self.samplers_train
]
assert len(batch_samplers_train) == len(datasets)
train_dataloaders = [
DataLoader(
ds,
batch_sampler=batch_sampler_train,
collate_fn=partial(collate_fn, self.tokenizer),
)
for ds, batch_sampler_train in zip(datasets, batch_samplers_train)
]
return train_dataloaders
else:
if self.distributed:
self.sampler_train = DistributedSampler(self.train, shuffle=True)
else:
self.sampler_train = torch.utils.data.RandomSampler(self.train)
batch_sampler_train = torch.utils.data.BatchSampler(
self.sampler_train, self.batch_size, drop_last=True
)
train_dataloader = DataLoader(
self.train,
batch_sampler=batch_sampler_train,
collate_fn=partial(collate_fn, self.tokenizer),
)
return train_dataloader
def val_dataloader(self):
if self.distributed:
sampler = DistributedSampler(self.val, shuffle=False)
else:
sampler = torch.utils.data.SequentialSampler(self.val)
data_loader_val = DataLoader(
self.val,
batch_size=self.batch_size,
sampler=sampler,
drop_last=False,
collate_fn=partial(collate_fn, self.tokenizer),
)
return data_loader_val
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/data/datamodule.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torchvision.ops.boxes import box_convert
class PostProcessFlickr:
"""This module converts the model's output for Flickr30k entities evaluation.
This processor is intended for recall@k evaluation with respect to each phrase
in the sentence. It requires a description of each phrase (as a binary mask),
and returns a sorted list of boxes for each phrase. Based on MDETR repo:
https://github.com/ashkamath/mdetr/blob/main/models/postprocessors.py#L13.
    Inputs: output_logits (Tensor): raw token-alignment logits of the model.
            Size: (batch_size, num_queries, max_seq_len)
        output_bbox (Tensor): predicted boxes in normalized (cx, cy, w, h) format.
            Size: (batch_size, num_queries, 4)
        target_sizes (Tensor): Size of each image in the batch. For evaluation, this
            must be the original image size (before any data augmentation).
            Size: (batch_size, 2)
        positive_map (Tensor): For each phrase in the batch, contains a binary mask
            of the tokens that correspond to that phrase. Note that this is a
            "collapsed" batch, meaning that all the phrases of all the batch
            elements are stored sequentially.
            Size: (total_num_phrases, max_seq_len)
phrases_per_sample (List[int]): Number of phrases corresponding
to each batch element.
Returns: a List[List[List[float]]]: List of bounding box coordinates for each
phrase in each sample sorted by probabilities.
"""
def __call__(
self,
output_logits: Tensor,
output_bbox: Tensor,
target_sizes: Tensor,
positive_map: Tensor,
phrases_per_sample: List[int],
) -> List[List[List[float]]]:
assert output_logits.size(0) == target_sizes.size(
0
), "Logits and target sizes should both have first dim = batch_size"
assert target_sizes.size(1) == 2, "Target sizes should have second dim = 2"
batch_size = target_sizes.shape[0]
prob = F.softmax(output_logits, -1)
# convert to [x0, y0, x1, y1] format
boxes = box_convert(output_bbox, in_fmt="cxcywh", out_fmt="xyxy")
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
# and from relative [0, 1] to absolute [0, height] coordinates
boxes = boxes * scale_fct[:, None, :]
cum_sum = np.cumsum(phrases_per_sample)
curr_batch_index = 0
# binarize the map if not already binary
pos = positive_map > 1e-6
predicted_boxes: List[List[List[float]]] = [[] for _ in range(batch_size)]
# The collapsed batch dimension must match the number of items
assert (
pos.size(0) == cum_sum[-1]
), "First dimension of positive map must equal sum of phrases per sample"
if len(pos) == 0:
return predicted_boxes
        # if the first batch elements don't contain any phrases, skip them.
while cum_sum[curr_batch_index] == 0:
curr_batch_index += 1
for i in range(len(pos)):
# scores are computed by taking the max over the scores assigned to the positive tokens
scores, _ = torch.max(
pos[i].unsqueeze(0) * prob[curr_batch_index, :, :], dim=-1
)
_, indices = torch.sort(scores, descending=True)
assert (
phrases_per_sample[curr_batch_index] > 0
), "Each sample must have at least one phrase"
predicted_boxes[curr_batch_index].append(
boxes[curr_batch_index][indices].to("cpu").tolist()
)
if i == len(pos) - 1:
break
# check if we need to move to the next batch element
while i >= cum_sum[curr_batch_index] - 1:
curr_batch_index += 1
assert curr_batch_index < len(
cum_sum
), "Current batch index is not less than total number of phrases"
return predicted_boxes
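# Hedged sketch of PostProcessFlickr on random tensors. The shapes are assumptions
# chosen only to satisfy the interface: 2 images, 100 queries, 256 text tokens,
# and 3 phrases split 2/1 across the two images.
def _example_postprocess_flickr():
    postprocessor = PostProcessFlickr()
    output_logits = torch.randn(2, 100, 256)
    output_bbox = torch.rand(2, 100, 4)
    target_sizes = torch.tensor([[480, 640], [500, 375]])
    positive_map = torch.zeros(3, 256)
    positive_map[0, 1:3] = 1
    positive_map[1, 4:6] = 1
    positive_map[2, 2:5] = 1
    boxes = postprocessor(
        output_logits, output_bbox, target_sizes, positive_map, phrases_per_sample=[2, 1]
    )
    # boxes[i][j] lists the predicted boxes for phrase j of image i, sorted by score.
    return boxes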
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/data/postprocessors.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
""" Evaluator for Flickr30k """
import xml.etree.ElementTree as Et
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
import utils.dist as dist
from prettytable import PrettyTable
from torch import Tensor
from torchvision.ops.boxes import box_iou
from utils.metrics import RecallTracker
def get_sentence_data(filename) -> List[Dict[str, Any]]:
"""
Parses a sentence file from the Flickr30K Entities dataset
input:
filename - full file path to the sentence file to parse
output:
a list of dictionaries for each sentence with the following fields:
sentence - the original sentence
phrases - a list of dictionaries for each phrase with the
following fields:
phrase - the text of the annotated phrase
first_word_index - the position of the first word of
the phrase in the sentence
phrase_id - an identifier for this phrase
phrase_type - a list of the coarse categories this
phrase belongs to
"""
with open(filename, "r") as f:
sentences = f.read().split("\n")
annotations = []
for sentence in sentences:
if not sentence:
continue
first_word = []
phrases = []
phrase_id = []
phrase_type = []
words = []
current_phrase = []
add_to_phrase = False
for token in sentence.split():
if add_to_phrase:
if token[-1] == "]":
add_to_phrase = False
token = token[:-1]
current_phrase.append(token)
phrases.append(" ".join(current_phrase))
current_phrase = []
                else:
                    current_phrase.append(token)
                words.append(token)
else:
if token[0] == "[":
add_to_phrase = True
first_word.append(len(words))
parts = token.split("/")
phrase_id.append(parts[1][3:])
phrase_type.append(parts[2:])
else:
words.append(token)
sentence_data = {"sentence": " ".join(words), "phrases": []}
for index, phrase, p_id, p_type in zip(
first_word, phrases, phrase_id, phrase_type
):
sentence_data["phrases"].append(
{
"first_word_index": index,
"phrase": phrase,
"phrase_id": p_id,
"phrase_type": p_type,
}
)
annotations.append(sentence_data)
return annotations
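# Hedged sketch of the bracketed Flickr30k Entities markup that get_sentence_data
# parses; the sentence, ids, and categories below are made-up examples, and the
# file path is just a scratch location.
def _example_get_sentence_data(path="example_sentence.txt"):
    with open(path, "w") as f:
        f.write("[/EN#1/people A woman] pets [/EN#2/animals a small dog] .\n")
    parsed = get_sentence_data(path)
    # parsed[0]["sentence"] is the caption with the markup stripped, and
    # parsed[0]["phrases"] records "A woman" and "a small dog" with their
    # first word indices, phrase ids ("1", "2"), and types (["people"], ["animals"]).
    return parsed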
def get_annotations(
filename,
) -> Dict[str, Union[int, List[str], Dict[str, List[List[int]]]]]:
"""
Parses the xml files in the Flickr30K Entities dataset
input:
filename - full file path to the annotations file to parse
output:
dictionary with the following fields:
scene - list of identifiers which were annotated as
pertaining to the whole scene
nobox - list of identifiers which were annotated as
not being visible in the image
            boxes - a dictionary where the keys are phrase identifiers
                and the values are the corresponding lists of boxes in the
                [xmin ymin xmax ymax] format
height - int representing the height of the image
width - int representing the width of the image
depth - int representing the depth of the image
"""
tree = Et.parse(filename)
root = tree.getroot()
size_container = root.findall("size")[0]
anno_info: Dict[str, Union[int, List[str], Dict[str, List[List[int]]]]] = {}
all_boxes: Dict[str, List[List[int]]] = {}
all_noboxes: List[str] = []
all_scenes: List[str] = []
for size_element in size_container:
assert size_element.text
anno_info[size_element.tag] = int(size_element.text)
for object_container in root.findall("object"):
for names in object_container.findall("name"):
box_id = names.text
assert box_id
box_container = object_container.findall("bndbox")
if len(box_container) > 0:
if box_id not in all_boxes:
all_boxes[box_id] = []
xmin = int(box_container[0].findall("xmin")[0].text)
ymin = int(box_container[0].findall("ymin")[0].text)
xmax = int(box_container[0].findall("xmax")[0].text)
ymax = int(box_container[0].findall("ymax")[0].text)
all_boxes[box_id].append([xmin, ymin, xmax, ymax])
else:
nobndbox = int(object_container.findall("nobndbox")[0].text)
if nobndbox > 0:
all_noboxes.append(box_id)
scene = int(object_container.findall("scene")[0].text)
if scene > 0:
all_scenes.append(box_id)
anno_info["boxes"] = all_boxes
anno_info["nobox"] = all_noboxes
anno_info["scene"] = all_scenes
return anno_info
def _merge_boxes(boxes: List[List[int]]) -> List[List[int]]:
"""
    Return a single-element list containing the smallest enclosing box of all the provided boxes.
    The boxes are expected in [x1, y1, x2, y2] format.
"""
if len(boxes) == 1:
return boxes
np_boxes = np.asarray(boxes)
return [
[
np_boxes[:, 0].min(),
np_boxes[:, 1].min(),
np_boxes[:, 2].max(),
np_boxes[:, 3].max(),
]
]
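# Hedged example of _merge_boxes with made-up coordinates: the two boxes below
# merge into their smallest enclosing box, while a single box is returned as is.
def _example_merge_boxes():
    merged = _merge_boxes([[10, 10, 40, 40], [30, 5, 60, 35]])
    # merged == [[10, 5, 60, 40]]
    return merged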
class Flickr30kEntitiesRecallEvaluator:
def __init__(
self,
flickr_path: str,
subset: str = "test",
topk: Sequence[int] = (1, 5, 10, -1),
iou_thresh: float = 0.5,
merge_boxes: bool = False,
):
assert subset in ["train", "test", "val"], f"Wrong flickr subset {subset}"
self.topk = topk
self.iou_thresh = iou_thresh
flickr_path = Path(flickr_path)
# Load the image ids corresponding to the current subset
with open(flickr_path / f"{subset}.txt") as file_d:
self.img_ids = [line.strip() for line in file_d]
# Read the box annotations for all the images
self.imgid2boxes: Dict[str, Dict[str, List[List[int]]]] = {}
for img_id in self.img_ids:
anno_info = get_annotations(flickr_path / "Annotations" / f"{img_id}.xml")[
"boxes"
]
if merge_boxes:
merged = {}
for phrase_id, boxes in anno_info.items():
merged[phrase_id] = _merge_boxes(boxes)
anno_info = merged
self.imgid2boxes[img_id] = anno_info
# Read the sentences annotations
self.imgid2sentences: Dict[str, List[List[Optional[Dict]]]] = {}
self.all_ids: List[str] = []
tot_phrases = 0
for img_id in self.img_ids:
sentence_info = get_sentence_data(
flickr_path / "Sentences" / f"{img_id}.txt"
)
self.imgid2sentences[img_id] = [None for _ in range(len(sentence_info))]
# Some phrases don't have boxes, we filter them.
for sent_id, sentence in enumerate(sentence_info):
phrases = [
phrase
for phrase in sentence["phrases"]
if phrase["phrase_id"] in self.imgid2boxes[img_id]
]
if len(phrases) > 0:
self.imgid2sentences[img_id][sent_id] = phrases
tot_phrases += len(phrases)
self.all_ids += [
f"{img_id}_{k}"
for k in range(len(sentence_info))
if self.imgid2sentences[img_id][k] is not None
]
def evaluate(self, predictions: List[Dict]):
evaluated_ids = set()
recall_tracker = RecallTracker(self.topk)
for pred in predictions:
cur_id = f"{pred['image_id']}_{pred['sentence_id']}"
if cur_id in evaluated_ids:
print(
"Warning, multiple predictions found for sentence"
f"{pred['sentence_id']} in image {pred['image_id']}"
)
continue
# Skip the sentences with no valid phrase
if cur_id not in self.all_ids:
if len(pred["boxes"]) != 0:
print(
f"Warning, in image {pred['image_id']} we were not expecting predictions "
f"for sentence {pred['sentence_id']}. Ignoring them."
)
continue
evaluated_ids.add(cur_id)
pred_boxes = pred["boxes"]
if str(pred["image_id"]) not in self.imgid2sentences:
raise RuntimeError(f"Unknown image id {pred['image_id']}")
if (
not 0
<= int(pred["sentence_id"])
< len(self.imgid2sentences[str(pred["image_id"])])
):
raise RuntimeError(
f"Unknown sentence id {pred['sentence_id']}"
f" in image {pred['image_id']}"
)
phrases = self.imgid2sentences[str(pred["image_id"])][
int(pred["sentence_id"])
]
if len(pred_boxes) != len(phrases):
raise RuntimeError(
f"Error, got {len(pred_boxes)} predictions, expected {len(phrases)} "
f"for sentence {pred['sentence_id']} in image {pred['image_id']}"
)
for cur_boxes, phrase in zip(pred_boxes, phrases):
target_boxes = self.imgid2boxes[str(pred["image_id"])][
phrase["phrase_id"]
]
ious = box_iou(Tensor(cur_boxes), Tensor(target_boxes))
for k in self.topk:
maxi = 0
if k == -1:
maxi = ious.max()
else:
assert k > 0
maxi = ious[:k].max()
if maxi >= self.iou_thresh:
recall_tracker.add_positive(k, "all")
for phrase_type in phrase["phrase_type"]:
recall_tracker.add_positive(k, phrase_type)
else:
recall_tracker.add_negative(k, "all")
for phrase_type in phrase["phrase_type"]:
recall_tracker.add_negative(k, phrase_type)
if len(evaluated_ids) != len(self.all_ids):
print(
"ERROR, the number of evaluated sentence doesn't match. Missing predictions:"
)
un_processed = set(self.all_ids) - evaluated_ids
for missing in un_processed:
img_id, sent_id = missing.split("_")
print(f"\t sentence {sent_id} in image {img_id}")
raise RuntimeError("Missing predictions")
return recall_tracker.report()
class FlickrEvaluator(object):
def __init__(
self,
flickr_path,
subset,
top_k=(1, 5, 10, -1),
iou_thresh=0.5,
merge_boxes=False,
):
assert isinstance(top_k, (list, tuple))
self.evaluator = Flickr30kEntitiesRecallEvaluator(
flickr_path,
subset=subset,
topk=top_k,
iou_thresh=iou_thresh,
merge_boxes=merge_boxes,
)
self.predictions = []
self.results = None
def update(self, predictions):
self.predictions += predictions
def synchronize_between_processes(self):
all_predictions = dist.all_gather(self.predictions)
self.predictions = sum(all_predictions, [])
def summarize(self):
is_dist = (
torch.distributed.is_available() and torch.distributed.is_initialized()
)
if (not is_dist) or torch.distributed.get_rank() == 0:
self.results = self.evaluator.evaluate(self.predictions)
table = PrettyTable()
all_cat = sorted(list(self.results.values())[0].keys())
table.field_names = ["Recall@k"] + all_cat
score = {}
for k, v in self.results.items():
cur_results = [v[cat] for cat in all_cat]
header = "Upper_bound" if k == -1 else f"Recall@{k}"
for cat in all_cat:
score[f"{header}_{cat}"] = v[cat]
table.add_row([header] + cur_results)
print(table)
return score
return None, None
| EXA-1-master | exa/libraries/multimodal-main/examples/mdetr/data/flickr_eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.mugen.retrieval.video_clip import (
Projection,
TextEncoder,
videoclip,
VideoEncoder,
)
from tests.test_utils import assert_expected, get_asset_path, set_rng_seed
from torchmultimodal import _PATH_MANAGER
from torchmultimodal.utils.common import shift_dim
def patch_load_module_from_url(mocker):
"""Mock the ``load_module_from_url`` utility function used in ``videoclip()`` to allow
loading truncated state dicts with ``strict=False``.
"""
def patched_load_module_from_url(
model: torch.nn.Module, url: str, strict: bool = True, progress: bool = True
) -> None:
local_path = _PATH_MANAGER.get_local_path(url)
if not torch.cuda.is_available():
state_dict = torch.load(local_path, map_location=torch.device("cpu"))
else:
state_dict = torch.load(local_path)
model.load_state_dict(state_dict, strict=False)
return mocker.patch(
"examples.mugen.retrieval.video_clip.load_module_from_url",
new=patched_load_module_from_url,
)
class TestTextEncoder:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
@pytest.fixture
def utils(self, set_seed):
input_ids = torch.Tensor(
[
[101, 6315, 3793, 7099, 2005, 5604, 19204, 17629, 102],
[101, 2117, 7820, 3793, 102, 0, 0, 0, 0],
]
).to(dtype=int)
return input_ids
def test_forward(self, utils):
input_ids = utils
encoder = TextEncoder()
out = encoder(input_ids)
expected_sum = 7.1526e-07
assert_expected(actual=out.shape, expected=torch.Size([2, 768]), rtol=0, atol=0)
assert_expected(
actual=out.sum(), expected=torch.as_tensor(expected_sum), rtol=0, atol=1e-4
)
assert encoder.out_dim == 768
def test_attention_mask(self, utils):
input_ids = utils
encoder = TextEncoder()
attention_mask = encoder.build_attention_mask(input_ids)
assert_expected(
actual=attention_mask,
expected=torch.as_tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 0]]
),
)
class TestVideoEncoder:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
@pytest.fixture
def utils(self):
def make_input_video(c_dim=1):
input_shape = [1, 3, 16, 224, 224]
input_video = torch.randint(10, input_shape).float()
input_video = (
shift_dim(input_video, 1, c_dim) if c_dim != 1 else input_video
)
return input_video
return make_input_video
def test_forward(self, utils):
make_input_video = utils
input_video = make_input_video()
encoder = VideoEncoder()
out = encoder(input_video)
expected_sum = 408.3521
assert_expected(
actual=out.shape, expected=torch.Size([1, 1024])
) # batch x embedding
assert_expected(
actual=out.sum(), expected=torch.as_tensor(expected_sum), rtol=0, atol=1e-3
)
assert encoder.out_dim == 1024
def test_invalid_channels(self, utils):
make_input_video = utils
input_video = make_input_video(c_dim=3)
encoder = VideoEncoder()
with pytest.raises(ValueError):
encoder(input_video)
class TestProjection:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
@pytest.fixture
def utils(self, set_seed):
input = torch.randint(10, (2, 7)).float()
proj = Projection(in_dim=7, out_dim=3)
return proj, input
def test_forward(self, utils):
proj, input = utils
out = proj(input)
expected = torch.Tensor([[-1.2214, -0.0066, 1.2280], [-1.3886, 0.4626, 0.9260]])
assert_expected(actual=out, expected=expected, rtol=0, atol=1e-4)
class TestVideoCLIPBuilder:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
@pytest.fixture
def utils(self, set_seed):
input_text = torch.Tensor(
[
[101, 6315, 3793, 7099, 2005, 5604, 19204, 17629, 102],
[101, 2117, 7820, 3793, 102, 0, 0, 0, 0],
]
).to(dtype=int)
input_video = torch.randint(10, [2, 3, 16, 224, 224]).float()
return input_text, input_video
def test_forward_pretrained_trainable(self, utils, mocker):
input_text, input_video = utils
patch_load_module_from_url(mocker)
model = videoclip(
video_pretrain_path=get_asset_path("S3D_sample.pt"), proj_out_dim=3
)
assert next(model.encoder_a.parameters()).requires_grad
assert next(model.encoder_b.parameters()).requires_grad
output = model(features_a=input_text, features_b=input_video)
assert_expected(
actual=output.embeddings_a,
expected=torch.Tensor(
[[-0.4496, -0.3655, 0.8150], [0.2190, -0.7907, 0.5717]]
),
rtol=0,
atol=1e-3,
)
assert_expected(
actual=output.embeddings_b,
expected=torch.Tensor(
[[0.7291, -0.0462, -0.6829], [0.7157, -0.0175, -0.6982]],
),
rtol=0,
atol=1e-3,
)
def test_pretrained_untrainable(self, mocker):
patch_load_module_from_url(mocker)
model = videoclip(
text_trainable=False,
video_trainable=False,
video_pretrain_path=get_asset_path("S3D_sample.pt"),
proj_out_dim=3,
)
assert not next(model.encoder_a.parameters()).requires_grad
assert not next(model.encoder_b.parameters()).requires_grad
def test_forward_untrained_trainable(self, utils):
input_text, input_video = utils
model = videoclip(text_pretrained=False, video_pretrained=False, proj_out_dim=3)
assert next(model.encoder_a.parameters()).requires_grad
assert next(model.encoder_b.parameters()).requires_grad
output = model(features_a=input_text, features_b=input_video)
assert_expected(
actual=output.embeddings_a,
expected=torch.Tensor(
[[0.8164, -0.4178, -0.3987], [0.8147, -0.4537, -0.3611]]
),
rtol=0,
atol=1e-3,
)
assert_expected(
actual=output.embeddings_b,
expected=torch.Tensor(
[[-0.0199, 0.7168, -0.6970], [0.5802, 0.2075, -0.7876]]
),
rtol=0,
atol=1e-3,
)
def test_untrained_untrainable(self):
with pytest.warns(UserWarning):
model = videoclip(
text_pretrained=False,
text_trainable=False,
video_pretrained=False,
video_trainable=False,
proj_out_dim=3,
)
assert next(model.encoder_a.parameters()).requires_grad
assert next(model.encoder_b.parameters()).requires_grad
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/tests/retrieval/test_video_clip.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.mugen.generation.video_vqvae import video_vqvae_mugen
from tests.test_utils import assert_expected, set_rng_seed
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
@pytest.fixture(scope="module")
def params():
in_channel_dims = (2, 2)
out_channel_dims = (2, 2)
kernel_sizes = ((2, 2, 2), (2, 2, 2))
strides = ((1, 1, 1), (1, 1, 1))
return in_channel_dims, out_channel_dims, kernel_sizes, strides
@pytest.fixture(scope="module")
def input_tensor():
return torch.ones(1, 2, 2, 2, 2)
class TestVideoVQVAEMUGEN:
@pytest.fixture
def vv(self):
def create_model(model_key=None, freeze_model=False):
model = video_vqvae_mugen(
pretrained_model_key=model_key, freeze_model=freeze_model
)
model.eval()
return model
return create_model
@pytest.fixture
def input_data(self):
def create_data(seq_len):
return torch.randn(1, 3, seq_len, 256, 256)
return create_data
def test_invalid_pretrained_model_key(self, vv):
with pytest.raises(KeyError):
vv("invalid_key")
def test_freeze_model(self, vv):
model = vv("mugen_L32", freeze_model=True)
for param in model.parameters():
assert param.requires_grad is False
@pytest.mark.parametrize(
"seq_len,expected", [(8, -16005.1113), (16, 51112.2266), (32, 450732.6875)]
)
def test_forward(self, vv, input_data, seq_len, expected):
x = input_data(seq_len)
model = vv()
output = model(x)
actual = output.decoded
# ensure embed is uninitialized
assert model.codebook._is_embedding_init is False
assert_expected(actual.shape, (1, 3, seq_len, 256, 256))
assert_expected(actual.sum().item(), expected)
@pytest.mark.parametrize(
"seq_len,expected", [(8, 132017.28125), (16, -109636.0), (32, 1193122.0)]
)
def test_checkpoint(self, vv, input_data, seq_len, expected):
x = input_data(seq_len)
model_key = f"mugen_L{seq_len}"
model = vv(model_key)
# ensure embed is initialized
assert model.codebook._is_embedding_init
output = model(x)
actual_tensor = torch.sum(output.decoded)
expected_tensor = torch.tensor(expected)
assert_expected(actual_tensor, expected_tensor, rtol=1e-5, atol=1e-8)
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/tests/generation/test_video_vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.mugen.generation.text_video_gpt import text_video_gpt
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.utils.common import get_current_device
@pytest.fixture(autouse=True)
def set_seed():
return set_rng_seed(0)
@pytest.fixture
def device():
return get_current_device()
@pytest.fixture
def model_fn():
return text_video_gpt
_model_params = {
"text_seq_len": 128,
"video_seq_len": 32,
"resolution": 256,
"downsample": (4, 32, 32),
"d_model": 768,
"n_head": 8,
"dropout": 0.2,
"attn_dropout": 0.3,
"num_decoder_layers": 12,
"use_gpt_init": True,
}
def test_encode_text(model_fn, device):
test_params = {"text_seq_len": 5}
kwargs = {**_model_params, **test_params}
model = model_fn(**kwargs)
model.eval()
x = ["MUGEN walks from right to left."]
actual = model.encode(x, "in", device=device)
expected = torch.tensor([[80, 118, 110, 85, 70]])
assert_expected(actual, expected)
@pytest.mark.parametrize(
"video_seq_len, expected", [(8, (1, 128)), (16, (1, 256)), (32, (1, 512))]
)
def test_encode_video(model_fn, video_seq_len, expected):
test_params = {"video_seq_len": video_seq_len}
kwargs = {**_model_params, **test_params}
video_input_shape = tuple(
[kwargs[_f] for _f in ["video_seq_len", "resolution", "resolution"]]
)
input_shape = (1, 3, *video_input_shape)
x = torch.rand(input_shape)
model = model_fn(**kwargs)
model.eval()
actual = model.encode(x, "out")
assert_expected(actual.shape, expected)
@pytest.mark.parametrize(
"video_seq_len, expected",
[(8, 55462.1719), (16, 112028.1719), (32, 225157.7656)],
)
def test_decode_video(model_fn, video_seq_len, expected):
test_params = {"video_seq_len": video_seq_len}
kwargs = {**_model_params, **test_params}
model = model_fn(**kwargs)
model.eval()
latent_shape = model.latent_shape
latent_seq_len = torch.prod(torch.tensor(latent_shape)).item()
x = torch.randint(0, 10, (1, latent_seq_len)) # tokens
actual = model.decode(x)
assert_expected(actual.shape, (1, 3, video_seq_len, 256, 256))
print(actual.sum())
assert_expected(actual.sum().item(), expected, rtol=1, atol=1e-4)
@pytest.mark.parametrize(
"video_seq_len, expected",
[(8, 116013.4766), (16, 237488.6250), (32, 536481.4375)],
)
def test_decode_video_checkpoint(model_fn, video_seq_len, expected):
vqvae_model_key = f"mugen_L{video_seq_len}"
test_params = {
"video_seq_len": video_seq_len,
"pretrained_video_vqvae_model_key": vqvae_model_key,
}
kwargs = {**_model_params, **test_params}
model = model_fn(**kwargs)
model.eval()
latent_shape = model.latent_shape
latent_seq_len = torch.prod(torch.tensor(latent_shape)).item()
x = torch.randint(0, 10, (1, latent_seq_len)) # tokens
actual = model.decode(x)
assert_expected(actual.shape, (1, 3, video_seq_len, 256, 256))
assert_expected(actual.sum().item(), expected, rtol=1, atol=1e-4)
@pytest.mark.parametrize(
"modality, expected_shape, expected_sum",
[("in", (1, 4, 768), -53.7916), ("out", (1, 4, 256), 42.4742)],
)
def test_lookup(model_fn, modality, expected_shape, expected_sum):
test_params = {"text_seq_len": 5}
kwargs = {**_model_params, **test_params}
x = torch.tensor([[1, 2, 3, 4]])
model = model_fn(**kwargs)
model.eval()
actual = model.lookup(x, modality)
assert_expected(actual.shape, expected_shape) # (b, num_tokens, d_model)
assert_expected(actual.sum().item(), expected_sum, rtol=1, atol=1e-4)
@pytest.mark.parametrize(
"video_seq_len, expected", [(8, 782.1641), (16, -442.4437), (32, 585.2963)]
)
def test_forward_no_pretrained(model_fn, video_seq_len, expected):
test_params = {"video_seq_len": video_seq_len}
kwargs = {**_model_params, **test_params}
n_head = kwargs["n_head"]
x = torch.tensor([[1, 2, 3, 4]])
y = torch.tensor([[5, 6, 7]])
attn_mask = torch.tril(torch.ones(7, 7)).unsqueeze(0) # (b, seq_len, seq_len)
head_mask = torch.ones(1, n_head, 7, 7) # (b, h, seq_len, seq_len)
model = model_fn(**kwargs)
model.eval()
num_tokens = model.num_in_tokens + model.num_out_tokens
logits_mask = torch.ones(1, 7, num_tokens) # (b, seq_len, num_tokens)
out = model(x, y, attn_mask=attn_mask, head_mask=head_mask, logits_mask=logits_mask)
actual = out.decoder_output.last_hidden_states
assert_expected(actual.shape, (1, 7, 768))
assert_expected(actual.sum().item(), expected, rtol=1e-5, atol=1e-4)
@pytest.mark.parametrize(
"video_seq_len, expected",
[(8, 431.3439), (16, -180.2783), (32, 462.27)],
)
def test_forward_vqvae_pretrained(model_fn, video_seq_len, expected):
vqvae_model_key = f"mugen_L{video_seq_len}"
test_params = {
"video_seq_len": video_seq_len,
"pretrained_video_vqvae_model_key": vqvae_model_key,
}
kwargs = {**_model_params, **test_params}
n_head = kwargs["n_head"]
x = torch.tensor([[1, 2, 3, 4]])
y = torch.tensor([[5, 6, 7]])
attn_mask = torch.tril(torch.ones(7, 7)).unsqueeze(0) # (b, seq_len, seq_len)
head_mask = torch.ones(1, n_head, 7, 7) # (b, h, seq_len, seq_len)
model = model_fn(**kwargs)
model.eval()
num_tokens = model.num_in_tokens + model.num_out_tokens
logits_mask = torch.ones(1, 7, num_tokens) # (b, seq_len, num_tokens)
out = model(x, y, attn_mask=attn_mask, head_mask=head_mask, logits_mask=logits_mask)
actual = out.decoder_output.last_hidden_states
assert_expected(actual.shape, (1, 7, 768))
assert_expected(actual.sum().item(), expected, rtol=1, atol=1e-4)
@pytest.mark.parametrize(
"video_seq_len, expected",
[(8, 1520.8452), (16, -2085.2417), (32, -5190.5591)],
)
def test_forward_gpt_pretrained(model_fn, video_seq_len, expected):
gpt_model_key = f"mugen_L{video_seq_len}"
test_params = {
"video_seq_len": video_seq_len,
"pretrained_text_video_gpt_model_key": gpt_model_key,
}
kwargs = {**_model_params, **test_params}
n_head = kwargs["n_head"]
x = torch.tensor([[1, 2, 3, 4]])
y = torch.tensor([[5, 6, 7]])
attn_mask = torch.tril(torch.ones(7, 7)).unsqueeze(0) # (b, seq_len, seq_len)
head_mask = torch.ones(1, n_head, 7, 7) # (b, h, seq_len, seq_len)
model = model_fn(**kwargs)
model.eval()
num_tokens = model.num_in_tokens + model.num_out_tokens
logits_mask = torch.ones(1, 7, num_tokens) # (b, seq_len, num_tokens)
out = model(x, y, attn_mask=attn_mask, head_mask=head_mask, logits_mask=logits_mask)
actual = out.decoder_output.last_hidden_states
assert_expected(actual.shape, (1, 7, 768))
assert_expected(actual.sum().item(), expected, rtol=1, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/tests/generation/test_text_video_gpt.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple
from examples.mugen.data.mugen_dataset import MUGENDatasetArgs
from torchmultimodal.transforms.video_transform import (
DEFAULT_MEAN,
DEFAULT_RESIZE_SHAPE,
DEFAULT_STD,
MUGEN_DEFAULT_TIME_SAMPLES,
)
@dataclass
class BertTextTransformArgs:
vocab_file: str = "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt"
do_lower_case: bool = True
start_token: int = 101
end_token: int = 102
padding_value: int = 0
@dataclass
class VideoTransformArgs:
time_samples: int = MUGEN_DEFAULT_TIME_SAMPLES
mean: Tuple[float] = DEFAULT_MEAN
std: Tuple[float] = DEFAULT_STD
resize_shape: Tuple[int, int] = DEFAULT_RESIZE_SHAPE
@dataclass
class DataModuleArgs:
batch_size: int = 16
num_workers: int = 4
shuffle: bool = False
bert_text_transform: BertTextTransformArgs = BertTextTransformArgs()
video_transform: VideoTransformArgs = VideoTransformArgs()
@dataclass
class LightningModuleArgs:
logit_scale: float = 0.07
logit_scale_max: float = 100.0
learning_rate: float = 1e-3
weight_decay: float = 1e-3
recall_ks: Tuple[int] = (1, 5, 10)
@dataclass
class VideoCLIPArgs:
text_pretrained: bool = False
text_trainable: bool = False
text_model_name: str = "distilbert-base-uncased"
text_model_config: Optional[Dict[str, Any]] = None
text_padding_value: int = 0
video_pretrained: bool = False
video_trainable: bool = False
video_pretrain_path: str = (
"https://pytorch.s3.amazonaws.com/models/multimodal/mugen/S3D_kinetics400.pt"
)
proj_out_dim: int = 256
proj_dropout: float = 0.1
@dataclass
class EvaluationArgs:
dataset_args: MUGENDatasetArgs = MUGENDatasetArgs(
get_game_frame=True,
get_text_desc=True,
resolution=256,
fixed_start_idx=False,
use_manual_annotation=True,
use_auto_annotation=False,
)
datamodule_args: DataModuleArgs = DataModuleArgs()
lightningmodule_args: LightningModuleArgs = LightningModuleArgs()
videoclip_args: VideoCLIPArgs = VideoCLIPArgs()
checkpoint_path: str = "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/videoclip_lightning_mugen.pt"
accelerator: str = "auto"
@dataclass
class TrainingArgs:
dataset_args: MUGENDatasetArgs = MUGENDatasetArgs(
get_game_frame=True,
get_text_desc=True,
resolution=224,
fixed_start_idx=False,
use_manual_annotation=True,
use_auto_annotation=False,
)
datamodule_args: DataModuleArgs = DataModuleArgs()
lightningmodule_args: LightningModuleArgs = LightningModuleArgs()
videoclip_args: VideoCLIPArgs = VideoCLIPArgs()
accelerator: str = "auto"
devices: int = 4
max_epochs: int = 1000
log_every_n_steps: int = 100
default_root_dir: Optional[str] = None
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/retrieval/definitions.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Any, Tuple
import torch
from examples.mugen.retrieval.video_clip import videoclip
from pytorch_lightning import LightningModule
from torchmetrics import Recall
from torchmultimodal.modules.losses.contrastive_loss_with_temperature import (
ContrastiveLossWithTemperature,
)
class VideoCLIPLightningModule(LightningModule):
"""PyTorch Lightning module for evaluating VideoCLIP model.
Args:
        logit_scale (float): Initial log-temperature value for contrastive loss function.
Defaults to ``0.07``, MUGEN's log-temperature value at initialization.
logit_scale_max (float): Maximum log-temperature value for contrastive loss function.
Defaults to ``100``, MUGEN's maximum log-temperature value.
learning_rate (float): optimizer learning rate.
Defaults to ``1e-3``, MUGEN's learning rate.
weight_decay (float): optimizer weight decay.
Defaults to ``1e-3``, MUGEN's weight decay.
recall_ks (Tuple[int]): tuple of top-``k``'s for calculating recall.
Defaults to ``(1, 5, 10)``, i.e. top-1 recall, top-5 recall, and top-10 recall.
**videoclip_kwargs (Any): Keyword arguments for the videoCLIP model builder.
"""
def __init__(
self,
logit_scale: float = 0.07,
logit_scale_max: float = 100,
learning_rate: float = 1e-3,
weight_decay: float = 1e-3,
recall_ks: Tuple[int] = (1, 5, 10),
**videoclip_kwargs: Any,
):
super().__init__()
self.model = videoclip(**videoclip_kwargs)
self.contrastive_loss = ContrastiveLossWithTemperature(
logit_scale=logit_scale,
logit_scale_min=None,
logit_scale_max=logit_scale_max,
)
self.lr = learning_rate
self.weight_decay = weight_decay
self.recall_ks = set(recall_ks)
if len(self.recall_ks) != len(recall_ks):
warnings.warn("Duplicate `k` values in `recall_ks` are ignored.")
self.metrics = torch.nn.ModuleDict()
for k in self.recall_ks:
self.metrics.update(
{f"v2t_recall_{k}": Recall(top_k=k), f"t2v_recall_{k}": Recall(top_k=k)}
)
def _collect_embeddings(self, outputs):
text_embeddings = [batch.embeddings_a for batch in outputs]
video_embeddings = [batch.embeddings_b for batch in outputs]
embeddings = {
"text": torch.cat(text_embeddings),
"video": torch.cat(video_embeddings),
}
return embeddings
def _compute_recall(self, split, text_embedding, video_embedding):
similarity_matrix = text_embedding @ video_embedding.T
num_samples = similarity_matrix.shape[0]
target_matrix = torch.eye(
n=num_samples, dtype=int, device=similarity_matrix.device
)
for k in self.recall_ks:
v2t_recall = self.metrics[f"v2t_recall_{k}"]
v2t_recall(preds=similarity_matrix.T, target=target_matrix)
self.log(f"{split}/Recall@{k} (video query, text retrieval)", v2t_recall)
t2v_recall = self.metrics[f"t2v_recall_{k}"]
t2v_recall(preds=similarity_matrix, target=target_matrix)
self.log(f"{split}/Recall@{k} (text query, video retrieval)", t2v_recall)
def configure_optimizers(self):
params = self.parameters()
optimizer = torch.optim.AdamW(
params, lr=self.lr, weight_decay=self.weight_decay
)
return optimizer
def training_step(self, batch, batch_idx):
text, video = batch.get("text"), batch.get("video")
model_output = self.model(features_a=text, features_b=video)
loss = self.contrastive_loss(
model_output.embeddings_a, model_output.embeddings_b
)
self.log(
"train/loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True
)
return {"loss": loss, "model_output": model_output}
def validation_step(self, batch, batch_idx):
text, video = batch.get("text"), batch.get("video")
model_output = self.model(features_a=text, features_b=video)
loss = self.contrastive_loss(
model_output.embeddings_a, model_output.embeddings_b
)
self.log(
"validation/loss",
loss,
on_step=True,
on_epoch=True,
prog_bar=True,
logger=True,
)
return {"loss": loss, "model_output": model_output}
def validation_epoch_end(self, outputs):
model_outputs = [batch["model_output"] for batch in outputs]
all_embeddings = self._collect_embeddings(model_outputs)
text_embedding, video_embedding = (
all_embeddings["text"],
all_embeddings["video"],
)
self._compute_recall("validation", text_embedding, video_embedding)
def test_step(self, batch, batch_idx):
text, video = batch.get("text"), batch.get("video")
model_output = self.model(features_a=text, features_b=video)
return model_output
def test_epoch_end(self, outputs):
all_embeddings = self._collect_embeddings(outputs)
text_embedding, video_embedding = (
all_embeddings["text"],
all_embeddings["video"],
)
self._compute_recall("test", text_embedding, video_embedding)
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/retrieval/model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from examples.mugen.data.mugen_datamodules import MUGENDataModule
from examples.mugen.data.mugen_dataset import MUGENDatasetArgs
from examples.mugen.retrieval.model import VideoCLIPLightningModule
from hydra.utils import instantiate
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from torchmultimodal.transforms.bert_text_transform import BertTextTransform
from torchmultimodal.transforms.video_transform import VideoTransform
def get_yaml_config():
cli_conf = OmegaConf.from_cli()
if "config" not in cli_conf:
raise ValueError(
"Please pass 'config' to specify configuration yaml file for running VideoCLIP training"
)
yaml_conf = OmegaConf.load(cli_conf.config)
conf = instantiate(yaml_conf)
return conf
def train():
args = get_yaml_config()
dataset_args: MUGENDatasetArgs = args.dataset_args
datamodule = MUGENDataModule(
dataset_args,
text_transform=BertTextTransform(
**vars(args.datamodule_args.bert_text_transform)
),
video_transform=VideoTransform(**vars(args.datamodule_args.video_transform)),
batch_size=args.datamodule_args.batch_size,
num_workers=args.datamodule_args.num_workers,
shuffle=args.datamodule_args.shuffle,
)
model = VideoCLIPLightningModule(
**vars(args.lightningmodule_args),
**vars(args.videoclip_args),
)
checkpoint_callback = ModelCheckpoint(save_top_k=-1)
trainer = Trainer(
accelerator=args.accelerator,
devices=args.devices,
strategy="ddp_find_unused_parameters_false",
max_epochs=args.max_epochs,
log_every_n_steps=args.log_every_n_steps,
default_root_dir=args.default_root_dir,
callbacks=[checkpoint_callback],
)
trainer.fit(
model=model,
train_dataloaders=datamodule.train_dataloader(),
val_dataloaders=datamodule.val_dataloader(),
)
if __name__ == "__main__":
train()
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/retrieval/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Any, Dict, Optional
import torch
from torch import nn
from torchmultimodal.models.clip.model import CLIP
from torchmultimodal.utils.common import load_module_from_url
from torchvision.models.video import S3D
from transformers import DistilBertConfig, DistilBertModel
PRETRAINED_VIDEO_ENCODER_URL = "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/video_encoder-weights-b0e27f13.pth"
class TextEncoder(nn.Module):
"""Encode tokenized text to the last hidden state representation of the CLS token using
DistilBERT. DistilBERT prepends a CLS (classification) token to every text so the
token's hidden state represents the entire text.
Adapted from MUGEN's text encoder
(https://github.com/mugen-org/MUGEN_baseline/blob/main/lib/models/videoclip/modules.py)
Args:
model_config (Optional[Dict[str, Any]]): model config for DistilBERT.
Defaults to ``None``, indicating the default DistilBERT config.
padding_value (int): value that was used to pad the input text.
Defaults to ``0``, Hugging Face's BERT pad token.
Inputs:
input_ids (Tensor): tensor of (batch, text_length) tokenized text
Returns:
Tensor: encoded text with dimensions (batch, ``model_config.dim``).
Default ``model_config.dim`` is ``768``.
"""
def __init__(
self,
model_config: Optional[Dict[str, Any]] = None,
padding_value: int = 0,
):
super().__init__()
self.padding_value = padding_value
# we are using the CLS token hidden representation as the sentence's embedding
self.target_token_idx = 0
distilbert_config = (
DistilBertConfig(**model_config) if model_config else DistilBertConfig()
)
self.model = DistilBertModel(config=distilbert_config)
self.out_dim = self.model.config.dim
def build_attention_mask(self, input_ids: torch.Tensor) -> torch.Tensor:
return (input_ids != self.padding_value).to(dtype=int)
def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
attention_mask = self.build_attention_mask(input_ids)
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
last_hidden_state = output.last_hidden_state
return last_hidden_state[:, self.target_token_idx, :]
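# Hedged sketch of TextEncoder on pre-tokenized input. The token ids below are
# arbitrary BERT-style ids padded with 0 and chosen purely for illustration;
# the encoder here is randomly initialized (no pretrained weights are loaded).
def _example_text_encoder():
    encoder = TextEncoder()
    input_ids = torch.tensor([[101, 7592, 2088, 102, 0, 0]])
    cls_embedding = encoder(input_ids)
    # cls_embedding has shape (1, encoder.out_dim), i.e. (1, 768) for the default config.
    return cls_embedding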
class VideoEncoder(nn.Module):
"""Encode videos to the last layer before the fully-connected layer of S3D.
Adapted from MUGEN's video encoder
(https://github.com/mugen-org/MUGEN_baseline/blob/main/lib/models/videoclip/modules.py)
Attributes:
model (nn.Module): Module extracted from :class:`~torchvision.models.video.S3D`.
Code reference: https://github.com/pytorch/vision/blob/main/torchvision/models/video/s3d.py
out_dim (int): Output dimension of VideoEncoder.
Inputs:
x (Tensor): batch of videos with dimensions (batch, channel, time, height, width)
Size of ``channel`` dimension must be ``3``.
"""
def __init__(self) -> None:
super().__init__()
self.model = S3D()
# Getting input channels from the Conv3d layer
self.out_dim = self.model.classifier[1].in_channels
# Transform the classifier into identity
self.model.classifier = nn.Identity()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
if x.shape[1] != 3:
raise ValueError(
"Channels must be at first (zero-indexed) dimension of input and of size 3."
)
return self.model(x)
class Projection(nn.Module):
"""Project embeddings to a fixed dimension by adding the hidden-layer output and final output of a MLP.
Args:
in_dim (int): dimension of input.
out_dim (int): dimension of output.
Defaults to ``256``, the value used by MUGEN.
dropout_prob (float): dropout probability.
Defaults to ``0.1``, the value used by MUGEN.
Inputs:
        x (Tensor): embeddings (batch, in_dim)
    Returns:
        Tensor: projected embeddings (batch, out_dim)
"""
def __init__(self, in_dim, out_dim=256, dropout_prob=0.1) -> None:
super().__init__()
self.linear1 = nn.Linear(in_dim, out_dim, bias=False)
self.gelu = nn.GELU()
self.linear2 = nn.Linear(out_dim, out_dim, bias=False)
self.drop = nn.Dropout(dropout_prob)
self.layer_norm = nn.LayerNorm(out_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
embed1 = self.linear1(x)
embed2 = self.gelu(embed1)
embed2 = self.linear2(embed2)
embed2 = self.drop(embed2)
embeds = self.layer_norm(embed1 + embed2)
return embeds
def videoclip(
text_pretrained: bool = True,
text_trainable: bool = True,
text_model_name: str = "distilbert-base-uncased",
text_model_config: Optional[Dict[str, Any]] = None,
text_padding_value: int = 0,
video_pretrained: bool = True,
video_trainable: bool = True,
video_pretrain_path: str = PRETRAINED_VIDEO_ENCODER_URL,
proj_out_dim: int = 256,
proj_dropout: float = 0.1,
) -> CLIP:
"""Create MUGEN's video-text CLIP model with a S3D-backed video encoder and DistilBERT-backed text encoder.
MUGEN paper: https://arxiv.org/abs/2204.08058
Args:
text_pretrained (bool): whether to use a pretrained text encoder or not.
Defaults to ``True``.
text_trainable (bool): whether the text encoder's weights should be trainable.
Defaults to ``True``. Ignored if ``text_pretrained`` is ``False``.
text_model_name (str): name of pretrained model, used when ``text_pretrained`` is ``True``.
Defaults to ``"distilbert-base-uncased"``, Hugging Face's standard DistilBERT model.
text_model_config (Optional[Dict[str, Any]]): model config for DistilBERT.
Defaults to ``None``, indicating the default DistilBERT config.
text_padding_value (int): value that was used to pad the input text.
Defaults to ``0``, Hugging Face's BERT pad token.
video_pretrained (bool): whether to use a pretrained model or not.
Defaults to ``True``.
video_trainable (bool): whether the video encoder's weights should be trainable.
Defaults to ``True``. Ignored if ``video_pretrained`` is ``False``.
video_pretrain_path (str): local path or remote URL to video encoder pretrained weights.
Defaults to ``PRETRAINED_VIDEO_ENCODER_URL``, the weights MUGEN used from
pretraining S3D on Kinetics 400. Ignored if ``video_pretrained`` is ``False``.
proj_out_dim (int): output dimension to project both encoders' outputs to.
Defaults to ``256``, the value used by MUGEN.
proj_dropout (float): dropout probability in the projection layers.
Defaults to ``0.1``, the value used by MUGEN.
Returns:
CLIP: CLIP model using MUGEN's video encoder and text encoder.
"""
text_model = TextEncoder(
model_config=text_model_config,
padding_value=text_padding_value,
)
if text_pretrained:
print(f"Loading pretrained DistilBERT from {text_model_name}.")
text_model.model = DistilBertModel.from_pretrained(text_model_name)
if text_pretrained and not text_trainable:
# check `text_pretrained` because if model isn't pretrained, then it should be trainable
for p in text_model.model.parameters():
p.requires_grad = False
elif not text_trainable:
warnings.warn("`text_trainable` acts as True when `text_pretrained` is False.")
text_encoder = nn.Sequential(
text_model,
Projection(text_model.out_dim, out_dim=proj_out_dim, dropout_prob=proj_dropout),
)
video_model = VideoEncoder()
if video_pretrained:
print(f"Loading pretrained video encoder from {video_pretrain_path}.")
load_module_from_url(video_model, video_pretrain_path)
if video_pretrained and not video_trainable:
# check `video_pretrained` because if model isn't pretrained, then it should be trainable
for p in video_model.model.parameters():
p.requires_grad = False
elif not video_trainable:
warnings.warn(
"`video_trainable` acts as True when `video_pretrained` is False."
)
video_encoder = nn.Sequential(
video_model,
Projection(
video_model.out_dim, out_dim=proj_out_dim, dropout_prob=proj_dropout
),
)
return CLIP(encoder_a=text_encoder, encoder_b=video_encoder)
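if __name__ == "__main__":
    # Hedged smoke test, not part of the original module: exercises the encoders
    # defined above with dummy inputs. Batch size, token count, and video shape are
    # illustrative assumptions; no pretrained weights are requested, so nothing is
    # downloaded.
    text_model = TextEncoder()
    video_model = VideoEncoder()
    tokens = torch.randint(0, 1000, (2, 16))  # (batch, text_length)
    videos = torch.randn(2, 3, 32, 224, 224)  # (batch, channel, time, height, width)
    text_emb = Projection(text_model.out_dim)(text_model(tokens))  # (2, 256)
    video_emb = Projection(video_model.out_dim)(video_model(videos))  # (2, 256)
    print(text_emb.shape, video_emb.shape)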
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/retrieval/video_clip.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from examples.mugen.data.mugen_datamodules import MUGENDataModule
from examples.mugen.data.mugen_dataset import MUGENDatasetArgs
from examples.mugen.retrieval.model import VideoCLIPLightningModule
from hydra.utils import instantiate
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from torchmultimodal.transforms.bert_text_transform import BertTextTransform
from torchmultimodal.transforms.video_transform import VideoTransform
def get_yaml_config():
cli_conf = OmegaConf.from_cli()
if "config" not in cli_conf:
raise ValueError(
"Please pass 'config' to specify configuration yaml file for running VideoCLIP evaluation"
)
yaml_conf = OmegaConf.load(cli_conf.config)
conf = instantiate(yaml_conf)
return conf
def evaluate():
args = get_yaml_config()
dataset_args: MUGENDatasetArgs = args.dataset_args
datamodule = MUGENDataModule(
dataset_args,
text_transform=BertTextTransform(
**vars(args.datamodule_args.bert_text_transform)
),
video_transform=VideoTransform(**vars(args.datamodule_args.video_transform)),
batch_size=args.datamodule_args.batch_size,
num_workers=args.datamodule_args.num_workers,
shuffle=args.datamodule_args.shuffle,
)
model = VideoCLIPLightningModule.load_from_checkpoint(
args.checkpoint_path,
**vars(args.lightningmodule_args),
**vars(args.videoclip_args),
)
trainer = Trainer(accelerator=args.accelerator, devices=1)
trainer.test(model, dataloaders=datamodule.test_dataloader())
if __name__ == "__main__":
evaluate()
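# Hedged invocation sketch (the YAML path below is a placeholder, not a file shipped
# with this example). OmegaConf.from_cli() parses dotlist-style ``key=value`` arguments,
# so the config is passed as ``config=...`` rather than ``--config``:
#
#   python eval.py config=path/to/videoclip_eval.yaml
#
# The YAML is expected to define the attributes read above: ``dataset_args``,
# ``datamodule_args`` (with ``bert_text_transform``, ``video_transform``, ``batch_size``,
# ``num_workers``, ``shuffle``), ``lightningmodule_args``, ``videoclip_args``,
# ``checkpoint_path``, and ``accelerator``.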
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/retrieval/eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from dataclasses import dataclass
import numpy as np
import torch
from examples.mugen.data.coinrun.construct_from_json import (
define_semantic_color_map,
draw_game_frame,
generate_asset_paths,
load_assets,
load_bg_asset,
)
from examples.mugen.data.coinrun.game import Game
from torch.utils.data import Dataset
from .audio_utils import AUDIO_SAMPLE_LENGTH, AUDIO_SAMPLE_RATE, load_audio
@dataclass
class MUGENDatasetArgs:
data_path: str = "datasets/coinrun/coinrun_dataset_jsons/release"
asset_path: str = "datasets/coinrun/assets"
sample_every_n_frames: int = 3
sequence_length: int = 32
resolution: int = 256
audio_sample_rate: int = AUDIO_SAMPLE_RATE
audio_sample_length: int = AUDIO_SAMPLE_LENGTH
bbox_smap_for_agent: bool = (
True # render smap for mugen (and shield) as bounding boxes
)
bbox_smap_for_monsters: bool = True # render smap for monsters as bounding boxes
use_manual_annotation: bool = False # if True will only use videos with manual annotation and skip those without
use_auto_annotation: bool = (
True # if True will only use videos with auto annotation and skip those without
)
use_downsampled_trainset: bool = (
False # if True will only use downsampled training set
)
    fixed_start_idx: bool = True  # fix starting game frame idx to 0
get_game_frame: bool = False # load video data
get_seg_map: bool = False # load semantic map
get_text_desc: bool = False # load text data
get_audio: bool = (
False # load full mix audio for each video, for audio generation models
)
debug: bool = False
class MUGENDataset(Dataset):
"""Dataset class to interface the MUGEN dataset.
Args:
split (str): dataset split. Defines the json file from which to read metadata.
E.g. ``"train"``, ``"valid"``, or ``"test"``.
args (MUGENDatasetArgs): other arguments related to loading data from files.
"""
def __init__(
self,
split: str,
args: MUGENDatasetArgs,
):
super().__init__()
self.args = args
self.train = split == "train"
self.max_label = 21
assert (
self.args.get_game_frame or self.args.get_audio or self.args.get_text_desc
), "Need to return at least one of game frame, audio, or text desc"
if args.use_downsampled_trainset and split == "train":
dataset_json_file = os.path.join(
self.args.data_path, f"{split}_downsampled.json"
)
else:
dataset_json_file = os.path.join(self.args.data_path, f"{split}.json")
print(f"LOADING FROM JSON FROM {dataset_json_file}...")
with open(dataset_json_file, "r") as f:
all_data = json.load(f)
if args.debug:
all_data["data"] = all_data["data"][:16]
self.dataset_metadata = all_data["metadata"]
self.data = []
for data_sample in all_data["data"]:
if (
data_sample["video"]["num_frames"]
> (args.sequence_length - 1) * args.sample_every_n_frames
):
self.data.append(data_sample)
print(f"NUMBER OF FILES LOADED: {len(self.data)}")
self.init_game_assets()
# initialize game assets
def init_game_assets(self):
self.game = Game()
self.game.load_json(
os.path.join(
self.dataset_metadata["data_folder"], self.data[0]["video"]["json_file"]
)
)
# NOTE: only supports rendering square-size coinrun frame for now
self.game.video_res = self.args.resolution
semantic_color_map = define_semantic_color_map(self.max_label)
# grid size for Mugen/monsters/ground
self.kx: float = self.game.zoom * self.game.video_res / self.game.maze_w
self.ky: float = self.kx
# grid size for background
zx = self.game.video_res * self.game.zoom
zy = zx
# NOTE: This is a hacky solution to switch between theme assets
        # Slightly inefficient due to Mugen/monsters being loaded twice,
        # but that only adds a minor delay during init
# This should be revisited in future when we have more background/ground themes
self.total_world_themes = len(self.game.background_themes)
self.asset_map = {}
for world_theme_n in range(self.total_world_themes):
# reset the paths for background and ground assets based on theme
self.game.world_theme_n = world_theme_n
asset_files = generate_asset_paths(self.game)
            # TODO: is it worth loading assets separately for game frame and label?
            # this way the game frame will have smoother character boundaries
self.asset_map[world_theme_n] = load_assets(
asset_files,
self.args.asset_path,
semantic_color_map,
self.kx,
self.ky,
gen_original=False,
)
# background asset is loaded separately due to not following the grid
self.asset_map[world_theme_n]["background"] = load_bg_asset(
asset_files, self.args.asset_path, semantic_color_map, zx, zy
)
def __len__(self):
return len(self.data)
def get_start_end_idx(self, valid_frames=None):
start_idx = 0
end_idx = len(self.game.frames)
if self.args.sequence_length is not None:
assert (
self.args.sequence_length - 1
) * self.args.sample_every_n_frames < end_idx, (
f"not enough frames to sample {self.args.sequence_length} frames "
+ "at every {self.args.sample_every_n_frames} frame"
)
if self.args.fixed_start_idx:
start_idx = 0
else:
if valid_frames:
# we are sampling frames from a full json and we need to ensure that the desired
# class is in the frame range we sample. Resample until this is true
resample = True
while resample:
start_idx = torch.randint(
low=0,
high=end_idx
- (self.args.sequence_length - 1)
* self.args.sample_every_n_frames,
size=(1,),
).item()
for valid_frame_range in valid_frames:
if isinstance(valid_frame_range, list):
# character ranges
st_valid, end_valid = valid_frame_range
else:
# game event has a single timestamp
st_valid, end_valid = (
valid_frame_range,
valid_frame_range,
)
if (
end_valid >= start_idx
and start_idx
+ self.args.sequence_length
* self.args.sample_every_n_frames
>= st_valid
):
# desired class is in the sampled frame range, so stop sampling
resample = False
else:
start_idx = torch.randint(
low=0,
high=end_idx
- (self.args.sequence_length - 1)
* self.args.sample_every_n_frames,
size=(1,),
).item()
end_idx = (
start_idx + self.args.sequence_length * self.args.sample_every_n_frames
)
return start_idx, end_idx
def get_game_video(self, start_idx, end_idx, alien_name="Mugen"):
frames = []
for i in range(start_idx, end_idx, self.args.sample_every_n_frames):
img = draw_game_frame(
self.game,
i,
self.asset_map[self.game.world_theme_n],
self.kx,
self.ky,
gen_original=True,
alien_name=alien_name,
)
frames.append(torch.unsqueeze(torch.as_tensor(np.array(img)), dim=0))
return torch.vstack(frames)
def get_game_audio(self, wav_filename):
data, _ = load_audio(
wav_filename,
sr=self.args.audio_sample_rate,
offset=0,
duration=self.args.audio_sample_length,
)
data = torch.as_tensor(data).permute(1, 0)
return data
def get_smap_video(self, start_idx, end_idx, alien_name="Mugen"):
frames = []
for i in range(start_idx, end_idx, self.args.sample_every_n_frames):
img = draw_game_frame(
self.game,
i,
self.asset_map[self.game.world_theme_n],
self.kx,
self.ky,
gen_original=False,
bbox_smap_for_agent=self.args.bbox_smap_for_agent,
bbox_smap_for_monsters=self.args.bbox_smap_for_monsters,
alien_name=alien_name,
)
frames.append(torch.unsqueeze(torch.as_tensor(np.array(img)), dim=0))
# typical output shape is 16 x 256 x 256 x 1 (sequence_length=16, resolution=256)
return torch.unsqueeze(torch.vstack(frames), dim=3)
def load_json_file(self, idx):
self.game.load_json(
os.path.join(
self.dataset_metadata["data_folder"],
self.data[idx]["video"]["json_file"],
)
)
self.game.video_res = self.args.resolution
def __getitem__(self, idx):
self.load_json_file(idx)
start_idx, end_idx = self.get_start_end_idx()
alien_name = "Mugen"
result_dict = {}
if self.args.get_audio:
wav_file = os.path.join(
self.dataset_metadata["data_folder"],
self.data[idx]["video"]["video_file"],
)
result_dict["audio"] = self.get_game_audio(wav_file)
if self.args.get_game_frame:
game_video = self.get_game_video(start_idx, end_idx, alien_name=alien_name)
result_dict["video"] = game_video
if self.args.get_seg_map:
seg_map_video = self.get_smap_video(
start_idx, end_idx, alien_name=alien_name
)
result_dict["video_smap"] = seg_map_video
if self.args.get_text_desc:
# text description will be generated in the range of start and end frames
# this means we can use full json and auto-text to train transformer too
assert self.args.use_auto_annotation or self.args.use_manual_annotation
if self.args.use_manual_annotation and not self.args.use_auto_annotation:
assert (
len(self.data[idx]["annotations"]) > 1
), "need at least one manual annotation if using only manual annotations"
# exclude the auto-text, which is always index 0
rand_idx = (
torch.randint(
low=1, high=len(self.data[idx]["annotations"]), size=(1,)
).item()
if self.train
else 1
)
elif not self.args.use_manual_annotation and self.args.use_auto_annotation:
rand_idx = 0
else:
rand_idx = torch.randint(
low=0, high=len(self.data[idx]["annotations"]), size=(1,)
).item()
if self.args.use_manual_annotation and not self.args.use_auto_annotation:
assert (
self.data[idx]["annotations"][rand_idx]["type"] == "manual"
), "Should only be sampling manual annotations"
text_desc = self.data[idx]["annotations"][rand_idx]["text"]
result_dict["text"] = text_desc
return result_dict
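# Hedged usage sketch, not part of the original module: requires the MUGEN jsons and
# assets referenced by ``MUGENDatasetArgs`` to exist on disk, so it is left commented
# out; the field values below are illustrative assumptions.
#
# args = MUGENDatasetArgs(get_game_frame=True, get_text_desc=True, resolution=256)
# dataset = MUGENDataset(split="train", args=args)
# sample = dataset[0]
# sample["video"].shape  # (sequence_length, resolution, resolution, 3) uint8 frames
# sample["text"]         # one sampled annotation string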
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/mugen_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from tqdm import tqdm
label_color_map = {
0: torch.FloatTensor((0, 0, 0)),
1: torch.FloatTensor((128, 0, 0)),
2: torch.FloatTensor((255, 0, 0)),
3: torch.FloatTensor((139, 69, 19)),
4: torch.FloatTensor((0, 255, 0)),
5: torch.FloatTensor((0, 128, 0)),
6: torch.FloatTensor((0, 100, 0)),
7: torch.FloatTensor((244, 164, 96)),
8: torch.FloatTensor((205, 133, 63)),
9: torch.FloatTensor((255, 192, 203)),
10: torch.FloatTensor((210, 105, 30)),
11: torch.FloatTensor((255, 0, 255)),
12: torch.FloatTensor((230, 230, 250)),
13: torch.FloatTensor((0, 191, 255)),
14: torch.FloatTensor((154, 205, 50)),
15: torch.FloatTensor((255, 215, 0)),
16: torch.FloatTensor((169, 169, 169)),
17: torch.FloatTensor((148, 0, 211)),
18: torch.FloatTensor((127, 255, 212)),
19: torch.FloatTensor((255, 255, 0)),
20: torch.FloatTensor((255, 69, 0)),
21: torch.FloatTensor((255, 255, 255)),
22: torch.FloatTensor((0, 0, 255)),
}
def convert_grayscale_to_color_label(input_tensor):
b_in, t_in, h_in, w_in = input_tensor.shape
input_tensor = input_tensor.reshape(-1)
output_tensor = torch.zeros(input_tensor.shape[0], 3)
for i, t in tqdm(
enumerate(input_tensor.cpu().numpy()), total=input_tensor.shape[0]
):
output_tensor[i] = label_color_map[t]
output_tensor = output_tensor.reshape(b_in, t_in, h_in, w_in, 3).permute(
0, 4, 1, 2, 3
)
return output_tensor
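if __name__ == "__main__":
    # Hedged example, not part of the original module: colorize a dummy batch of
    # grayscale label maps. The shape (batch=2, time=4, height=8, width=8) is an
    # illustrative assumption; label values must be keys of ``label_color_map``.
    dummy_labels = torch.randint(0, 23, (2, 4, 8, 8))
    colored = convert_grayscale_to_color_label(dummy_labels)
    print(colored.shape)  # torch.Size([2, 3, 4, 8, 8])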
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/video_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import av
import numpy as np
# Audio constants are from MUGEN's audio VQVAE implementation
# (https://github.com/mugen-org/MUGEN_baseline/blob/b60d229/lib/models/audio_vqvae/hparams.py)
AUDIO_SAMPLE_RATE = 22050
AUDIO_SAMPLE_LENGTH = AUDIO_SAMPLE_RATE * 3.2 # each video is 3.2 seconds
# `load_audio` function is from OpenAI's jukebox library
# (https://github.com/openai/jukebox/blob/08efbbc/jukebox/utils/io.py)
def load_audio(
file,
sr,
offset,
duration,
resample=True,
approx=False,
time_base="samples",
check_duration=True,
):
if time_base == "sec":
offset = offset * sr
duration = duration * sr
# Loads at target sr, stereo channels, seeks from offset, and stops after duration
container = av.open(file)
audio = container.streams.get(audio=0)[0] # Only first audio stream
audio_duration = audio.duration * float(audio.time_base)
if approx:
if offset + duration > audio_duration * sr:
# Move back one window. Cap at audio_duration
            offset = min(audio_duration * sr - duration, offset - duration)  # builtin min; np.min(a, b) would misread b as an axis
else:
if check_duration:
assert (
offset + duration <= audio_duration * sr
), f"End {offset + duration} beyond duration {audio_duration*sr}"
if resample:
resampler = av.AudioResampler(format="fltp", layout="stereo", rate=sr)
else:
assert sr == audio.sample_rate
offset = int(
offset / sr / float(audio.time_base)
) # int(offset / float(audio.time_base)) # Use units of time_base for seeking
duration = int(
duration
) # duration = int(duration * sr) # Use units of time_out ie 1/sr for returning
sig = np.zeros((2, duration), dtype=np.float32)
container.seek(offset, stream=audio)
total_read = 0
for frame in container.decode(audio=0): # Only first audio stream
if resample:
frame.pts = None
frame = resampler.resample(frame)
frame = frame.to_ndarray(format="fltp") # Convert to floats and not int16
read = frame.shape[-1]
if total_read + read > duration:
read = duration - total_read
sig[:, total_read : total_read + read] = frame[:, :read]
total_read += read
if total_read == duration:
break
assert total_read <= duration, f"Expected {duration} frames, got {total_read}"
return sig, sr
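# Hedged usage sketch (the .wav path is a placeholder): load one full 3.2-second MUGEN
# clip at the constants defined above, resampled to stereo float32.
#
# sig, sr = load_audio(
#     "clip.wav",
#     sr=AUDIO_SAMPLE_RATE,
#     offset=0,
#     duration=int(AUDIO_SAMPLE_LENGTH),
# )
# sig.shape  # (2, int(AUDIO_SAMPLE_LENGTH)) == (2, 70560)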
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/audio_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import pytorch_lightning as pl
import torch
import torch.distributed as dist
import torch.utils.data as data
from .mugen_dataset import MUGENDataset, MUGENDatasetArgs
class MUGENDataModule(pl.LightningDataModule):
"""General lightning data module for MUGEN dataset.
Args:
mugen_dataset_args (MUGENDatasetArgs): arguments for MUGENDataset.
text_transform (Optional[Callable]): transform for text batches.
Only used when not ``None`` and when ``mugen_dataset_args.get_text_desc = True``.
Defaults to ``None``.
video_transform (Optional[Callable]): transform for video batches.
Only used when not ``None`` and when ``mugen_dataset_args.get_game_frame = True``.
Defaults to ``None``.
audio_transform (Optional[Callable]): transform for audio batches.
Only used when not ``None`` and when ``mugen_dataset_args.get_audio = True``.
Defaults to ``None``.
batch_size (int): number of samples per batch.
Defaults to ``16``.
num_workers (int): number of subprocesses for data loading.
Defaults to ``0``, meaning data is loaded in the main process.
shuffle (bool): whether to reshuffle data after each epoch.
Defaults to ``True``.
"""
def __init__(
self,
mugen_dataset_args: MUGENDatasetArgs,
text_transform: Optional[Callable] = None,
video_transform: Optional[Callable] = None,
audio_transform: Optional[Callable] = None,
batch_size: int = 16,
num_workers: int = 0,
shuffle: bool = True,
):
super().__init__()
self.mugen_dataset_args = mugen_dataset_args
self.text_transform = text_transform
self.video_transform = video_transform
self.audio_transform = audio_transform
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
@property
def n_classes(self):
        # NOTE: assumes the underlying dataset exposes ``n_classes``
        dataset = self._dataset("train")  # _dataset expects a split name, not a bool
return dataset.n_classes
def _custom_collate_fn(self, batch):
collated_batch = {}
if self.mugen_dataset_args.get_game_frame:
video = [elem["video"] for elem in batch]
video = torch.stack(video)
video = self.video_transform(video) if self.video_transform else video
collated_batch["video"] = video
if self.mugen_dataset_args.get_text_desc:
text = [elem["text"] for elem in batch]
# cannot be torch.stack'ed because still in raw text form, not Tensor
text = self.text_transform(text) if self.text_transform else text
collated_batch["text"] = text
if self.mugen_dataset_args.get_audio:
audio = [elem["audio"] for elem in batch]
audio = torch.stack(audio)
audio = self.audio_transform(audio) if self.audio_transform else audio
collated_batch["audio"] = audio
return collated_batch
def _dataset(self, split):
dataset = MUGENDataset(args=self.mugen_dataset_args, split=split)
return dataset
def _dataloader(self, split):
dataset = self._dataset(split)
if dist.is_initialized():
sampler = data.distributed.DistributedSampler(
dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank()
)
else:
sampler = None
dataloader = data.DataLoader(
dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
sampler=sampler,
shuffle=sampler is None and self.shuffle is True,
collate_fn=self._custom_collate_fn,
)
return dataloader
def train_dataloader(self):
return self._dataloader("train")
def val_dataloader(self):
return self._dataloader("val")
def test_dataloader(self):
return self._dataloader("test")
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/mugen_datamodules.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
class Game:
def __init__(self, **kwargs):
self.game_id = -1
self.level_seed = 0
self.rl_agent_seed = 0
self.zoom = 5.5
self.bgzoom = 0.4 # NOTE: hard-coded
self.world_theme_n = -1
self.agent_theme_n = -1
self.background_themes = []
self.ground_themes = []
self.agent_themes = []
self.monster_names = {}
self.flattened_monster_names = []
# TODO: save and load these from the game engine
self.video_res = 1024
self.maze_w = 64
self.maze_h = 13 # for zoom 5.5
self.reset_game()
self.__dict__.update(**kwargs)
self.frames = [Frame(**f) for f in self.frames]
def reset_game(self):
self.maze = None
self.frames = []
def asdict(self, f_start=-1, f_end=-1):
if f_end < 0:
# show all frames by default
frames_as_dict = [f.asdict() for f in self.frames]
else:
frames_as_dict = [f.asdict() for f in self.frames[f_start:f_end]]
return {
"game_id": self.game_id,
"level_seed": self.level_seed,
"rl_agent_seed": self.rl_agent_seed,
"zoom": self.zoom,
"bgzoom": self.bgzoom,
"world_theme_n": self.world_theme_n,
"agent_theme_n": self.agent_theme_n,
"background_themes": self.background_themes,
"ground_themes": self.ground_themes,
"agent_themes": self.agent_themes,
"monster_names": self.monster_names,
"video_res": self.video_res,
"maze_w": self.maze_w,
"maze_h": self.maze_h,
"maze": self.maze if self.maze is not None else None,
"frames": frames_as_dict,
}
def __repr__(self):
return json.dumps(self.asdict())
def save_json(self, json_path, f_start=-1, f_end=-1):
with open(json_path, "w") as f:
json.dump(self.asdict(f_start, f_end), f, indent=2)
def load_json(self, json_path):
with open(json_path, "r") as f:
data = json.load(f)
self.reset_game()
self.__dict__.update(**data)
self.frames = [Frame(**f) for f in self.frames]
self.flatten_monster_names()
self.reset_eaten_coins()
def flatten_monster_names(self):
# the order is important!
        # copy the list so repeated calls do not also extend monster_names["ground"]
        self.flattened_monster_names = list(self.monster_names["ground"])
self.flattened_monster_names.extend(self.monster_names["walking"])
self.flattened_monster_names.extend(self.monster_names["flying"])
# NOTE: some coins might be missing due to how 3s clip json is saved
# reset all eaten coins to put them back
# this is a temporary fix until we regenerate all jsons
def reset_eaten_coins(self):
for coin_loc in self.frames[-1].coins_eaten:
# note the game rows are saved as strings
            # NOTE: '1' is the yellow coin, we also have another type '2' that is the red gem
# but the json with '2' enabled should not have this issue
if self.maze[coin_loc[1]][coin_loc[0]] == ".":
self.maze[coin_loc[1]] = (
self.maze[coin_loc[1]][: coin_loc[0]]
+ "1"
+ self.maze[coin_loc[1]][(coin_loc[0] + 1) :]
)
class Frame:
def __init__(self, **kwargs):
self.frame_id = -1
self.file_name = ""
self.state_time = 0
self.coins_eaten = []
self.agent = None
self.monsters = []
self.__dict__.update(**kwargs)
if "agent" in self.__dict__ and self.agent is not None:
self.agent = Agent(**self.agent)
if "monsters" in self.__dict__:
self.monsters = [Monster(**m) for m in self.monsters]
def asdict(self):
return {
"frame_id": self.frame_id,
"file_name": self.file_name,
"state_time": self.state_time,
"coins_eaten": self.coins_eaten,
"agent": self.agent.asdict() if self.agent is not None else None,
"monsters": [m.asdict() for m in self.monsters],
}
def __repr__(self):
return json.dumps(self.asdict())
class Agent:
def __init__(
self,
x,
y,
vx=0.0,
vy=0.0,
time_alive=0,
ladder=False,
spring=0,
is_killed=False,
killed_animation_frame_cnt=0,
finished_level_frame_cnt=0,
killed_monster=False,
bumped_head=False,
collected_coin=False,
collected_gem=False,
power_up_mode=False,
**kwargs,
):
self.x = x
self.y = y
self.vx = vx
self.vy = vy
self.time_alive = time_alive
self.ladder = ladder # for climb pose
self.spring = spring # for duck pose
# states related to agent dying or finishing animations
self.is_killed = is_killed
self.killed_animation_frame_cnt = killed_animation_frame_cnt
self.finished_level_frame_cnt = finished_level_frame_cnt
self.killed_monster = killed_monster
self.bumped_head = bumped_head
self.collected_coin = collected_coin
self.collected_gem = collected_gem
self.power_up_mode = power_up_mode
self.anim_freq = 5 # hard-coded
# decide whether to flip asset horizontally
self.is_facing_right = True
if self.vx < 0:
self.is_facing_right = False
# decide which of the two walk/climb asset to use
self.walk1_mode = True
if (self.time_alive // self.anim_freq) % 2 != 0:
self.walk1_mode = False
self.pose = self.get_pose()
# kwargs are ignored
# self.__dict__.update(**kwargs)
def get_pose(self):
if self.is_killed:
return "hit"
if self.ladder:
if self.walk1_mode:
return "climb1"
else:
return "climb2"
if self.vy != 0:
return "jump"
if self.spring != 0:
return "duck"
if self.vx == 0:
return "stand"
if self.walk1_mode:
return "walk1"
else:
return "walk2"
def asdict(self):
return {
"x": self.x,
"y": self.y,
"vx": self.vx,
"vy": self.vy,
"time_alive": self.time_alive,
"ladder": self.ladder,
"spring": self.spring,
"is_killed": self.is_killed,
"killed_animation_frame_cnt": self.killed_animation_frame_cnt,
"finished_level_frame_cnt": self.finished_level_frame_cnt,
"killed_monster": self.killed_monster,
"bumped_head": self.bumped_head,
"collected_coin": self.collected_coin,
"collected_gem": self.collected_gem,
"power_up_mode": self.power_up_mode,
"anim_freq": self.anim_freq,
"is_facing_right": self.is_facing_right,
"walk1_mode": self.walk1_mode,
"pose": self.pose,
}
def __repr__(self):
return json.dumps(self.asdict())
class Monster:
def __init__(
self,
m_id,
x,
y,
vx=0.0,
vy=0.0,
theme=0,
is_flying=False,
is_walking=False,
is_jumping=False,
is_dead=False,
time=0,
anim_freq=1,
monster_dying_frame_cnt=0,
**kwargs,
):
self.m_id = m_id
self.x = x
self.y = y
self.vx = vx
self.vy = vy
self.theme = theme # monster type (saw, snail, slime, etc.)
self.is_flying = is_flying
self.is_walking = is_walking
self.is_jumping = is_jumping
self.is_dead = is_dead
self.time = time
self.anim_freq = anim_freq
self.monster_dying_frame_cnt = monster_dying_frame_cnt
# decide which of the two walk/climb asset to use
self.walk1_mode = True
if self.is_jumping:
# for jumping monster, walk1 asset is decided by vertical speed
if self.vy != 0:
self.walk1_mode = False
elif (self.time // self.anim_freq) % 2 != 0:
self.walk1_mode = False
def asdict(self):
return {
"m_id": self.m_id,
"x": self.x,
"y": self.y,
"vx": self.vx,
"vy": self.vy,
"theme": self.theme,
"is_flying": self.is_flying,
"is_walking": self.is_walking,
"is_jumping": self.is_jumping,
"is_dead": self.is_dead,
"time": self.time,
"anim_freq": self.anim_freq,
"monster_dying_frame_cnt": self.monster_dying_frame_cnt,
"walk1_mode": self.walk1_mode,
}
def __repr__(self):
return json.dumps(self.asdict())
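if __name__ == "__main__":
    # Hedged example, not part of the original module: a default Game (no frames, no
    # maze) serializes to JSON via asdict()/__repr__, the same format that save_json()
    # writes and load_json() reads back.
    print(Game())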
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/coinrun/game.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import numpy as np
from PIL import Image
DEATH_ANIM_LENGTH = 30
FINISHED_LEVEL_ANIM_LENGTH = 20
MONSTER_DEATH_ANIM_LENGTH = 3
SPACE = "."
LADDER = "="
LAVA_SURFACE = "^"
LAVA_MIDDLE = "|"
WALL_SURFACE = "S"
WALL_MIDDLE = "A"
WALL_CLIFF_LEFT = "a"
WALL_CLIFF_RIGHT = "b"
COIN_OBJ1 = "1"
COIN_OBJ2 = "2"
CRATE_NORMAL = "#"
CRATE_DOUBLE = "$"
CRATE_SINGLE = "&"
CRATE_WARNING = "%"
def define_semantic_color_map(max_label=18):
assert max_label in [18, 21, 22], f"max_label {max_label} is not supported!"
semantic_color_map = {}
semantic_color_map["background"] = 0
# alien is always set to max_label (assumes it always appear in a video)
semantic_color_map["alien"] = max_label
if max_label == 18:
semantic_color_map["world"] = {
WALL_MIDDLE: 3,
WALL_SURFACE: 4,
WALL_CLIFF_LEFT: 5,
WALL_CLIFF_RIGHT: 6,
COIN_OBJ1: 17,
COIN_OBJ2: 0,
CRATE_NORMAL: 8,
CRATE_DOUBLE: 8,
CRATE_SINGLE: 8,
CRATE_WARNING: 8,
LAVA_MIDDLE: 1,
LAVA_SURFACE: 2,
LADDER: 7,
}
semantic_color_map["shield"] = 0
semantic_color_map["monster"] = {
"sawHalf": 16,
"bee": 15,
"slimeBlock": 14,
"slimeBlue": 13,
"mouse": 12,
"snail": 11,
"ladybug": 10,
"wormPink": 9,
"barnacle": 0,
"frog": 0,
}
else:
semantic_color_map["world"] = {
WALL_MIDDLE: 3,
WALL_SURFACE: 4,
WALL_CLIFF_LEFT: 5,
WALL_CLIFF_RIGHT: 6,
COIN_OBJ1: 19,
COIN_OBJ2: 20,
CRATE_NORMAL: 8,
CRATE_DOUBLE: 8,
CRATE_SINGLE: 8,
CRATE_WARNING: 8,
LAVA_MIDDLE: 1,
LAVA_SURFACE: 2,
LADDER: 7,
}
semantic_color_map["shield"] = 21
semantic_color_map["monster"] = {
"sawHalf": 16,
"bee": 15,
"slimeBlock": 14,
"slimeBlue": 13,
"mouse": 12,
"snail": 11,
"ladybug": 10,
"wormPink": 9,
"barnacle": 17,
"frog": 18,
}
return semantic_color_map
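# Worked example of the mapping above (the 21-label layout used by MUGENDataset):
#   define_semantic_color_map(21)["alien"]             -> 21
#   define_semantic_color_map(21)["monster"]["bee"]    -> 15
#   define_semantic_color_map(21)["world"][COIN_OBJ1]  -> 19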
def generate_asset_paths(game):
# use background corresponding with ground theme
bgtheme = game.background_themes[game.world_theme_n]
gtheme = game.ground_themes[game.world_theme_n]
walls = "kenney/Ground/" + gtheme + "/" + gtheme.lower()
# default option with fixed agent look
atheme = game.agent_themes[game.agent_theme_n]
alien = "kenneyLarge/Players/128x256_no_helmet/" + atheme + "/alien" + atheme
alien_paths = {"Mugen": alien}
tiles = "kenney/Tiles/"
items = "kenneyLarge/Items/"
enemy = "kenneyLarge/Enemies/"
asset_files = {}
asset_files["background"] = bgtheme
asset_files["world"] = {
WALL_MIDDLE: walls + "Center.png",
WALL_SURFACE: walls + "Mid.png",
WALL_CLIFF_LEFT: walls + "Cliff_left.png",
WALL_CLIFF_RIGHT: walls + "Cliff_right.png",
COIN_OBJ1: items + "coinGold.png",
COIN_OBJ2: items + "gemRed.png",
CRATE_NORMAL: tiles + "boxCrate.png",
CRATE_DOUBLE: tiles + "boxCrate_double.png",
CRATE_SINGLE: tiles + "boxCrate_single.png",
CRATE_WARNING: tiles + "boxCrate_warning.png",
LAVA_MIDDLE: tiles + "lava.png",
LAVA_SURFACE: tiles + "lavaTop_low.png",
LADDER: tiles + "ladderMid.png",
}
asset_files["alien"] = {}
for alien_name in alien_paths.keys():
asset_files["alien"][alien_name] = {
"walk1": alien_paths[alien_name] + "_walk1.png",
"walk2": alien_paths[alien_name] + "_walk2.png",
"climb1": alien_paths[alien_name] + "_climb1.png",
"climb2": alien_paths[alien_name] + "_climb2.png",
"stand": alien_paths[alien_name] + "_stand.png",
"jump": alien_paths[alien_name] + "_jump.png",
"duck": alien_paths[alien_name] + "_duck.png",
"hit": alien_paths[alien_name] + "_hit.png",
}
asset_files["shield"] = "bubble_shield.png"
game.flatten_monster_names()
# monster assets are generated based on list of names used at rendering
asset_files["monster"] = {
name: enemy + name + ".png" for name in game.flattened_monster_names
}
return asset_files
# binarize alpha channel if input img is in RGBA mode, set anything above 0 to 255
def binarize_alpha_channel(img):
if img.mode != "RGBA":
return img
w, h = img.size
for i in range(w):
for j in range(h):
pixel = img.getpixel((i, j))
# set alpha to 255 if alpha > 0
if pixel[3] > 0:
img.putpixel((i, j), (pixel[0], pixel[1], pixel[2], 255))
return img
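# Worked example: any non-zero alpha is pushed to 255, fully transparent pixels stay 0.
#   img = Image.new("RGBA", (1, 1), (5, 5, 5, 10))
#   binarize_alpha_channel(img).getpixel((0, 0))  # -> (5, 5, 5, 255)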
class Asset:
def __init__(
self,
name,
file,
asset_root,
kind="world",
kx=80,
ky=80,
semantic_color=(0, 0, 0),
flip=False,
binarize_alpha=False,
):
self.name = name
self.file = file
self.asset_root = asset_root
self.kind = kind
self.kx = kx
self.ky = ky
self.semantic_color = semantic_color
self.flip = flip
self.binarize_alpha = binarize_alpha
self.load_asset()
def load_asset(self):
asset_path = os.path.join(self.asset_root, self.file)
if not os.path.isfile(asset_path):
# basically remove the '_walk1' postfix
fallback_path = (
"_".join(asset_path.split("_")[:-1]) + "." + asset_path.split(".")[-1]
)
assert os.path.isfile(fallback_path), asset_path
asset_path = fallback_path
self.asset = Image.open(asset_path)
        # used for (user control) asset swap, because alien h:w == 2:1 while other assets are 1:1
        # the asset resize at loading and the render grid size all need to change accordingly
self.aspect_ratio = self.asset.size[1] / self.asset.size[0]
if self.kind == "world":
if self.name != LAVA_MIDDLE and self.name != LAVA_SURFACE:
# LAVA has a special way of rendering animation so don't resize now
self.asset = self.asset.resize(
(math.ceil(self.kx + 0.5), math.ceil(self.ky + 0.5))
)
elif self.kind == "alien":
self.asset = self.asset.resize(
(math.ceil(self.kx), math.ceil(self.aspect_ratio * self.ky))
)
elif self.kind == "shield":
self.asset = self.asset.resize(
(math.ceil(self.kx * 1.15), math.ceil(self.ky * 2.1))
)
elif self.kind == "monster" or self.kind == "background":
self.asset = self.asset.resize((math.ceil(self.kx), math.ceil(self.ky)))
else:
raise NotImplementedError(f"Unknown asset kind {self.kind}")
# flip if needed (for facing left/right)
if self.flip:
self.asset = self.asset.transpose(Image.FLIP_LEFT_RIGHT)
if self.binarize_alpha:
self.asset = binarize_alpha_channel(self.asset)
def load_assets(
asset_files, asset_root, semantic_color_map, kx=80, ky=80, gen_original=False
):
asset_map = {}
for kind in asset_files.keys():
assert kind in semantic_color_map
if kind == "background":
# background will be loaded separately
continue
if kind == "shield":
# asset file for the bubble shield in agent power-up mode
asset_map[kind] = Asset(
name=kind,
file=asset_files[kind],
asset_root=asset_root,
kind=kind,
kx=kx,
ky=ky,
semantic_color=semantic_color_map[kind],
binarize_alpha=not gen_original,
)
continue
for key in asset_files[kind].keys():
if kind == "world":
# ground asset, no need to worry about pose or facing
asset_map[key] = Asset(
name=key,
file=asset_files[kind][key],
asset_root=asset_root,
kind=kind,
kx=kx,
ky=ky,
semantic_color=semantic_color_map[kind][key],
binarize_alpha=not gen_original,
)
elif kind == "alien":
for pose in asset_files[kind][key].keys():
# facing right is default to empty
all_facings = ["", "_left"]
for facing in all_facings:
a_key = key + "_" + pose + facing
asset_map[a_key] = Asset(
name=a_key,
file=asset_files[kind][key][pose],
asset_root=asset_root,
kind=kind,
kx=kx,
ky=ky,
semantic_color=semantic_color_map[kind],
flip=(facing != ""), # flip the asset if facing is not ''
binarize_alpha=not gen_original,
)
elif kind == "monster":
# for monsters, 3 types of assets will be loaded
# for each of them, facing can be left or right
all_poses = ["", "_move", "_dead"] # walk1 is default to empty
all_facings = ["", "_right"] # facing left is default to empty
base_fn = os.path.splitext(asset_files[kind][key])[
0
] # e.g. Enemies/bee
for pose in all_poses:
for facing in all_facings:
m_key = key + pose + facing
file_name = base_fn + pose + ".png"
asset_map[m_key] = Asset(
name=m_key,
file=file_name,
asset_root=asset_root,
kind="monster",
kx=kx,
ky=ky,
semantic_color=semantic_color_map[kind][key],
flip=(facing != ""), # flip the asset if facing is not ''
binarize_alpha=not gen_original,
)
else:
raise NotImplementedError(f"Unknown asset kind {kind}")
return asset_map
# load background asset, zoom is different so need a separate function
def load_bg_asset(asset_files, asset_root, semantic_color_map, zx, zy):
kind = "background"
bg_asset = Asset(
name=kind,
file=asset_files[kind],
asset_root=asset_root,
kind=kind,
kx=zx,
ky=zy,
semantic_color=semantic_color_map[kind],
)
return bg_asset
# used for alien dying animation in gen_original mode
def get_transparent_asset(input_asset, transparency):
assert input_asset.mode == "RGBA"
np_asset = np.array(input_asset, dtype=np.int16)
np_asset[:, :, 3] -= transparency
np_asset[:, :, 3] = np.clip(np_asset[:, :, 3], 0, None)
return Image.fromarray(np_asset.astype(np.uint8))
# return rect in integer values, floor for x1,y1, ceil for x2,y2 or w,h
def integer_rect(rect):
return [
math.floor(rect[0]),
math.floor(rect[1]),
math.ceil(rect[2]),
math.ceil(rect[3]),
]
def convert_xywh_to_xyxy(rect):
return [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]]
def convert_xyxy_to_xywh(rect):
return [rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1]]
# rect format is xywh, img_size is (w,h)
def check_out_of_bounds(rect, img_size):
if rect[0] + rect[2] < 0:
return True
if rect[0] > img_size[0]:
return True
if rect[1] + rect[3] < 0:
return True
if rect[1] > img_size[1]:
return True
return False
# return intersect of two rects, input and output are both in xywh format
def intersect_rects(rect1, rect2):
xyxy_rect1 = convert_xywh_to_xyxy(rect1)
xyxy_rect2 = convert_xywh_to_xyxy(rect2)
xyxy_res_rect = [
max(xyxy_rect1[0], xyxy_rect2[0]),
max(xyxy_rect1[1], xyxy_rect2[1]),
min(xyxy_rect1[2], xyxy_rect2[2]),
min(xyxy_rect1[3], xyxy_rect2[3]),
]
xywh_res_rect = convert_xyxy_to_xywh(xyxy_res_rect)
# check if the intersection is empty
if xywh_res_rect[2] > 0 and xywh_res_rect[3] > 0:
return xywh_res_rect
else:
return None
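# Worked example of the rect helpers above (rects are xywh unless noted, img_size is (w, h)):
#   convert_xywh_to_xyxy([2, 3, 4, 5])              -> [2, 3, 6, 8]
#   intersect_rects([0, 0, 10, 10], [5, 5, 10, 10]) -> [5, 5, 5, 5]
#   check_out_of_bounds([-20, 0, 10, 10], (64, 64)) -> True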
# rect is in the format of xywh
def paint_color_in_rect_with_mask(
img, rect, color, mask, gen_original=False, ignore_mask=False, cut_mask_top_ratio=0
):
w, h = mask.size
img_w, img_h = img.size
# in some cases, mask size doesn't match the rect (e.g. monster dying)
if rect[2] != w or rect[3] != h:
if not gen_original:
mask = mask.resize((rect[2], rect[3]), resample=Image.NEAREST)
else:
mask = mask.resize((rect[2], rect[3]))
w, h = mask.size
if not gen_original:
# generate semantic map
if ignore_mask and cut_mask_top_ratio != 0:
# specifically for agent because its asset has a large empty area in the top,
# we don't want it to be fully masked
if cut_mask_top_ratio < 0:
                # automatically calculate the first non-empty row from the top
np_mask = np.array(mask)
cut_mask_top_rows = (np_mask.T[0].sum(axis=0) != 0).argmax(axis=0)
else:
cut_mask_top_rows = int(cut_mask_top_ratio * rect[2])
rect[1] += cut_mask_top_rows
rect[3] = mask.size[1] - cut_mask_top_rows
img = img.paste(color, convert_xywh_to_xyxy(rect))
else:
# paste in single color if generating semantic maps (so not original)
# if ignore_mask, this will generate a complete block mask same as rect
img = img.paste(
color,
convert_xywh_to_xyxy(rect),
mask if (mask.mode == "RGBA" and not ignore_mask) else None,
)
else:
# generate rgb data
img = img.paste(
mask, convert_xywh_to_xyxy(rect), mask if mask.mode == "RGBA" else None
)
return
def draw_game_frame(
game,
frame_id,
asset_map,
kx,
ky,
gen_original=False,
bbox_smap_for_agent=False,
bbox_smap_for_monsters=False,
alien_name=None,
skip_foreground=False,
skip_background=False,
skip_mugen=False,
only_mugen=False,
):
# set default alien name/key
if alien_name is None:
alien_name = "Mugen"
# initialize an empty image (all zero, for background)
if not gen_original:
img = Image.new("L", (game.video_res, game.video_res))
else:
img = Image.new("RGB", (game.video_res, game.video_res))
video_center = (game.video_res - 1) // 2
frame = game.frames[frame_id]
# for agent-centric
# dx = -frame.agent.x * kx + video_center - 0.5 * kx
# dy = frame.agent.y * ky - video_center - 0.5 * ky
# for video data (no vertical camera move)
dx = -frame.agent.x * kx + video_center - 0.5 * kx
# different dy/ky ratio based on zoom level, to adjust camera view
if game.zoom == 5.5:
dy_ratio = 5.0
elif game.zoom == 4.3:
dy_ratio = 6.5
elif game.zoom == 5.0:
dy_ratio = 5.5
elif game.zoom == 6.0:
dy_ratio = 4.5
else:
raise NotImplementedError(f"zoom level {game.zoom} is not supported!")
dy = -video_center + dy_ratio * ky
# update background image with proper zoom for gen_original mode
# NOTE: if desired background label is not zero, set it here to asset_map['background'].semantic_color
if gen_original and not skip_background and not only_mugen:
zx = game.video_res * game.zoom
zy = zx
for tile_x in range(-1, 3):
for tile_y in range(-1, 2):
bg_rect = [0, 0, zx, zy]
bg_rect[0] = (
zx * tile_x
+ video_center
+ game.bgzoom * (dx + kx * game.maze_h / 2)
- zx * 0.5
)
bg_rect[1] = (
zy * tile_y
+ video_center
+ game.bgzoom * (dy - ky * game.maze_h / 2)
- zy * 0.5
)
if check_out_of_bounds(bg_rect, img.size):
continue
img.paste(
asset_map["background"].asset,
convert_xywh_to_xyxy(integer_rect(bg_rect)),
)
    # NOTE: game engine now hard-codes 64 for maze_size
radius = int(1 + game.maze_w / game.zoom)
ix = int(frame.agent.x + 0.5)
iy = int(frame.agent.y + 0.5)
x_start = max(ix - radius, 0)
x_end = min(ix + radius + 1, game.maze_w)
y_start = max(iy - radius, 0)
y_end = min(iy + radius + 1, game.maze_h)
win_h = game.video_res
# convert eaten coins to a set for faster checking coordinates
coins_eaten_set = {tuple(coin_coord) for coin_coord in frame.coins_eaten}
if not skip_background and not only_mugen:
for y in range(y_start, y_end):
for x in range(x_start, x_end):
wkey = game.maze[y][x]
if wkey == SPACE:
continue
# eaten coins is treated the same as SPACE, just continue
# but we should not modify the coins in maze to SPACE, or it may cause inconsistency
# if we ever need to render backwards or save json after drawing
if (x, y) in coins_eaten_set:
continue
assert wkey in asset_map, f"{wkey} not in assets!"
tile_rect = [
kx * x + dx - 0.1,
win_h - ky * y + dy - 0.1,
kx + 0.5 + 0.2,
ky + 0.5 + 0.2,
]
# skip tile if the rect is completely out-of-bounds
if check_out_of_bounds(tile_rect, img.size):
continue
if wkey == LAVA_MIDDLE or wkey == LAVA_SURFACE:
d1 = tile_rect[:]
d2 = tile_rect[:]
asset_size = asset_map[wkey].asset.size
sr = [0, 0, asset_size[0], asset_size[1]]
sr1 = sr[:]
sr2 = sr[:]
tr = frame.state_time * 0.1
tr -= int(tr)
tr *= -1
d1[0] += tr * tile_rect[2]
d2[0] += tile_rect[2] + tr * tile_rect[2]
sr1[0] += -tr * asset_size[0]
sr2[0] += -asset_size[0] - tr * asset_size[0]
d1 = intersect_rects(d1, tile_rect)
d2 = intersect_rects(d2, tile_rect)
if d1 is not None:
d1[2] += 0.5
if d2 is not None:
d2[0] -= 0.5
d2[2] += 0.5
sr1 = intersect_rects(sr1, sr)
sr2 = intersect_rects(sr2, sr)
if sr1 is not None and d1 is not None:
# crop and render one half of the asset
crop_mask = asset_map[wkey].asset.crop(
integer_rect(convert_xywh_to_xyxy(sr1))
)
paint_color_in_rect_with_mask(
img,
integer_rect(d1),
asset_map[wkey].semantic_color,
crop_mask,
gen_original=gen_original,
)
if sr2 is not None and d2 is not None:
# crop and render the other half of the asset (swapped places horizontally)
crop_mask = asset_map[wkey].asset.crop(
integer_rect(convert_xywh_to_xyxy(sr2))
)
paint_color_in_rect_with_mask(
img,
integer_rect(d2),
asset_map[wkey].semantic_color,
crop_mask,
gen_original=gen_original,
)
else:
paint_color_in_rect_with_mask(
img,
integer_rect(tile_rect),
asset_map[wkey].semantic_color,
asset_map[wkey].asset,
gen_original=gen_original,
)
if not skip_foreground:
if not only_mugen:
# paint monsters
for mi in range(len(frame.monsters)):
if frame.monsters[mi].is_dead:
dying_frame_cnt = max(0, frame.monsters[mi].monster_dying_frame_cnt)
monster_shrinkage = (
(MONSTER_DEATH_ANIM_LENGTH - dying_frame_cnt)
* 0.8
/ MONSTER_DEATH_ANIM_LENGTH
)
monster_rect = [
math.floor(kx * frame.monsters[mi].x + dx),
math.floor(
win_h
- ky * frame.monsters[mi].y
+ dy
+ ky * monster_shrinkage
),
math.ceil(kx),
math.ceil(ky * (1 - monster_shrinkage)),
]
else:
monster_rect = [
math.floor(kx * frame.monsters[mi].x + dx),
math.floor(win_h - ky * frame.monsters[mi].y + dy),
math.ceil(kx),
math.ceil(ky),
]
m_name = game.flattened_monster_names[frame.monsters[mi].theme]
# add pose and facing to the key to find correct asset
m_pose = "" if frame.monsters[mi].walk1_mode else "_move"
if frame.monsters[mi].is_dead:
m_pose = "_dead"
m_key = (
m_name + m_pose + ("_right" if frame.monsters[mi].vx > 0 else "")
)
paint_color_in_rect_with_mask(
img,
monster_rect,
asset_map[m_key].semantic_color,
asset_map[m_key].asset,
gen_original=gen_original,
ignore_mask=bbox_smap_for_monsters,
)
if not skip_mugen:
# paint agent - do it after monsters so agent is always in front
a_key = (
alien_name
+ "_"
+ frame.agent.pose
+ ("" if frame.agent.is_facing_right else "_left")
)
# note how aspect_ratio is used for alien rect, this can be applied to
# monster rect to support asset that's not 1:1 (e.g. use alien as monster)
alien_rect = [
math.floor(kx * frame.agent.x + dx),
# math.floor(win_h - ky * (frame.agent.y + 1) + dy), # default for 2:1 alien, no asset swap
math.floor(
win_h
- ky * (frame.agent.y + asset_map[a_key].aspect_ratio - 1)
+ dy
),
math.ceil(kx),
# math.ceil(2 * ky), # default for 2:1 alien, no asset swap
math.ceil(asset_map[a_key].aspect_ratio * ky),
]
if frame.agent.is_killed:
transparency = (
DEATH_ANIM_LENGTH + 1 - frame.agent.killed_animation_frame_cnt
) * 12
# only render if not fully transparent
if transparency > 255:
agent_asset = None
else:
if gen_original:
agent_asset = get_transparent_asset(
asset_map[a_key].asset, transparency
)
else:
# when generating semantic map, alien mask won't change unless fully transparent
agent_asset = asset_map[a_key].asset
else:
agent_asset = asset_map[a_key].asset
if agent_asset is not None:
paint_color_in_rect_with_mask(
img,
alien_rect,
asset_map[a_key].semantic_color,
agent_asset,
gen_original=gen_original,
ignore_mask=bbox_smap_for_agent,
cut_mask_top_ratio=0.8,
)
# paint the bubble shield if agent is in power-up mode
if frame.agent.power_up_mode:
shield_rect = [
# NOTE: game engine hard-codes 7 and 8 for co-ordinates which won't work with video-res that's not 1024
# (for training we usually generate with 256 or 128 video_res), so need to convert them
math.floor(kx * frame.agent.x + dx - 7 * game.video_res / 1024),
math.floor(
win_h
- ky * (frame.agent.y + 1)
+ dy
+ 8 * game.video_res / 1024
),
math.ceil(kx * 1.15),
math.ceil(ky * 2.1),
]
# pull bubble down when Mugen crouches
if frame.agent.pose == "duck":
shield_rect[1] += math.floor(8 * game.video_res / 1024)
paint_color_in_rect_with_mask(
img,
shield_rect,
asset_map["shield"].semantic_color,
asset_map["shield"].asset,
gen_original=gen_original,
ignore_mask=bbox_smap_for_agent,
cut_mask_top_ratio=0.45,
)
return img
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/coinrun/construct_from_json.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
class Sequence:
def __init__(
self, start_frame, end_frame, pose_type, start_x, start_y, end_x, end_y
):
self.start_frame = start_frame
self.end_frame = end_frame
# 'ground' includes 'walk', 'duck', 'stand'; other types are 'climb', 'jump', 'hit'
self.pose_type = pose_type
self.start_x = start_x
self.start_y = start_y
self.end_x = end_x
self.end_y = end_y
self.time_jumps = 1 if pose_type == "jump" else 0
self.end_maze_above = "."
self.end_maze_below = "."
self.num_coins_eaten = 0
self.num_gems_eaten = 0
self.start_shield = False
self.end_shield = False
self.changed_shield = False
self.killed_monsters = []
self.jump_over_monsters = []
self.killed_by = ""
self.text_desc = ""
        # Decide granularity of the text description (skip sequences shorter than this)
self.min_len_for_text_desc = 5
def asdict(self):
return {
"start_frame": self.start_frame,
"end_frame": self.end_frame,
"pose_type": self.pose_type,
"start_xy": (self.start_x, self.start_y),
"end_xy": (self.end_x, self.end_y),
"bumped_head": self.is_bumped_head(),
"same_level_jump": self.is_same_level_jump(),
"num_coins_eaten": self.num_coins_eaten,
"num_gems_eaten": self.num_gems_eaten,
"start_shield": self.start_shield,
"end_shield": self.end_shield,
"changed_shield": self.changed_shield,
"killed_monsters": self.killed_monsters,
"jump_over_monsters": self.jump_over_monsters,
"killed_by": self.killed_by,
"text_desc": self.text_desc,
}
def __repr__(self):
return json.dumps(self.asdict())
# bumped head will show as 'walk' pose and last for 1-2 frames
def is_bumped_head(self):
if (
self.pose_type == "ground"
and (self.end_frame - self.start_frame <= 1)
and self.end_maze_below in ".12"
): # and self.end_maze_above in 'SAab'
return True
return False
def is_same_level_jump(self):
if self.pose_type == "jump" and abs(self.end_y - self.start_y) <= 0.5:
return True
return False
def merge_sequences(self, sequences):
self.end_frame = sequences[-1].end_frame
self.end_x = sequences[-1].end_x
self.end_y = sequences[-1].end_y
self.end_maze_above = sequences[-1].end_maze_above
self.end_maze_below = sequences[-1].end_maze_below
for seq in sequences:
if seq.is_bumped_head():
self.time_jumps -= 1
self.time_jumps += seq.time_jumps
self.num_coins_eaten += seq.num_coins_eaten
self.num_gems_eaten += seq.num_gems_eaten
self.killed_monsters.extend(seq.killed_monsters)
self.jump_over_monsters.extend(seq.jump_over_monsters)
def process_metadata(self, game):
# generate game.flattened_monster_names if not already
# this is used to get monster names
if len(game.flattened_monster_names) == 0:
game.flatten_monster_names()
# count number of coins and gems eaten during the sequence
# start from one frame earlier (if not 0) so we can get change in the first frame
start_frame_id = max(self.start_frame - 1, 0)
if len(game.frames[self.end_frame].coins_eaten) > len(
game.frames[start_frame_id].coins_eaten
):
start_coin_set = {
(coord[0], coord[1])
for coord in game.frames[start_frame_id].coins_eaten
}
end_coin_set = {
(coord[0], coord[1])
for coord in game.frames[self.end_frame].coins_eaten
}
new_coins_eaten = end_coin_set - start_coin_set
for coin_coord in new_coins_eaten:
if game.maze[coin_coord[1]][coin_coord[0]] == "2":
self.num_gems_eaten += 1
else:
self.num_coins_eaten += 1
# check if Mugen changes between shield up and down mode during the sequence
self.start_shield = game.frames[self.start_frame].agent.power_up_mode
self.end_shield = game.frames[self.end_frame].agent.power_up_mode
shield_up_mode = False
shield_down_mode = False
for frame_id in range(self.start_frame, self.end_frame + 1):
if game.frames[frame_id].agent.power_up_mode:
shield_up_mode = True
else:
shield_down_mode = True
if shield_up_mode and shield_down_mode:
self.changed_shield = True
end_frame_id = min(self.end_frame + 2, len(game.frames))
for frame_id in range(self.start_frame, end_frame_id):
frame = game.frames[frame_id]
dead_monsters = set()
for i, m in enumerate(frame.monsters):
if m.is_dead:
dead_monsters.add(i)
# if more monsters are killed, record the monster killed and the frame id
if frame_id > self.start_frame and len(dead_monsters) > len(
prev_dead_monsters
):
killed_monster_theme = frame.monsters[
list(dead_monsters - prev_dead_monsters)[0]
].theme
self.killed_monsters.append(
game.flattened_monster_names[killed_monster_theme]
)
prev_dead_monsters = dead_monsters.copy()
# figure out which monster killed Mugen
killed_by_m_id = -1
if self.pose_type == "hit":
# check the monster distance in the first frame of hit sequence
m_min_dist = 1000 # just put some random large dist here
for m in game.frames[self.start_frame].monsters:
x_dist = self.start_x - m.x
y_dist = self.start_y - m.y
m_dist = x_dist * x_dist + y_dist * y_dist
if m_dist < m_min_dist:
killed_by_m_id = m.theme
m_min_dist = m_dist
if killed_by_m_id != -1:
self.killed_by = game.flattened_monster_names[killed_by_m_id]
# check for monsters jumped over
if self.pose_type == "jump":
# for purpose of checking jumped over monsters,
# ground y is fixed at the y coordinate of the previous frame
# note for jump sequence, start_y already recorded the location before jump starts
ground_y = round(self.start_y)
jump_over_monsters_set = set()
for frame_id in range(self.start_frame, self.end_frame + 1):
frame = game.frames[frame_id]
# this is the location below the agent at the same y level when jump starts
ground_loc = (round(frame.agent.x), ground_y)
for i, m in enumerate(frame.monsters):
if (round(m.x), round(m.y)) == ground_loc:
# use set to avoid adding duplicates
jump_over_monsters_set.add(i)
            # now convert these into names, but only keep those that are still not killed by the next frame
for m_i in jump_over_monsters_set:
if not game.frames[end_frame_id - 1].monsters[m_i].is_dead:
self.jump_over_monsters.append(
game.flattened_monster_names[frame.monsters[m_i].theme]
)
def generate_text_desc(self):
# only generate if sequence is long enough
if self.end_frame - self.start_frame < self.min_len_for_text_desc:
self.text_desc = ""
elif self.pose_type == "hit":
if self.killed_by != "":
self.text_desc = f"killed by a {self.killed_by}"
else:
self.text_desc = "killed by a monster"
else:
y_direct = ""
if self.end_y - self.start_y > 0.5:
y_direct = " up"
elif self.start_y - self.end_y > 0.5:
y_direct = " down"
else:
y_direct = " a bit" if self.pose_type == "ground" else ""
x_direct = ""
if self.end_x - self.start_x > 0.5:
x_direct = " to the right"
elif self.start_x - self.end_x > 0.5:
x_direct = " to the left"
else:
x_direct = " a bit" if self.pose_type == "ground" else ""
if self.pose_type == "climb":
self.text_desc = f"climbs{y_direct} on a ladder"
elif self.pose_type == "ground":
self.text_desc = f"walks{x_direct}" # TODO: add random verbs
elif self.pose_type == "jump":
jump_time_desc = ""
if self.time_jumps >= 2:
jump_time_desc = " a few times"
# only add jump destination if it's not a same level jump
jump_dest_desc = ""
if y_direct != "":
if self.end_maze_below in "SAab":
if self.end_y < 1.5:
jump_dest_desc = " to the ground"
else:
jump_dest_desc = " to a platform"
elif self.end_maze_below in "#$&%":
jump_dest_desc = " to a crate"
elif self.end_maze_below == "=":
jump_dest_desc = " to a ladder"
# add desc for monsters jumped over
jumped_over_desc = ""
if len(self.jump_over_monsters) > 0:
jumped_over_desc = " over a " + " and a ".join(
self.jump_over_monsters
)
self.text_desc = f"jumps{y_direct}{jump_time_desc}{x_direct}{jumped_over_desc}{jump_dest_desc}"
if self.num_coins_eaten > 0 or self.num_gems_eaten > 0:
self.text_desc += self.generate_collect_coin_desc()
if len(self.killed_monsters) > 0:
self.text_desc += " and killed a " + " and a ".join(
self.killed_monsters
)
def generate_collect_coin_desc(self):
if self.num_coins_eaten == 0 and self.num_gems_eaten == 0:
return ""
coin_descs = []
# add coin description if collected at least one coin
if self.num_coins_eaten == 1:
coin_descs.append(" a coin")
elif self.num_coins_eaten > 1:
coin_descs.append(" a few coins")
# add gem description if collected at least one gem
if self.num_gems_eaten == 1:
coin_descs.append(" a gem")
elif self.num_gems_eaten > 1:
coin_descs.append(" a few gems")
# connects descriptions for coins and gems with 'and'
coin_descs = " and".join(coin_descs)
# shield change should only be a result of eating gem or coin
if self.changed_shield:
coin_descs += self.generate_shield_desc()
return f" and collects{coin_descs}"
def generate_shield_desc(self):
if not self.start_shield and self.end_shield:
return " to turn on the shield"
elif self.start_shield and not self.end_shield:
return " to turn off the shield"
else:
# start and end in the same shield state but still changed shield during sequence
if self.start_shield:
return " to turn shield off then on again"
else:
return " to turn shield on then off again"
def process_sequence(game, curr_pose_type, start_i, curr_i, last_seq=False):
# different type of pose, construct a sequence
# for 'jump', the start and end locations are based on the frame before the first and the frame after the last
# for others, it's the first and last frame
if curr_pose_type == "jump":
pos_start_frame = max(start_i - 1, 0)
pos_end_frame = curr_i
else:
pos_start_frame = start_i
# curr_i will be one frame after, unless it's the last sequence of video
# however, for jump sequence, we do want one frame after to know where jump lands
pos_end_frame = curr_i - 1 if not last_seq else curr_i
seq = Sequence(
start_frame=start_i,
end_frame=curr_i - 1 if not last_seq else curr_i,
pose_type=curr_pose_type,
start_x=game.frames[pos_start_frame].agent.x,
start_y=game.frames[pos_start_frame].agent.y,
end_x=game.frames[pos_end_frame].agent.x,
end_y=game.frames[pos_end_frame].agent.y,
)
seq.end_maze_above = game.maze[round(seq.end_y) + 1][round(seq.end_x)]
seq.end_maze_below = game.maze[round(seq.end_y) - 1][round(seq.end_x)]
# sometimes jump may end a bit over the edge of cliff, this is to catch and fix that
if curr_pose_type == "jump" and seq.end_maze_below in ".12":
neighbor_x = (
int(seq.end_x) * 2 + 1 - round(seq.end_x)
) # get the opposite of round()
seq.end_maze_below = game.maze[round(seq.end_y) - 1][neighbor_x]
return seq
def convert_game_to_text_desc(game, start_idx=0, end_idx=-1, alien_name="Mugen"):
if alien_name is None:
alien_name = "Mugen"
# if end_idx is not specified, set it to end of the game level
if end_idx == -1:
end_idx = len(game.frames)
start_idx = max(0, start_idx)
end_idx = min(len(game.frames), end_idx)
sequences = []
for i, f in enumerate(game.frames[start_idx:end_idx]):
pose = f.agent.pose.strip("12")
if pose in ["walk", "duck", "stand"]:
pose_type = "ground"
else:
pose_type = pose
if i == 0:
# first frame, initialize some status
start_i = 0
curr_pose_type = pose_type
continue
if pose_type == curr_pose_type:
# same type of pose, same sequence
continue
else:
seq = process_sequence(
game, curr_pose_type, start_idx + start_i, start_idx + i, last_seq=False
)
sequences.append(seq)
start_i = i
curr_pose_type = pose_type
# add the last leftover sequence
seq = process_sequence(
game, curr_pose_type, start_idx + start_i, start_idx + i, last_seq=True
)
sequences.append(seq)
# collapse two jumps into one sequence
# first pass, merge jumps before and after bumped head, this is to correctly identify jumps at the same level
seq_i = 0
reduced_sequences = []
while seq_i < len(sequences):
if seq_i == 0 or seq_i == len(sequences) - 1:
reduced_sequences.append(sequences[seq_i])
seq_i += 1
elif (
sequences[seq_i].is_bumped_head()
and reduced_sequences[-1].pose_type == "jump"
and sequences[seq_i + 1].pose_type == "jump"
):
# in case of bumped head, merge the jumps before and after
reduced_sequences[-1].merge_sequences(sequences[seq_i : seq_i + 2])
seq_i += 2
else:
reduced_sequences.append(sequences[seq_i])
seq_i += 1
sequences = reduced_sequences
# second pass, collapse two jumps into one sequence if they're both same level jumps
# jump up and down are not merged (unless it's separated by bumped head that will be merged in first pass)
result_sequences = []
seq_i = 0
max_ground_seq_len_to_merge = 5
while seq_i < len(sequences):
# only merge if it's a 'ground' sequence, and before/after are both jumps
if (
sequences[seq_i].pose_type != "ground"
or seq_i == 0
or seq_i == len(sequences) - 1
):
result_sequences.append(sequences[seq_i])
seq_i += 1
elif (
result_sequences[-1].pose_type != "jump"
or sequences[seq_i + 1].pose_type != "jump"
):
result_sequences.append(sequences[seq_i])
seq_i += 1
elif (
result_sequences[-1].is_same_level_jump()
and sequences[seq_i + 1].is_same_level_jump()
and (
sequences[seq_i].end_frame - sequences[seq_i].start_frame
< max_ground_seq_len_to_merge
)
):
# not bumped head, then only merge if sequence is short enough, and both jumps are the same level
result_sequences[-1].merge_sequences(sequences[seq_i : seq_i + 2])
seq_i += 2
else:
result_sequences.append(sequences[seq_i])
seq_i += 1
sequences = result_sequences
# generate text description for each sequence
text_descriptions = []
for seq in sequences:
seq.process_metadata(game)
seq.generate_text_desc()
if seq.text_desc != "":
text_descriptions.append(seq.text_desc)
# prepend the agent's name, then join the sequence descriptions with ', and'
final_text_desc = alien_name + " " + ", and ".join(text_descriptions)
return final_text_desc
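# --- Illustrative usage sketch (not part of the original file) ---
# Shows only how convert_game_to_text_desc stitches per-sequence descriptions
# into the final caption; the descriptions below are hypothetical stand-ins,
# since constructing a real `game` object requires the Game/Frame classes
# defined elsewhere in this repo.
if __name__ == "__main__":
    example_descriptions = [
        "walks to the right",
        "jumps up to a platform and collects a coin",
        "killed by a monster",
    ]
    print("Mugen " + ", and ".join(example_descriptions))
    # Mugen walks to the right, and jumps up to a platform and collects a coin, and killed by a monster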
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/data/coinrun/generate_text_desc.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from torchmultimodal.models.video_vqvae import (
preprocess_int_conv_params,
VideoDecoder,
VideoEncoder,
)
from torchmultimodal.models.vqvae import VQVAE
from torchmultimodal.utils.common import load_module_from_url, remove_grad
MUGEN_PRETRAINED_MAPPING = {
"mugen_L32": "https://download.pytorch.org/models/multimodal/mugen/mugen_video_vqvae_L32.pt",
"mugen_L16": "https://download.pytorch.org/models/multimodal/mugen/mugen_video_vqvae_L16.pt",
"mugen_L8": "https://download.pytorch.org/models/multimodal/mugen/mugen_video_vqvae_L8.pt",
}
def video_vqvae_mugen(
in_channel_dim: int = 3,
encoder_hidden_dim: int = 240,
encoder_kernel_size: int = 3,
n_res_layers: int = 4,
attn_hidden_dim: int = 240,
num_embeddings: int = 2048,
embedding_dim: int = 256,
decoder_hidden_dim: int = 240,
decoder_kernel_size: int = 3,
pretrained_model_key: Optional[str] = None,
freeze_model: bool = False,
) -> VQVAE:
"""Constructor for MUGEN's Video VQVAE. Expects input video data of shape ``{8,16,32}x256x256``.
Trained for tokenization of video data and use in video-audio-text retrieval and generation tasks.
See Hayes et al. 2022 for more details: https://arxiv.org/pdf/2204.08058.pdf
Code ref:
https://github.com/mugen-org/MUGEN_baseline/blob/main/lib/models/video_vqvae/vqvae.py
https://github.com/mugen-org/MUGEN_baseline/blob/main/generation/experiments/vqvae/VideoVQVAE_L32.sh
Args:
in_channel_dim (int, optional): Size of channel dim in input. Defaults to ``3``.
encoder_hidden_dim (int, optional): Size of channel dims in encoder conv layers. Defaults to ``240``.
encoder_kernel_size (int, optional): Kernel size for encoder. Defaults to ``3``.
n_res_layers (int, optional): Number of ``AttentionResidualBlocks`` to include in encoder and decoder.
Defaults to ``4``.
attn_hidden_dim (int, optional): Size of hidden dim of
:class:`~torchmultimodal.models.video_vqvae.AttentionResidualBlocks`. Defaults to ``240``.
num_embeddings (int, optional): Number of embedding vectors used in
:class:`~torchmultimodal.modules.layers.codebook.Codebook`. Defaults to ``2048``.
embedding_dim (int, optional): Dimensionality of embedding vectors in
:class:`~torchmultimodal.modules.layers.codebook.Codebook`. Defaults to ``256``.
decoder_hidden_dim (int, optional): Size of channel dims in decoder conv transpose layers.
Defaults to ``240``.
decoder_kernel_size (int, optional): Kernel size for decoder. Defaults to ``3``.
pretrained_model_key (str, optional): Load a specified MUGEN VQVAE checkpoint.
freeze_model (bool): Whether to freeze the weights of the pretrained model. Defaults to ``False``.
Returns:
An instance of :class:`~torchmultimodal.models.vqvae.VQVAE` constructed with:
* :class:`~torchmultimodal.model.video_vqvae.VideoEncoder`
* :class:`~torchmultimodal.model.video_vqvae.VideoDecoder`
"""
encoder_strides = ((2, 2, 2), (2, 2, 2), (1, 2, 2), (1, 2, 2), (1, 2, 2), (1, 1, 1))
decoder_strides = ((2, 2, 2), (2, 2, 2), (1, 2, 2), (1, 2, 2), (1, 2, 2))
encoder_n_layers = len(encoder_strides)
decoder_n_layers = len(decoder_strides)
encoder_in_channel_dims = (in_channel_dim,) + (encoder_hidden_dim,) * max(
encoder_n_layers - 1, 0
)
decoder_out_channel_dims = (decoder_hidden_dim,) * max(decoder_n_layers - 1, 0) + (
in_channel_dim,
)
encoder_kernel_sizes_fixed = preprocess_int_conv_params(
encoder_in_channel_dims, encoder_kernel_size
)
decoder_kernel_sizes_fixed = preprocess_int_conv_params(
decoder_out_channel_dims, decoder_kernel_size
)
encoder = VideoEncoder(
encoder_in_channel_dims,
encoder_kernel_sizes_fixed,
encoder_strides,
embedding_dim,
n_res_layers,
attn_hidden_dim,
)
decoder = VideoDecoder(
decoder_out_channel_dims,
decoder_kernel_sizes_fixed,
decoder_strides,
embedding_dim,
n_res_layers,
attn_hidden_dim,
)
model = VQVAE(encoder, decoder, num_embeddings, embedding_dim)
if pretrained_model_key is not None:
if pretrained_model_key not in MUGEN_PRETRAINED_MAPPING.keys():
raise KeyError(f"Invalid pretrained model key: {pretrained_model_key}")
load_module_from_url(model, MUGEN_PRETRAINED_MAPPING[pretrained_model_key])
if freeze_model:
remove_grad(model)
return model
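# --- Illustrative usage sketch (not part of the original file) ---
# Builds the MUGEN Video VQVAE with its default hyperparameters (no pretrained
# key, so no weights are downloaded) and checks the latent shape implied by the
# encoder strides: time is downsampled by 4 and each spatial dim by 32. The
# latent_shape() method is the one used by the text_video_gpt builder.
if __name__ == "__main__":
    model = video_vqvae_mugen()
    print(model.latent_shape((16, 256, 256)))  # expected (4, 8, 8)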
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/generation/video_vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Tuple
import torch
from examples.mugen.generation.video_vqvae import video_vqvae_mugen
from torch import nn, Tensor
from torchmultimodal.models.gpt import (
MultimodalGPT,
MultimodalTransformerDecoder,
RightShift,
TransformerDecoder,
TransformerDecoderLayer,
)
from torchmultimodal.modules.layers.attention import SelfAttention
from torchmultimodal.modules.layers.position_embedding import (
BroadcastedPositionEmbedding,
)
from torchmultimodal.utils.common import load_module_from_url
from torchtext.transforms import CharBPETokenizer
PRETRAINED_TOKENIZER_ENCODER_URL = "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/tokenizer-coinrun_1024_encoder.json"
PRETRAINED_TOKENIZER_MERGES_URL = "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/tokenizer-coinrun_1024_merges.txt"
PRETRAINED_TEXT_VIDEO_GPT_URL_MAPPING = {
"mugen_L32": "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/text_video_gpt_L32_weights-17db9549.pth",
"mugen_L16": "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/text_video_gpt_L16_weights-5dfc5a0a.pth",
"mugen_L8": "https://pytorch.s3.amazonaws.com/models/multimodal/mugen/text_video_gpt_L8_weights-72b6d2ab.pth",
}
def text_video_gpt(
text_seq_len: int = 128,
video_seq_len: int = 32,
resolution: int = 256,
downsample: Tuple[int, int, int] = (4, 32, 32),
d_model: int = 768,
n_head: int = 8,
dropout: float = 0.2,
attn_dropout: float = 0.3,
num_decoder_layers: int = 12,
use_gpt_init: bool = True,
pretrained_text_tokenizer_encoder_url: str = PRETRAINED_TOKENIZER_ENCODER_URL,
pretrained_text_tokenizer_merges_url: str = PRETRAINED_TOKENIZER_MERGES_URL,
pretrained_video_vqvae_model_key: Optional[str] = None,
pretrained_text_video_gpt_model_key: Optional[str] = None,
) -> MultimodalGPT:
"""Builds a text-to-video GPT model from user inputs
Parameter defaults follow MUGEN project:
* Video VQVAE: https://github.com/mugen-org/MUGEN_baseline/tree/main/generation/experiments/vqvae
* GPT: https://github.com/mugen-org/MUGEN_baseline/blob/main/lib/models/gpt/gpt.py#L252
Args:
text_seq_len (int): Length of text sequences after padding. Defaults to ``128``.
video_seq_len (int): Length of video sequences sampled from the dataset. Defaults to ``32``. Other
values used by MUGEN are ``8``, ``16``.
resolution (int): Resolution of the sampled video sequences defining height and width of each frame.
Defaults to ``256``.
downsample (Tuple[int, int, int]): Ratio by which to downsample the sampled sequences along each dimension.
For example, if the original frame is ``(32, 256, 256)``, after downsampling by ``(4, 32, 32)`` the
new frame will be of shape ``(8, 8, 8)`` with each dim divided by the rate of downsample. Defaults to
``(4, 32, 32)``.
d_model (int): Dimension of the underlying transformer decoder.
See :py:class:`torchmultimodal.models.gpt.TransformerDecoderLayer`. Defaults to ``768``.
n_head (int): Number of attention heads used by the transformer decoder. Defaults to ``8``.
dropout (float): Dropout probability used by the projection layer of the transformer decoder.
Defaults to ``0.2``.
attn_dropout (float): Dropout probability used by the attention layer of the transformer decoder.
Defaults to ``0.3``.
num_decoder_layers (int): Number of transformer decoder layers. Defaults to ``12``.
use_gpt_init (bool): Whether to use the parameter initialization of the GPT model. Defaults to ``True``.
pretrained_text_tokenizer_encoder_url (str): Remote location of the pretrained text tokenizer encoder file.
Defaults to `"MUGEN pretrained tokenizer encoder file
"<https://pytorch.s3.amazonaws.com/models/multimodal/mugen/tokenizer-coinrun_1024_encoder.json>`_.
pretrained_text_tokenizer_merges_url (str): Remote location of the pretrained text tokenizer merges file.
Defaults to `"MUGEN pretrained tokenizer merges file
"<https://pytorch.s3.amazonaws.com/models/multimodal/mugen/tokenizer-coinrun_1024_merges.txt>`_.
pretrained_video_vqvae_model_key (str, optional): Key to select the pretrained MUGEN VideoVQVAE weights
file. For allowed values, see :py:module:`examples/mugen/generation/video_vqvae.py`.
Defaults to ``None``.
pretrained_text_video_gpt_model_key (str, optional): Key to select the pretrained MUGEN TextVideoGPT
weights file. The provided key should match that of MUGEN VideoVQVAE to ensure the two models were
pretrained for the same video sequence length. For example ``L32`` means the video sequence length
is ``32``. The loaded weights will override those from the frozen VideoVQVAE model.
Defaults to ``None``.
Returns:
An instance of :py:class:`torchmultimodal.models.gpt.MultimodalGPT`.
"""
# builds text tokenizer from pre-trained
tokenizer = CharBPETokenizer(
bpe_encoder_path=pretrained_text_tokenizer_encoder_url,
bpe_merges_path=pretrained_text_tokenizer_merges_url,
unk_token="[UNK]",
special_tokens=["[PAD]", "[CLS]", "[SEP]", "[UNK]", "[MASK]"],
)
# builds text tokenizer
text_tokenizer = TextTokenizer(
context_len=text_seq_len,
d_model=d_model,
tokenizer=tokenizer,
)
num_text_tokens = text_tokenizer.num_text_tokens
# builds video tokenizer
video_vqvae = video_vqvae_mugen(
pretrained_model_key=pretrained_video_vqvae_model_key,
freeze_model=True,
)
video_vqvae.eval()
num_video_tokens = video_vqvae.num_embeddings # size of the codebook
# derives the expected latent shape from video input shape
video_input_shape = (video_seq_len, resolution, resolution)
video_latent_shape = latent_shape(video_input_shape, downsample)
video_vqvae_latent_shape = video_vqvae.latent_shape(video_input_shape)
# video vqvae will apply convolutions to the input shape which effectively
# reduces the size by ``dim//stride`` after each layer
# sanity check that the expected and actual latent shapes are consistent
if video_latent_shape != video_vqvae_latent_shape:
raise ValueError(
f"Latent shape derived from video inputs: {video_latent_shape} "
f"does not match that of video vqvae: {video_vqvae_latent_shape}"
)
# builds text embedding projection: text_emb is already of output shape `d_model`
# generally a projection layer is needed to bridge the tokenizer and
# `torchmultimodal.models.gpt.MultimodalTransformerDecoder`, see `video_projection`
text_projection = nn.Identity()
# builds video embedding projection
video_projection = nn.Linear(video_vqvae.embedding_dim, d_model, bias=False)
# builds multimodal decoder
text_pos_emb = nn.Embedding(text_seq_len, d_model)
video_pos_emb = BroadcastedPositionEmbedding(video_latent_shape, d_model)
attention_layer = SelfAttention(attn_dropout=attn_dropout)
decoder_layer = TransformerDecoderLayer(
d_model, n_head, dropout, attn_module=attention_layer
)
decoder = TransformerDecoder(decoder_layer, num_decoder_layers)
right_shift = RightShift(d_model)
mm_decoder = MultimodalTransformerDecoder(
text_pos_emb, video_pos_emb, decoder, right_shift
)
model = MultimodalGPT(
d_model=d_model,
num_in_tokens=num_text_tokens,
num_out_tokens=num_video_tokens,
latent_shape=video_latent_shape,
in_tokenizer=text_tokenizer,
out_tokenizer=video_vqvae,
mm_decoder=mm_decoder,
in_projection=text_projection,
out_projection=video_projection,
use_gpt_init=use_gpt_init,
)
if pretrained_text_video_gpt_model_key is not None:
if (
pretrained_text_video_gpt_model_key
not in PRETRAINED_TEXT_VIDEO_GPT_URL_MAPPING
):
raise KeyError(
f"Invalid pretrained model key: {pretrained_text_video_gpt_model_key}"
)
load_module_from_url(
model,
PRETRAINED_TEXT_VIDEO_GPT_URL_MAPPING[pretrained_text_video_gpt_model_key],
)
return model
def latent_shape(
input_shape: Tuple[int, ...], downsample: Tuple[int, ...]
) -> Tuple[int, ...]:
"""Derives latent shape of video inputs after VQ-VAE encoding"""
return tuple([s // d for s, d in zip(input_shape, downsample)])
class TextTokenizer(nn.Module):
"""Converts between text and tokens / embedings
Wrapper around the tokenizer to be consistent with the API required by
:py:class:`torchmultimodal.models.gpt.MultimodalGPT`. It also contains the
embedding layer to enable lookup by token ids.
"""
def __init__(
self,
context_len: int,
d_model: int,
tokenizer: nn.Module,
) -> None:
super().__init__()
self.tokenizer = tokenizer
self.pad_id = self.tokenizer.encode("[PAD]")[0] # type: ignore
self.vocab_size = self.tokenizer.vocab_size # type: ignore
self.context_len = context_len
# MUGEN treats padding as unique ids, so we add them to the total text token count
# https://github.com/mugen-org/MUGEN_baseline/blob/main/lib/models/gpt/gpt.py#L44
self.num_text_tokens = self.vocab_size + context_len
self.embedding = nn.Embedding(self.num_text_tokens, d_model)
def text_to_tokens(self, sentences: List[str]) -> Tensor:
"""Pads the sentences to be of equal lengths"""
tokens = [
self.tokenizer.encode(sentence.strip().lower() + " [SEP]") # type: ignore
for sentence in sentences
]
token_ids = [t[: self.context_len] for t in tokens]
# pad each sentence to be of length `context_len`
for i, t in enumerate(token_ids):
t += [self.pad_id] * (self.context_len - len(t))
token_ids[i] = t
return torch.Tensor(token_ids).type(torch.int64)
def encode(self, sentences: List[str], device: str) -> Tensor:
"""Encodes sentences to token ids"""
token_ids = self.text_to_tokens(sentences).to(device)
# bump padding token ids by vocab_size so that they do not coincide with un-padded token ids
# and that the padding token ids themselves are unique
unique_pad_ids = torch.arange(self.context_len, device=device) + self.vocab_size
token_ids = torch.where(token_ids == self.pad_id, unique_pad_ids, token_ids)
return token_ids
def _filter_token_ids(self, token_ids: List[int]) -> List[Optional[int]]:
"""Filters out token ids out side of vocab"""
return [
token_id
for token_id in token_ids
if token_id > 0 and token_id <= self.vocab_size
]
def decode(self, token_ids: Tensor) -> List[str]:
"""Decodes token ids back to sentences"""
sentences = []
for _token_ids in token_ids: # iterate over batches
_token_ids = self._filter_token_ids(_token_ids.tolist())
sentence = self.tokenizer.decode(_token_ids) # type: ignore
sentences.append(sentence)
return sentences
def lookup(self, token_ids: Tensor) -> Tensor:
return self.embedding(token_ids)
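# --- Illustrative usage sketch (not part of the original file) ---
# Calling text_video_gpt() downloads tokenizer files and (optionally) VQVAE
# weights, so this sketch only exercises the latent_shape helper used above to
# sanity-check the VQVAE against the builder's `downsample` default.
if __name__ == "__main__":
    print(latent_shape((32, 256, 256), (4, 32, 32)))  # (8, 8, 8)
    print(latent_shape((16, 256, 256), (4, 32, 32)))  # (4, 8, 8)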
| EXA-1-master | exa/libraries/multimodal-main/examples/mugen/generation/text_video_gpt.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchmultimodal.utils.file_io import _get_path_manager
_PATH_MANAGER = _get_path_manager()
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from torch import Tensor
def get_extended_attention_mask(attention_mask: Tensor) -> Tensor:
"""Makes attention masks broadcastable along head and sequence dimensions.
Accepts two types of attention masks:
- Causal: masks that prevent attending to future positions of dimensions
``(batch_size, query_seq_len, key_seq_len)``
- Padding: masks that prevent attending to token paddings of dimensions
``(batch_size, seq_len)``
Args:
attention_mask (Tensor):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
Returns:
extended_attention_mask (Tensor):
The broadcastable attention mask, with the same dtype as ``attention_mask.dtype``.
"""
if attention_mask.dim() == 4:
# Mask has already been broadcasted to the correct shape (either
# [batch_size, num_heads, query_seq_length, key_seq_length] for causal case or
# [batch_size, num_heads, seq_length, seq_length] for padding case)
extended_attention_mask = attention_mask
elif attention_mask.dim() == 3:
# We can provide a self-attention mask of dimensions [batch_size, query_seq_length, key_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads,
# [batch_size, num_heads, query_seq_length, key_seq_length].
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for attention_mask (shape {})".format(attention_mask.shape)
)
extended_attention_mask = extended_attention_mask.to(
dtype=attention_mask.dtype
) # fp16 compatibility
return extended_attention_mask
def get_causal_attention_mask(
tgt_seq_len: int, src_seq_len: Optional[int] = None
) -> Tensor:
"""
Generates causal attention masks of dimensions (target_sequence_length, source_sequence_length).
"""
if src_seq_len is None:
src_seq_len = tgt_seq_len
return torch.tril(torch.ones(tgt_seq_len, src_seq_len))
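# --- Illustrative usage sketch (not part of the original file) ---
# A causal mask for a length-4 sequence and a (batch_size, seq_len) padding
# mask broadcast to (batch_size, 1, 1, seq_len) for use across heads and
# query positions.
if __name__ == "__main__":
    print(get_causal_attention_mask(4))  # lower-triangular 4x4 matrix of ones
    padding_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.float)
    print(get_extended_attention_mask(padding_mask).shape)  # torch.Size([2, 1, 1, 4])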
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/attention.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Any, List, NamedTuple, Optional, Tuple
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torchmultimodal.utils.attention import get_causal_attention_mask
class SampleOutput(NamedTuple):
"""Outputs from :meth:`~torchmultimodal.utils.generate.GenerationUtil.sample`.
Attributes:
decoded (Any): Generated sample data for the output modality.
tokens (Tensor): Generated tokens ``(b, seq_len)`` for the output modality before being decoded
back to data.
model_outputs (Tuple[Any, ...]): A tuple of length ``seq_len`` containing output objects from
the model's forward pass at each step of generation.
"""
decoded: Any
tokens: Tensor
model_outputs: Tuple[Any, ...]
class GenerationUtil:
"""Utility class containing functions for multimodal auto-regressive generation.
This class wraps around a ``nn.Module`` to generate data of one modality given
inputs from another. While being agnostic to the architecture of the wrapped model,
the latter needs to implement APIs to:
* encode/decode between data and token representations
* look up embeddings given token ids
* compute scores for prediction
See :class:`~torchmultimodal.models.gpt.MultimodalGPT` for the API details.
Args:
model (nn.Module): Model that is wrapped for generation.
Attributes:
num_in_tokens (int): Number of unique token states for the input modality.
num_out_tokens (int): Number of unique token states for the output modality.
"""
def __init__(self, model: nn.Module) -> None:
if model.training:
model = model.eval()
warnings.warn(f"{type(model)} is now switched to 'eval' mode.")
self.model = model
self.num_in_tokens = model.num_in_tokens
self.num_out_tokens = model.num_out_tokens
@torch.no_grad()
def sample(
self,
x: Tensor,
max_seq_len: int,
use_cache: bool = True,
causal: bool = False,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
**model_kwargs: Any,
) -> SampleOutput:
"""Generates samples of the output modality based on multinomial distribution.
Args:
x (Tensor): Tensor of batched input, i.e., prompt for the generation.
max_seq_len (int): Maximum length of the sequence to generate. For high dimensional data
this should be equivalent to the length of the flattened encoded sequence.
use_cache (bool, optional): If ``True``, key/values of the attention layers will be cached to
speed up generation. Defaults to ``True``.
causal (bool, optional): If ``True``, use causal attention. Defaults to ``False``.
top_k (int, optional): Number of tokens with the highest probability to keep.
Defaults to ``None``.
top_p (float, optional): Threshold that determines the top tokens to keep in terms of
cumulative probability. Defaults to ``None``.
return_attn_weights (bool, optional): If ``True``, returns attention probabilities of each transformer
layer. Defaults to ``False``.
return_hidden_states (bool, optional): If ``True``, returns the embeddings of each transformer layer.
Defaults to ``False``.
model_kwargs (Any): Additional model specific kwargs will be forwarded to the ``forward``
function of the model.
Returns:
An instance of :class:`~torchmultimodal.utils.generate.SampleOutput`.
"""
in_tokens = self.model.encode(x, "in", **model_kwargs) # type: ignore
batch_size, in_seq_len = in_tokens.shape
attn_mask = get_causal_attention_mask(in_seq_len) # (in_seq_len, in_seq_len)
# Construct step-wise logits mask
logits_mask = get_logits_mask(
in_seq_len=0,
num_in_tokens=self.num_in_tokens, # type: ignore
out_seq_len=1,
num_out_tokens=self.num_out_tokens, # type: ignore
)
# Feed the input modality tokens `(b, in_seq_len)` through the model's transformer to learn
# the intermediate context vectors (i.e., key/value).
# The sequence is shifted to the right by one unit so that the predicted token at each location
# along the sequence is based off the previous token.
# Note that the first token is predicted from the learnt start-of-sentence ("sos") token which
# gets prepended to the sequence after the position embedding layer.
# Attention mask is required to avoid attending to future positions along the sequence.
# See :class:`~torchmultimodal.models.gpt.RightShift` for more implementation details.
_ = self.model.fwd( # type: ignore
in_tokens=in_tokens,
attn_mask=attn_mask,
use_cache=use_cache,
causal=causal,
right_shift=True,
return_attn_weights=return_attn_weights,
return_hidden_states=return_hidden_states,
)
model_outputs: Tuple[Any, ...] = ()
samples: List[Tensor] = []
idx = 0
while idx < max_seq_len:
# Attention mask is not required as the cached key/value sequence is only up to the
# current step
if idx == 0:
# Take the last token of the input modality as the "sos" token for the output modality
out = self.model(
in_tokens=in_tokens[:, -1:],
in_pos_ids=torch.tensor([in_seq_len - 1]).unsqueeze(0),
logits_mask=logits_mask,
use_cache=use_cache,
causal=causal,
right_shift=False,
return_attn_weights=return_attn_weights,
return_hidden_states=return_hidden_states,
)
else:
out = self.model(
out_tokens=samples[-1],
out_pos_ids=torch.tensor([idx - 1]).unsqueeze(0),
logits_mask=logits_mask,
use_cache=use_cache,
causal=causal,
right_shift=False,
return_attn_weights=return_attn_weights,
return_hidden_states=return_hidden_states,
)
assert hasattr(out, "logits"), f"{type(out)} does not have field 'logits'"
logits = out.logits
logits_view = logits.view(-1, logits.shape[-1]) # (b, num_tokens)
logits_view = self._filter_logits(logits_view, top_k=top_k, top_p=top_p)
probs = F.softmax(logits_view, dim=-1)
samples.append(torch.multinomial(probs, 1) - self.num_in_tokens) # (b, 1)
model_outputs = model_outputs + (out,)
idx += 1
samples = torch.cat(samples, dim=1)
decoded = self.model.decode(samples) # type: ignore
return SampleOutput(
decoded=decoded, tokens=samples, model_outputs=model_outputs
)
def _filter_logits(
self, logits: Tensor, top_k: Optional[int] = None, top_p: Optional[float] = None
) -> Tensor:
logits_filters: List[Any] = []
if top_k is not None:
logits_filters.append(LogitsFilterTopK(top_k))
if top_p is not None:
logits_filters.append(LogitsFilterTopP(top_p))
for _filter in logits_filters:
logits = _filter(logits)
return logits
def get_logits_mask(
in_seq_len: int = 0,
out_seq_len: int = 0,
num_in_tokens: int = 0,
num_out_tokens: int = 0,
) -> Tensor:
"""Applies masks to logits to restrict prediction from being made of tokens of the opposite modality.
Args:
in_seq_len (int, optional): Length of input modality sequence from the logits tensor. Defaults to ``0``.
out_seq_len (int, optional): Length of output modality sequence from the logits tensor.
Defaults to ``0``.
num_in_tokens (int, optional): Number of input modality token states from the model. Defaults to ``0``.
num_out_tokens (int, optional): Number of output modality token states from the model.
Defaults to ``0``.
Returns:
Logits mask tensor containing ``1``s for unmasked positions and ``0``s for masked ones.
"""
mask = torch.zeros(in_seq_len + out_seq_len, num_in_tokens + num_out_tokens)
# the quadrant of (input modality sequence, input token states) should not be masked,
# and similarly for (output modality sequence, output token states)
mask[in_seq_len:, num_in_tokens:] = 1
mask[:in_seq_len, :num_in_tokens] = 1
return mask
class LogitsFilterTopK:
"""Filters a distribution of logits using top_k
Code reference: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Args:
top_k (int, optional): Keeps the top_k tokens with the highest probability (top_k filtering).
Defaults to ``None``.
filter_value (float, optional): Constant value to filter unwanted logits. Defaults to ``-inf``.
min_tokens_to_keep (int, optional): Minimum number of tokens to keep per batch example in the output.
Defaults to ``1``.
Raises:
ValueError: If 'top_k' is outside of valid numerical ranges.
"""
def __init__(
self,
top_k: Optional[int] = None,
min_tokens_to_keep: int = 1,
filter_value: float = -float("inf"),
) -> None:
if top_k is not None and top_k < 0:
raise ValueError(f"'top_k' must be non-negative but got {top_k}.")
self.min_tokens_to_keep = min_tokens_to_keep
self.filter_value = filter_value
self.top_k = top_k
def __call__(self, logits: Tensor) -> Tensor:
"""
Args:
logits (Tensor): Logits distribution shape ``(b, num_tokens)`` where ``b`` is batch size,
``num_tokens`` is the number of tokens.
Returns:
Filtered logits tensor.
"""
if self.top_k == 0:
return logits
top_k = min(
max(self.top_k, self.min_tokens_to_keep), logits.size(-1)
) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1:] # (b, 1)
logits[indices_to_remove] = self.filter_value
return logits
class LogitsFilterTopP:
"""Filters a distribution of logits using nucleus (top_p) filtering
Code reference: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Args:
top_p (float, optional): Keeps the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751).
Defaults to ``None``.
filter_value (float, optional): Constant value to filter unwanted logits. Defaults to ``-inf``.
min_tokens_to_keep (int, optional): Minimum number of tokens to keep per batch example in the output.
Defaults to ``1``.
Raises:
ValueError: If 'top_p' is outside of valid numerical ranges.
"""
def __init__(
self,
top_p: Optional[float] = None,
min_tokens_to_keep: int = 1,
filter_value: float = -float("inf"),
) -> None:
if top_p is not None and (top_p > 1.0 or top_p < 0.0):
raise ValueError(f"'top_p' must be within `[0.0, 1.0]` but got {top_p}.")
self.min_tokens_to_keep = min_tokens_to_keep
self.filter_value = filter_value
self.top_p = top_p
def __call__(self, logits: Tensor) -> Tensor:
"""
Args:
logits (Tensor): Logits distribution shape ``(b, num_tokens)`` where ``b`` is batch size,
``num_tokens`` is the number of tokens.
Returns:
Filtered logits tensor.
"""
if self.top_p == 1.0:
return logits
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (tokens with 0 probability are kept)
sorted_indices_to_remove = cumulative_probs > self.top_p
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add
# the first one below)
sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(
1, sorted_indices, sorted_indices_to_remove
)
logits[indices_to_remove] = self.filter_value
return logits
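# --- Illustrative usage sketch (not part of the original file) ---
# Exercises the standalone pieces above: a step-wise logits mask that only
# allows output-modality token states to be predicted, followed by top-k and
# top-p filtering of a random batch of logits. Running GenerationUtil.sample()
# itself additionally requires a MultimodalGPT model.
if __name__ == "__main__":
    mask = get_logits_mask(in_seq_len=0, out_seq_len=1, num_in_tokens=4, num_out_tokens=6)
    print(mask)  # shape (1, 10): zeros over the 4 input-token states, ones over the 6 output-token states
    logits = torch.randn(2, 10)
    logits = LogitsFilterTopK(top_k=3)(logits)
    logits = LogitsFilterTopP(top_p=0.9)(logits)
    print(logits)  # all but a few entries per row are now -inf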
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/generate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sized
def assert_equal_lengths(
*args: Sized, msg: str = "iterable arguments must have same length."
) -> None:
lengths = set()
for item in args:
lengths.add(len(item))
if len(lengths) != 1:
raise ValueError(msg)
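# --- Illustrative usage sketch (not part of the original file) ---
# Tiny check of assert_equal_lengths on matching and mismatching iterables.
if __name__ == "__main__":
    assert_equal_lengths((1, 2), ["a", "b"])  # passes: both have length 2
    try:
        assert_equal_lengths((1, 2), ["a"])
    except ValueError as e:
        print(e)  # iterable arguments must have same length.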
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/assertion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import Tensor
from torch.distributed import all_gather as all_gather_no_backprop
from torch.distributed.nn.functional import all_gather as all_gather_with_backprop
def gather_tensor(tensor: Tensor, backprop_in_gather: bool = True) -> List[Tensor]:
"""Gathers a tensor across all GPUs.
Args:
tensor (Tensor): Tensors that need to be gathered.
backprop_in_gather (bool): Whether to backpropagate the gradients from
all_gather to all workers (versus just the local worker). Defaults
to ``True``.
Returns:
List[Tensor]: List of gathered tensors across all GPUs.
"""
world_size = torch.distributed.get_world_size()
# This uses the all_gather from torch.distributed.nn.functional,
# which backpropagates gradients to all workers
if backprop_in_gather:
return all_gather_with_backprop(tensor)
# Otherwise just backprop to the current worker
# This means that the image gradients on a given worker will only
# consider the text samples from the same worker
else:
tensor_all_gpus = [torch.zeros_like(tensor) for _ in range(world_size)]
all_gather_no_backprop(tensor_all_gpus, tensor)
tensor_all_gpus[torch.distributed.get_rank()] = tensor
return tensor_all_gpus
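# --- Illustrative usage sketch (not part of the original file) ---
# gather_tensor needs an initialized process group. Below is a minimal
# single-process CPU setup with the gloo backend; the init_method address is
# an arbitrary local choice. With world_size=1 the returned list has one entry.
if __name__ == "__main__":
    torch.distributed.init_process_group(
        backend="gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1
    )
    gathered = gather_tensor(torch.randn(2, 3), backprop_in_gather=False)
    print(len(gathered), gathered[0].shape)  # 1 torch.Size([2, 3])
    torch.distributed.destroy_process_group()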
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/distributed.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from collections import OrderedDict
from copy import deepcopy
from dataclasses import fields
from typing import Any, Callable, List, Tuple, Union
import torch
from torch import nn, Tensor
from torch.utils.checkpoint import checkpoint
from torchmultimodal import _PATH_MANAGER
def get_current_device() -> Union[str, torch.device]:
if torch.cuda.is_available() and torch.cuda.is_initialized():
return f"cuda:{torch.cuda.current_device()}"
else:
return torch.device("cpu")
def shift_dim(
x: Tensor, src_dim: int = -1, dest_dim: int = -1, make_contiguous: bool = True
) -> Tensor:
"""Permutes tensor x by moving src_dim to dest_dim.
i.e. shift_dim(x, 1, -1) would be (b, c, t, h, w) -> (b, t, h, w, c)
Code taken from VideoGPT
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/utils.py
Args:
x (Tensor): input Tensor you want to permute
src_dim (int, optional): the axis you want to move. Negative indexing supported. Defaults to -1.
dest_dim (int, optional): the axis you want to move to. Negative indexing supported. Defaults to -1.
make_contiguous (bool, optional): if you want the output tensor to be contiguous in memory. Defaults to True.
Returns:
Tensor: permuted Tensor
"""
n_dims = len(x.shape)
# Remap negative dim
if src_dim < 0:
src_dim = n_dims + src_dim
if dest_dim < 0:
dest_dim = n_dims + dest_dim
assert 0 <= src_dim < n_dims and 0 <= dest_dim < n_dims
dims = list(range(n_dims))
del dims[src_dim]
permutation = []
ctr = 0
for i in range(n_dims):
if i == dest_dim:
permutation.append(src_dim)
else:
permutation.append(dims[ctr])
ctr += 1
x = x.permute(permutation)
if make_contiguous:
x = x.contiguous()
return x
def tensor_slice(x: Tensor, begin: List[int], size: List[int]) -> Tensor:
"""Slices a tensor dimension-wise.
The input tensor is sliced along each dimension by specifying the starts and
the increments.
Args:
x (Tensor): tensor to be sliced.
begin (List[int]): list of starts corresponding to each dimension.
size (List[int]): list of increments with respect to the starts along each dimension. Specifically,
``-1`` means slicing from begin to the last element (inclusive) of that dimension.
Returns:
The sliced tensor.
Raises:
ValueError: if any of ``begin`` indices is negative
ValueError: if any of ``size`` is less than ``-1``
"""
if not all([b >= 0 for b in begin]):
raise ValueError("All starting indices must be non-negative.")
if not all([s >= -1 for s in size]):
raise ValueError("All sizes must be either non-negative or -1.")
size = [l - b if s == -1 else s for s, b, l in zip(size, begin, x.shape)]
slices = [slice(b, b + s) for b, s in zip(begin, size)]
return x[slices]
def load_module_from_url(
model: nn.Module, url: str, strict: bool = True, progress: bool = True
) -> None:
local_path = _PATH_MANAGER.get_local_path(url)
if not torch.cuda.is_available():
state_dict = torch.load(local_path, map_location=torch.device("cpu"))
else:
state_dict = torch.load(local_path)
model.load_state_dict(state_dict, strict=strict)
@torch.no_grad()
def remove_grad(model: nn.Module) -> None:
for param in model.parameters():
param.requires_grad = False
@torch.no_grad()
def momentum_update(model: nn.Module, model_m: nn.Module, momentum: float) -> None:
for param, param_m in zip(model.parameters(), model_m.parameters()):
param_m.data = param_m.data * momentum + param.data * (1 - momentum)
class ModelOutput(OrderedDict):
def keys(self) -> Any:
for field in fields(self):
yield field.name
def __getitem__(self, key: Any) -> Any:
return getattr(self, key)
def __iter__(self) -> Any:
yield from self.keys()
def values(self) -> Any:
for field in fields(self):
yield getattr(self, field.name)
def items(self) -> Any:
for field in fields(self):
yield field.name, getattr(self, field.name)
def to_tuple_tuple(
param: Union[int, Tuple[int, ...]], dim_tuple: int, num_tuple: int
) -> Tuple[Tuple[int, ...], ...]:
"""
Convert single integer or single tuple to tuple of tuples.
Used for kernel_size and strides parameters in convolutional models
"""
if isinstance(param, int):
param = (param,) * dim_tuple
if isinstance(param, tuple):
param_fixed = (param,) * num_tuple
return param_fixed
def checkpoint_wrapper(fn: Callable) -> Callable:
"""Decorator to render an nn.Module instance method in checkpointing mode to save memory for training"""
def inner(cls: nn.Module, *inputs: Any, **kwargs: Any) -> Tensor:
if cls.training:
# By default the checkpoint API stashes and restores the RNG state during each checkpointed
# segment such that checkpointed passes making use of RNG (e.g., through dropout, batch norm)
# have deterministic outputs as compared to non-checkpointed passes. This can incur a moderate
# performance hit, which we mitigate by checkpointing either before or after the layer that
# requires RNG.
if "use_cache" in kwargs and kwargs["use_cache"] is True:
warnings.warn(
"Using `cache` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
kwargs["use_cache"] = False
def create_custom_forward(fn: Callable) -> Callable:
# checkpoint API does not accept user defined kwargs so we need to hide them
def custom_forward(*inputs: Any) -> Callable:
return fn(cls, *inputs, **kwargs)
return custom_forward
return checkpoint(create_custom_forward(fn), *inputs)
else:
return fn(cls, *inputs, **kwargs)
return inner
def get_clones(module: nn.Module, n: int) -> nn.ModuleList:
return nn.ModuleList([deepcopy(module) for i in range(n)])
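# --- Illustrative usage sketch (not part of the original file) ---
# Quick demonstration of a few helpers above: moving the channel dim with
# shift_dim, dimension-wise slicing with tensor_slice, expanding an int conv
# parameter with to_tuple_tuple, and an EMA-style momentum_update between a
# model and a frozen momentum copy.
if __name__ == "__main__":
    x = torch.randn(2, 3, 4, 5, 6)  # (b, c, t, h, w)
    print(shift_dim(x, 1, -1).shape)  # torch.Size([2, 4, 5, 6, 3])
    print(tensor_slice(x, begin=[0, 0, 1, 0, 0], size=[-1, -1, 2, -1, -1]).shape)  # torch.Size([2, 3, 2, 5, 6])
    print(to_tuple_tuple(3, dim_tuple=3, num_tuple=2))  # ((3, 3, 3), (3, 3, 3))
    model = nn.Linear(4, 4)
    model_m = deepcopy(model)
    remove_grad(model_m)
    momentum_update(model, model_m, momentum=0.99)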
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/common.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from iopath.common.file_io import HTTPURLHandler, PathManager
def _get_path_manager() -> PathManager:
try:
from torchmultimodal.fb.utils.file_io import FBPathManager, register_handlers
pm = FBPathManager()
register_handlers(pm)
return pm
except ImportError:
pm = PathManager()
pm.register_handler(HTTPURLHandler())
return pm
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/utils/file_io.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, cast, List, Optional, Tuple, Union
from torch import nn, Size, Tensor
from torchmultimodal.models.vqvae import VQVAE
from torchmultimodal.modules.layers.attention import AxialAttentionBlock
from torchmultimodal.modules.layers.conv import SamePadConv3d, SamePadConvTranspose3d
from torchmultimodal.utils.assertion import assert_equal_lengths
from torchmultimodal.utils.common import to_tuple_tuple
def video_vqvae(
in_channel_dim: int,
encoder_hidden_dim: int,
encoder_kernel_size: int,
encoder_stride: int,
encoder_n_layers: int,
n_res_layers: int,
attn_hidden_dim: int,
num_embeddings: int,
embedding_dim: int,
decoder_hidden_dim: int,
decoder_kernel_size: int,
decoder_stride: int,
decoder_n_layers: int,
) -> VQVAE:
"""Generic Video VQVAE builder
Args:
in_channel_dim (int, optional): Size of channel dim in input.
encoder_hidden_dim (int, optional): Size of channel dims in encoder conv layers.
encoder_kernel_size (int, optional): Kernel size for encoder.
encoder_stride (int, optional): Stride for encoder.
encoder_n_layers (int, optional): Number of layers in encoder. Does not include attention stack
and pre-codebook conv layer.
n_res_layers (int, optional): Number of ``AttentionResidualBlocks`` to include in encoder and decoder.
attn_hidden_dim (int, optional): Size of hidden dim of ``AttentionResidualBlocks``.
num_embeddings (int, optional): Number of embedding vectors used in ``Codebook``.
embedding_dim (int, optional): Dimensionality of embedding vectors in ``Codebook``.
decoder_hidden_dim (int, optional): Size of channel dims in decoder conv transpose layers.
decoder_kernel_size (int, optional): Kernel size for decoder.
decoder_stride (int, optional): Stride for decoder.
decoder_n_layers (int, optional): Number of layers in decoder. Does not include attention stack and
post-codebook conv transpose layer.
Returns:
An instance of :class:`~torchmultimodal.models.vqvae.VQVAE` initialized with ``VideoEncoder``,
``Codebook`` and ``VideoDecoder``
"""
encoder_in_channel_dims = (in_channel_dim,) + (encoder_hidden_dim,) * max(
encoder_n_layers - 1, 0
)
decoder_out_channel_dims = (decoder_hidden_dim,) * max(decoder_n_layers - 1, 0) + (
in_channel_dim,
)
# Reformat kernel and strides to be tuple of tuple for encoder/decoder constructors
encoder_kernel_sizes_fixed, encoder_strides_fixed = preprocess_int_conv_params(
encoder_in_channel_dims, encoder_kernel_size, encoder_stride
)
decoder_kernel_sizes_fixed, decoder_strides_fixed = preprocess_int_conv_params(
decoder_out_channel_dims, decoder_kernel_size, decoder_stride
)
encoder = VideoEncoder(
encoder_in_channel_dims,
encoder_kernel_sizes_fixed,
encoder_strides_fixed,
embedding_dim,
n_res_layers,
attn_hidden_dim,
)
decoder = VideoDecoder(
decoder_out_channel_dims,
decoder_kernel_sizes_fixed,
decoder_strides_fixed,
embedding_dim,
n_res_layers,
attn_hidden_dim,
)
return VQVAE(encoder, decoder, num_embeddings, embedding_dim)
class VideoEncoder(nn.Module):
"""Encoder for Video VQVAE.
Stacks specified number of ``SamePadConv3d`` layers
followed by a stack of ``AttentionResidualBlocks`` and a final ``SamePadConv3d``
layer before the codebook. The residual blocks use Axial Attention to enhance
representations of video data without significantly increasing computational
cost.
Follows VideoGPT's implementation:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
in_channel_dims (Tuple[int, ...]): Input channel dimension for each layer in conv stack.
kernel_sizes (Tuple[Tuple[int, int, int], ...]): Kernel sizes for each layer in conv stack.
strides (Tuple[Tuple[int, int, int], ...]): Strides for each layer in conv stack.
output_dim (int): Size of hidden dimension of final output.
n_res_layers (int, optional): Number of ``AttentionResidualBlocks`` to include. Default is ``4``.
attn_hidden_dim (int, optional): Size of hidden dimension in attention block. Default is ``240``.
kwargs (Any): Keyword arguments to be passed into ``SamePadConv3d`` and used by ``nn.Conv3d``.
Raises:
ValueError: If the lengths of ``in_channel_dims``, ``kernel_sizes``, and ``strides`` are not
all equivalent.
"""
def __init__(
self,
in_channel_dims: Tuple[int, ...],
kernel_sizes: Tuple[Tuple[int, int, int], ...],
strides: Tuple[Tuple[int, int, int], ...],
output_dim: int,
n_res_layers: int = 4,
attn_hidden_dim: int = 240,
**kwargs: Any,
):
super().__init__()
assert_equal_lengths(
in_channel_dims,
kernel_sizes,
strides,
msg="in_channel_dims, kernel_sizes, and strides must be same length.",
)
convolutions: List[nn.Module] = []
n_conv_layers = len(in_channel_dims)
for i in range(n_conv_layers):
in_channel = in_channel_dims[i]
out_channel = (
in_channel_dims[i + 1] if i < n_conv_layers - 1 else attn_hidden_dim
)
kernel = kernel_sizes[i]
stride = strides[i]
convolutions.append(
SamePadConv3d(
in_channel, out_channel, kernel, stride, bias=True, **kwargs
)
)
# Do not apply relu to last conv layer before res stack
if i < n_conv_layers - 1:
convolutions.append(nn.ReLU())
self.convs = nn.Sequential(*convolutions)
self.res_stack = nn.Sequential(
*[AttentionResidualBlock(attn_hidden_dim) for _ in range(n_res_layers)],
nn.BatchNorm3d(attn_hidden_dim),
nn.ReLU(),
)
self.conv_out = SamePadConv3d(
attn_hidden_dim, output_dim, kernel_size=1, stride=1
)
def get_latent_shape(self, input_shape: Union[Tuple, Size]) -> Tuple:
"""Return shape of encoder output based on number of downsampling conv layers"""
latent_shape = list(input_shape)
for layer in self.convs: # ignore conv_out since it has a stride of 1
if isinstance(layer, SamePadConv3d):
# SamePadConv should downsample input shape by factor of stride
latent_shape = [
latent_shape[dim] // layer.conv.stride[dim]
for dim in range(len(input_shape))
]
return tuple(latent_shape)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input video data with shape ``(b, c, d1, d2, d3)``.
"""
in_channel = x.shape[1]
if in_channel != self.convs[0].conv.in_channels:
raise ValueError(
f"expected input channel dim to be {self.convs[0].conv.in_channels}, but got {in_channel}"
)
h = self.convs(x)
h = self.res_stack(h)
h = self.conv_out(h)
return h
class VideoDecoder(nn.Module):
"""Decoder for Video VQVAE.
Takes quantized output from codebook and applies a ``SamePadConv3d`` layer, a stack of
``AttentionResidualBlocks``, followed by a specified number of ``SamePadConvTranspose3d``
layers. The residual blocks use Axial Attention to enhance representations of video data
without significantly increasing computational cost.
Follows VideoGPT's implementation:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
out_channel_dims (Tuple[int, ...]): Output channel dimension for each layer in conv stack.
kernel_sizes (Tuple[Tuple[int, int, int], ...]): Kernel sizes for each layer in conv stack.
strides (Tuple[Tuple[int, int, int], ...]): Strides for each layer in conv stack
input_dim (int): Input channel dimension for first conv layer before attention stack
n_res_layers (int): Number of ``AttentionResidualBlocks`` to include. Default is ``4``.
attn_hidden_dim (int): Size of hidden dimension in attention block. Default is ``240``.
kwargs (Any): Keyword arguments to be passed into ``SamePadConvTranspose3d`` and used by
``nn.ConvTranspose3d``.
Raises:
ValueError: If the lengths of ``out_channel_dims``, ``kernel_sizes``, and ``strides`` are not
all equivalent.
"""
def __init__(
self,
out_channel_dims: Tuple[int, ...],
kernel_sizes: Tuple[Tuple[int, int, int], ...],
strides: Tuple[Tuple[int, int, int], ...],
input_dim: int,
n_res_layers: int = 4,
attn_hidden_dim: int = 240,
**kwargs: Any,
):
super().__init__()
assert_equal_lengths(
out_channel_dims,
kernel_sizes,
strides,
msg="out_channel_dims, kernel_sizes, and strides must be same length.",
)
self.conv_in = SamePadConv3d(
input_dim, attn_hidden_dim, kernel_size=1, stride=1
)
self.res_stack = nn.Sequential(
*[AttentionResidualBlock(attn_hidden_dim) for _ in range(n_res_layers)],
nn.BatchNorm3d(attn_hidden_dim),
nn.ReLU(),
)
transpose_convolutions: List[nn.Module] = []
n_conv_layers = len(out_channel_dims)
for i in range(n_conv_layers):
in_channel = out_channel_dims[i - 1] if i > 0 else attn_hidden_dim
out_channel = out_channel_dims[i]
kernel = kernel_sizes[i]
stride = strides[i]
transpose_convolutions.append(
SamePadConvTranspose3d(
in_channel, out_channel, kernel, stride, bias=True, **kwargs
)
)
# Do not apply relu to output convt layer
if i < n_conv_layers - 1:
transpose_convolutions.append(nn.ReLU())
self.convts = nn.Sequential(*transpose_convolutions)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input quantized embeddings with shape ``(b, emb_dim, d1, d2, d3)``.
"""
in_channel = x.shape[1]
if in_channel != self.conv_in.conv.in_channels:
raise ValueError(
f"expected input channel dim to be {self.conv_in.conv.in_channels}, but got {in_channel}"
)
h = self.conv_in(x)
h = self.res_stack(h)
h = self.convts(h)
return h
class AttentionResidualBlock(nn.Module):
"""Residual block with axial attention.
Implements the component as proposed in `"VideoGPT: Video Generation using VQ-VAE and
Transformers (Yan et al. 2022)"<https://arxiv.org/pdf/2104.10157.pdf>`_.
Code reference:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
hidden_dim (int, optional): Size of channel dim of input. Default is ``240``.
n_head (int, optional): Number of heads in multihead attention. Must divide into hidden_dim evenly.
Default is ``2``.
Raises:
ValueError: If ``hidden_dim`` is less than ``2``.
"""
def __init__(self, hidden_dim: int = 240, n_head: int = 2) -> None:
super().__init__()
# To avoid hidden dim becoming 0 in middle layers
if hidden_dim < 2:
raise ValueError("hidden dim must be at least 2")
self.block = nn.Sequential(
nn.BatchNorm3d(hidden_dim),
nn.ReLU(),
SamePadConv3d(hidden_dim, hidden_dim // 2, 3, bias=False),
nn.BatchNorm3d(hidden_dim // 2),
nn.ReLU(),
SamePadConv3d(hidden_dim // 2, hidden_dim, 1, bias=False),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(),
AxialAttentionBlock(3, hidden_dim, n_head), # Video has 3 dims
)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input of shape ``(b, c, d1, d2, d3)``.
"""
return x + self.block(x)
def preprocess_int_conv_params(
channel_dims: Tuple[int, ...],
kernel_sizes: Optional[int] = None,
strides: Optional[int] = None,
) -> Tuple:
"""Reformats conv params from int to tuple of tuple and assigns correct type"""
if kernel_sizes is None and strides is None:
raise ValueError("must specify at least one of kernel_sizes or strides")
kernel_sizes_fixed = None
strides_fixed = None
n_conv_layers = len(channel_dims)
if kernel_sizes:
kernel_sizes_fixed = to_tuple_tuple(
kernel_sizes, dim_tuple=3, num_tuple=n_conv_layers
)
kernel_sizes_fixed = cast(Tuple[Tuple[int, int, int], ...], kernel_sizes_fixed)
if strides:
strides_fixed = to_tuple_tuple(strides, dim_tuple=3, num_tuple=n_conv_layers)
strides_fixed = cast(Tuple[Tuple[int, int, int], ...], strides_fixed)
if kernel_sizes_fixed and strides_fixed:
return kernel_sizes_fixed, strides_fixed
elif kernel_sizes_fixed:
return kernel_sizes_fixed
else:
return strides_fixed
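# --- Illustrative usage sketch (not part of the original file) ---
# Runs a small random clip through a tiny VideoEncoder/VideoDecoder pair.
# The hyperparameters are arbitrary toy values; each (2, 2, 2) stride halves
# every spatio-temporal dim, which get_latent_shape reflects.
if __name__ == "__main__":
    import torch

    encoder = VideoEncoder(
        in_channel_dims=(3, 16),
        kernel_sizes=((3, 3, 3), (3, 3, 3)),
        strides=((2, 2, 2), (2, 2, 2)),
        output_dim=4,
        n_res_layers=1,
        attn_hidden_dim=16,
    )
    decoder = VideoDecoder(
        out_channel_dims=(16, 3),
        kernel_sizes=((3, 3, 3), (3, 3, 3)),
        strides=((2, 2, 2), (2, 2, 2)),
        input_dim=4,
        n_res_layers=1,
        attn_hidden_dim=16,
    )
    video = torch.randn(1, 3, 8, 16, 16)  # (b, c, t, h, w)
    print(encoder.get_latent_shape((8, 16, 16)))  # (2, 4, 4)
    latent = encoder(video)
    print(latent.shape)  # torch.Size([1, 4, 2, 4, 4])
    print(decoder(latent).shape)  # torch.Size([1, 3, 8, 16, 16])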
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/video_vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Dict, List, NamedTuple, Optional
from torch import nn, Tensor
from torchmultimodal.models.late_fusion import LateFusion
class TwoTowerOutput(NamedTuple):
output: Tensor
tower_embeddings: Dict[str, Tensor]
class TwoTower(nn.Module):
"""
A two tower architecture with a pair of late fusion models
(for now, can be extended) followed by a fusion of the outputs of the two towers.
Args:
tower_id_to_tower (Dict[str, LateFusion]): mapping of tower id
to tower model. Size should be 2, same tower should be passed in
for shared towers
tower_fusion (nn.Module): Module fusing a list of tensors (tower outputs)
into a single output
shared_tower_id_to_channel_mapping (Optional[Dict[str, Dict[str, str]]]): Dict
of shared tower id to mapping of channel names of the shared tower
to the original input channel name
Inputs:
channel_to_input (Dict[str,Tensor]) : Channel name to input tensor dict
"""
def __init__(
self,
tower_id_to_tower: Dict[str, LateFusion],
tower_fusion: nn.Module,
shared_tower_id_to_channel_mapping: Optional[Dict[str, Dict[str, str]]] = None,
):
super().__init__()
# lets add this validation for now,
# we can possibly make this a n tower architecture later.
if len(tower_id_to_tower) != 2:
raise ValueError(
f"Two tower needs 2 towers but found \
{len(tower_id_to_tower)} towers"
)
self.tower_id_to_tower = nn.ModuleDict(tower_id_to_tower)
self.tower_fusion = tower_fusion
if shared_tower_id_to_channel_mapping is not None:
towers = list(tower_id_to_tower.values())
if towers[0] != towers[1]:
raise ValueError(
"Towers should be shared if channel mapping is passed in"
)
self.shared_tower_id_to_channel_mapping: Optional[
Dict[str, Dict[str, str]]
] = shared_tower_id_to_channel_mapping
def forward(self, channel_to_input: Dict[str, Tensor]) -> TwoTowerOutput:
tower_embeddings = OrderedDict()
for tower_id, tower in self.tower_id_to_tower.items():
tower_input = self._get_tower_input(
tower_id, list(tower.encoders.keys()), channel_to_input
)
tower_embeddings[tower_id] = tower(tower_input)
final_out = self.tower_fusion(list(tower_embeddings.values()))
return TwoTowerOutput(output=final_out, tower_embeddings=tower_embeddings)
def _get_tower_input(
self,
tower_id: str,
tower_channels: List[str],
channel_to_input: Dict[str, Tensor],
) -> Dict[str, Tensor]:
tower_input = {}
channel_name_mapping: Dict[str, str] = {}
if self.shared_tower_id_to_channel_mapping is not None:
if self.shared_tower_id_to_channel_mapping.get(tower_id) is not None:
channel_name_mapping = self.shared_tower_id_to_channel_mapping[tower_id]
for channel in tower_channels:
if channel_name_mapping.get(channel) is not None:
input_channel_name = channel_name_mapping[channel]
else:
input_channel_name = channel
tower_input[channel] = channel_to_input[input_channel_name]
return tower_input
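# Example (a minimal sketch, not part of the library): the helper below shows one way to wire
# a TwoTower from two LateFusion towers. The encoder/fusion/head modules and all dimensions
# used here are illustrative assumptions only.
def _two_tower_usage_example() -> TwoTowerOutput:
    import torch

    class _ConcatDictFusion(nn.Module):
        # Fuses the per-modality embedding dict by concatenating along the last dim.
        def forward(self, embeddings: Dict[str, Tensor]) -> Tensor:
            return torch.cat([embeddings[k] for k in sorted(embeddings)], dim=-1)

    class _SumListFusion(nn.Module):
        # Fuses the list of tower outputs by summing them elementwise.
        def forward(self, towers: List[Tensor]) -> Tensor:
            return torch.stack(towers, dim=0).sum(dim=0)

    def _make_tower() -> LateFusion:
        return LateFusion(
            encoders=nn.ModuleDict(
                {"image": nn.Linear(16, 8), "text": nn.Linear(32, 8)}
            ),
            fusion_module=_ConcatDictFusion(),
            head_module=nn.Linear(16, 4),
        )

    model = TwoTower(
        tower_id_to_tower={"tower_0": _make_tower(), "tower_1": _make_tower()},
        tower_fusion=_SumListFusion(),
    )
    return model({"image": torch.randn(2, 16), "text": torch.randn(2, 32)})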
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/two_tower.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
from torch import nn
from torchmultimodal.models.gpt import (
MultimodalGPT,
MultimodalTransformerDecoder,
RightShift,
TransformerDecoder,
TransformerDecoderLayer,
)
from torchmultimodal.models.video_vqvae import VideoDecoder, VideoEncoder
from torchmultimodal.models.vqvae import VQVAE
from torchmultimodal.modules.layers.attention import SelfAttention
from torchmultimodal.modules.layers.position_embedding import (
BroadcastedPositionEmbedding,
)
def video_gpt(
input_shape: Tuple[int, int, int] = (16, 64, 64),
latent_shape: Tuple[int, int, int] = (8, 32, 32),
d_model: int = 576,
n_head: int = 4,
dropout: float = 0.2,
attn_dropout: float = 0.3,
num_decoder_layers: int = 16,
use_gpt_init: bool = True,
) -> MultimodalGPT:
"""VideoGPT model
Model architecture follows the paper `"VideoGPT: Video Generation using VQ-VAE and Transformers
"<https://arxiv.org/pdf/2104.10157.pdf>`_.
    Source of parameters (with the exception of ``d_model``, see parameter docstring below):
* Page 13 Table A.1 Column "BAIR / RoboNet / ViZDoom"
* Page 13 Table A.2 Column "BAIR / RoboNet"
Args:
input_shape (Tuple[int, int, int]): Shape of the input video data ``(time_seq_len, resolution, resolution)``.
Defaults to ``(16, 64, 64)``.
latent_shape (Tuple[int, int, int]): Shape of the encoded video data. This should be consistent with
the actual latent shape inferred by the video encoder.
See :class:`~torchmultimodal.models.video_vqvae.VideoEncoder`.
Defaults to ``(8, 32, 32)``.
d_model (int): Dimension of the underlying transformer decoder.
Value taken from: https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/gpt.py#L177
            Note that this differs from the paper because
            :class:`~torchmultimodal.modules.layers.position_embedding.BroadcastedPositionEmbedding`
            requires that ``d_model`` be a multiple of ``len(latent_shape)``.
See :py:class:`torchmultimodal.models.gpt.TransformerDecoderLayer`. Defaults to ``576``.
n_head (int): Number of attention heads used by the transformer decoder. Defaults to ``4``.
dropout (float): Dropout probability used by the projection layer of the transformer decoder.
Defaults to ``0.2``.
attn_dropout (float): Dropout probability used by the attention layer of the transformer decoder.
Defaults to ``0.3``.
num_decoder_layers (int): Number of transformer decoder layers. Defaults to ``16``.
use_gpt_init (bool): Whether to use weight initialization of GPT model.
See :class:`~torchmultimodal.models.gpt.MultimodalGPT`. Defaults to ``True``.
Returns:
An instance of :class:`~torchmultimodal.models.gpt.MultimodalGPT`.
"""
# constructs in and out tokenizers
in_tokenizer = video_vqvae()
out_tokenizer = video_vqvae()
num_in_tokens = in_tokenizer.num_embeddings # codebook size
num_out_tokens = out_tokenizer.num_embeddings
# derived parameters
vqvae_latent_shape = in_tokenizer.latent_shape(input_shape)
if latent_shape != vqvae_latent_shape:
raise ValueError(
f"Latent shape required: {latent_shape} does not match that of VQVAE: {vqvae_latent_shape}"
)
# constructs projection layers
in_projection = nn.Linear(in_tokenizer.embedding_dim, d_model, bias=False)
out_projection = nn.Linear(out_tokenizer.embedding_dim, d_model, bias=False)
# constructs multimodal decoder
in_pos_emb = BroadcastedPositionEmbedding(latent_shape, d_model)
out_pos_emb = BroadcastedPositionEmbedding(latent_shape, d_model)
attention_layer = SelfAttention(attn_dropout=attn_dropout)
decoder_layer = TransformerDecoderLayer(
d_model, n_head, dropout, attn_module=attention_layer
)
decoder = TransformerDecoder(decoder_layer, num_decoder_layers)
right_shift = RightShift(d_model)
mm_decoder = MultimodalTransformerDecoder(
in_pos_emb, out_pos_emb, decoder, right_shift
)
return MultimodalGPT(
d_model=d_model,
num_in_tokens=num_in_tokens,
num_out_tokens=num_out_tokens,
latent_shape=latent_shape,
in_tokenizer=in_tokenizer,
out_tokenizer=out_tokenizer,
mm_decoder=mm_decoder,
in_projection=in_projection,
out_projection=out_projection,
use_gpt_init=use_gpt_init,
)
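# Example (a minimal sketch, not part of the library): builds the default (untrained) VideoGPT
# model and runs one forward pass over tokens from a random clip. The batch size, the use of
# random data, and the short token prefix are assumptions chosen only to keep the example light;
# shapes follow the default ``input_shape``/``latent_shape``.
def _video_gpt_usage_example():
    import torch

    model = video_gpt().eval()
    video = torch.randn(1, 3, 16, 64, 64)  # (b, c, time, height, width)
    with torch.no_grad():
        # Tokenize with the output-modality VQVAE: (b, 8 * 32 * 32) token ids,
        # then keep a short prefix to avoid full 8192-token attention in this sketch.
        out_tokens = model.encode(video, "out")[:, :128]
        # Logits over the joint vocabulary: (b, prefix_len, num_in_tokens + num_out_tokens)
        logits = model(out_tokens=out_tokens).logits
    return logits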
def video_vqvae(
conv_filter_sizes: Tuple[Tuple[int, int, int], ...] = ((4, 4, 4),),
conv_filter_strides: Tuple[Tuple[int, int, int], ...] = ((2, 2, 2),),
encoder_filter_size: Tuple[int, int, int] = (3, 3, 3),
encoder_filter_stride: Tuple[int, int, int] = (1, 1, 1),
in_channel_dim: int = 3,
encoder_hidden_dim: int = 240,
n_res_layers: int = 4,
attn_hidden_dim: int = 240,
num_embeddings: int = 1024,
embedding_dim: int = 256,
decoder_hidden_dim: int = 240,
) -> VQVAE:
"""Video VQVAE builder for VideoGPT
Args:
conv_filter_sizes (Tuple[Tuple[int, int, int], ...], optional):
Tuple of dimension-wise kernel sizes of downsampling (upsampling) conv layers of the encoder
(decoder). Defaults to ``((4, 4, 4),)`` of one layer.
conv_filter_strides (Tuple[Tuple[int, int, int], ...], optional):
Tuple of dimension-wise strides of downsampling (upsampling) conv layers of the encoder (decoder).
Defaults to ``((2, 2, 2),)`` of one layer.
encoder_filter_size (Tuple[int, int, int], optional):
Dimension-wise kernel sizes of the last conv layer of the encoder. Defaults to ``(3, 3, 3)``.
encoder_filter_stride (Tuple[int, int, int], optional):
Dimension-wise strides of the last conv layer of the encoder. Defaults to ``(1, 1, 1)``.
in_channel_dim (int, optional): Size of channel dim in input. Defaults to ``3``.
encoder_hidden_dim (int, optional): Size of channel dims in encoder conv layers. Defaults to ``240``.
n_res_layers (int, optional): Number of :class:`~torchmultimodal.models.video_vqvae.AttentionResidualBlocks`
to include in encoder and decoder. Defaults to ``4``.
attn_hidden_dim (int, optional): Size of hidden dim of ``AttentionResidualBlocks``. Defaults to ``240``.
num_embeddings (int, optional): Number of embedding vectors used in ``Codebook``. Defaults to ``1024``.
embedding_dim (int, optional): Dimensionality of embedding vectors in ``Codebook``. Defaults to ``256``.
        decoder_hidden_dim (int, optional): Size of channel dims in decoder conv transpose layers. Defaults to ``240``.
Note:
        Strides of each layer must be either ``1`` or ``2`` because downsampling (upsampling) rates are
        powers of ``2``. For example, ``input_shape = (32, 256, 256)`` and ``latent_shape = (8, 8, 8)``
        correspond to downsample rates ``(4, 32, 32)``. The corresponding ``conv_filter_strides`` are
``((2, 2, 2), (2, 2, 2), (1, 2, 2), (1, 2, 2), (1, 2, 2))``.
The defaults are chosen to be consistent with those of :func:`video_gpt`.
Returns:
An instance of :class:`~torchmultimodal.models.vqvae.VQVAE` constructed with:
            * :class:`~torchmultimodal.models.video_vqvae.VideoEncoder`
            * :class:`~torchmultimodal.models.video_vqvae.VideoDecoder`
"""
encoder_kernel_sizes = conv_filter_sizes + (encoder_filter_size,)
encoder_strides = conv_filter_strides + (encoder_filter_stride,)
encoder_n_layers = len(encoder_strides)
decoder_kernel_sizes = conv_filter_sizes
decoder_strides = conv_filter_strides
decoder_n_layers = len(decoder_strides)
encoder_in_channel_dims = (in_channel_dim,) + (encoder_hidden_dim,) * max(
encoder_n_layers - 1, 0
)
decoder_out_channel_dims = (decoder_hidden_dim,) * max(decoder_n_layers - 1, 0) + (
in_channel_dim,
)
encoder = VideoEncoder(
encoder_in_channel_dims,
encoder_kernel_sizes,
encoder_strides,
embedding_dim,
n_res_layers,
attn_hidden_dim,
)
decoder = VideoDecoder(
decoder_out_channel_dims,
decoder_kernel_sizes,
decoder_strides,
embedding_dim,
n_res_layers,
attn_hidden_dim,
)
return VQVAE(encoder, decoder, num_embeddings, embedding_dim)
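# Example (a minimal sketch, not part of the library): round-trips a random clip through the
# default (untrained) video VQVAE, so the shapes are meaningful but the reconstruction is not.
def _video_vqvae_usage_example():
    import torch

    vqvae = video_vqvae().eval()
    video = torch.randn(1, 3, 16, 64, 64)  # (b, c, time, height, width)
    with torch.no_grad():
        token_ids = vqvae.encode(video)  # (b, 8, 32, 32) codebook indices
        reconstructed = vqvae.decode(token_ids)  # (b, 3, 16, 64, 64)
    return token_ids, reconstructed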
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/video_gpt.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
import torch
from torch import nn
class LateFusion(nn.Module):
"""A generic architecture for late fusion multimodal models.
A late fusion model contains separate encoders for each modality,
followed by a fusion layer and then a head module. For an example of a
late fusion model, see the TorchMultimodal implementation of the cnn-lstm
multimodal classifier (cnn_lstm.py)
    Args:
        encoders (ModuleDict): Dictionary mapping modalities to their respective
            encoders.
        fusion_module (nn.Module): Module that fuses the dictionary of encoded
            modality embeddings into a single tensor.
        head_module (nn.Module): Module applied to the fused representation to
            produce the final output.
Inputs:
modalities (Dict[str, Tensor]): A dictionary mapping modalities to
their tensor representations.
"""
def __init__(
self,
encoders: nn.ModuleDict,
fusion_module: nn.Module,
head_module: nn.Module,
):
super().__init__()
# Sort encoders by key on init for consistency
self.encoders = nn.ModuleDict({k: encoders[k] for k in sorted(encoders.keys())})
self.fusion_module = fusion_module
self.head_module = head_module
def forward(self, modalities: Dict[str, torch.Tensor]) -> torch.Tensor:
embeddings = {}
for key, encoder in self.encoders.items():
assert key in modalities, f"{key} missing in input"
embeddings[key] = encoder(modalities[key])
fused = self.fusion_module(embeddings)
return self.head_module(fused)
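# Example (a minimal sketch, not part of the library): builds a tiny LateFusion model with
# linear encoders, a concatenation-based fusion module, and a linear head. All modules and
# dimensions here are illustrative assumptions only.
def _late_fusion_usage_example() -> torch.Tensor:
    class _ConcatFusion(nn.Module):
        # Concatenates the per-modality embeddings (sorted by key for determinism).
        def forward(self, embeddings: Dict[str, torch.Tensor]) -> torch.Tensor:
            return torch.cat([embeddings[k] for k in sorted(embeddings)], dim=-1)

    model = LateFusion(
        encoders=nn.ModuleDict({"image": nn.Linear(16, 8), "text": nn.Linear(32, 8)}),
        fusion_module=_ConcatFusion(),
        head_module=nn.Linear(16, 2),
    )
    return model({"image": torch.randn(4, 16), "text": torch.randn(4, 32)})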
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/late_fusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Optional
import torch
import torchmultimodal.utils.common as common_utils
from torch import nn
from torchmultimodal.modules.encoders.swin_transformer_3d_encoder import (
SwinTransformer3d,
)
from torchvision.models.video.swin_transformer import PatchEmbed3d
_OMNIVORE_PRETRAINED_URLS = {
"swin_t_encoder": "https://download.pytorch.org/models/omnivore_swin_t_encoder-b7e39400.pth",
"swin_s_encoder": "https://download.pytorch.org/models/omnivore_swin_s_encoder-40b05ba1.pth",
"swin_b_encoder": "https://download.pytorch.org/models/omnivore_swin_b_encoder-a9134768.pth",
"swin_t_heads": "https://download.pytorch.org/models/omnivore_swin_t_heads-c8bfb7fd.pth",
"swin_s_heads": "https://download.pytorch.org/models/omnivore_swin_s_heads-c5e77246.pth",
"swin_b_heads": "https://download.pytorch.org/models/omnivore_swin_b_heads-3c38b3ed.pth",
}
def _imagenet1k_head(input_dim: int) -> nn.Module:
return nn.Linear(input_dim, 1000, bias=True)
def _kinetics400_head(input_dim: int) -> nn.Module:
return nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(input_dim, 400, bias=True),
)
def _sunrgbd_head(input_dim: int) -> nn.Module:
return nn.Linear(input_dim, 19, bias=True)
def _multimodal_head(input_dim: int) -> nn.ModuleDict:
return nn.ModuleDict(
{
"image": _imagenet1k_head(input_dim),
"rgbd": _sunrgbd_head(input_dim),
"video": _kinetics400_head(input_dim),
}
)
class Omnivore(nn.Module):
"""Omnivore is a model that accept multiple vision modality.
Omnivore (https://arxiv.org/abs/2201.08377) is a single model that able to do classification
on images, videos, and single-view 3D data using the same shared parameters of the encoder.
Args:
encoder (nn.Module): Instantiated encoder. It generally accept a video backbone.
The paper use SwinTransformer3d for the encoder.
heads (Optional[nn.ModuleDict]): Dictionary of multiple heads for each dataset type
Inputs:
x (Tensor): 5 Dimensional batched video tensor with format of B C D H W
where B is batch, C is channel, D is time, H is height, and W is width.
input_type (str): The dataset type of the input, this will used to choose
the correct head.
"""
def __init__(self, encoder: nn.Module, heads: nn.ModuleDict):
super().__init__()
self.encoder = encoder
self.heads = heads
def forward(self, x: torch.Tensor, input_type: str) -> torch.Tensor:
x = self.encoder(x)
assert (
input_type in self.heads
), f"Unsupported input_type: {input_type}, please use one of {list(self.heads.keys())}"
x = self.heads[input_type](x)
return x
class PatchEmbedOmnivore(nn.Module):
"""Patch Embedding strategy for Omnivore model
    It uses the common PatchEmbed3d for images and videos;
    for single-view depth images it has a separate embedding for the depth channel
    and adds that embedding result to the RGB channel embedding.
    Reference: https://arxiv.org/abs/2201.08377
Args:
        patch_size (List[int]): Patch token size. Default: ``[2, 4, 4]``
embed_dim (int): Number of linear projection output channels. Default: ``96``
norm_layer (nn.Module, optional): Normalization layer. Default: ``None``
"""
def __init__(
self,
patch_size: List[int],
embed_dim: int = 96,
norm_layer: Optional[Callable[..., nn.Module]] = None,
):
super().__init__()
self.patch_embed = PatchEmbed3d(
patch_size=patch_size,
embed_dim=embed_dim,
norm_layer=norm_layer,
)
self.depth_patch_embed = PatchEmbed3d(
patch_size=patch_size,
in_channels=1,
embed_dim=embed_dim,
norm_layer=norm_layer,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x: B C D H W
# Note: D here represent time
assert x.ndim == 5
has_depth = x.shape[1] == 4
if has_depth:
x_rgb = self.patch_embed(x[:, :3, ...])
x_d = self.depth_patch_embed(x[:, 3:, ...])
x = x_rgb + x_d
else:
x = self.patch_embed(x)
return x
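# Example (a minimal sketch, not part of the library): embeds an RGB clip and a single-view
# RGBD image with the same PatchEmbedOmnivore module. The shapes are illustrative assumptions;
# the 4th (depth) channel is routed through the separate depth patch embedding.
def _patch_embed_omnivore_usage_example():
    patch_embed = PatchEmbedOmnivore(patch_size=[2, 4, 4], embed_dim=96)
    video = torch.randn(1, 3, 8, 32, 32)  # RGB clip: B C D H W
    rgbd = torch.randn(1, 4, 1, 32, 32)  # RGBD image with a singleton time dim
    return patch_embed(video).shape, patch_embed(rgbd).shape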
def omnivore_swin_t_encoder(
pretrained: bool = False, progress: bool = True
) -> SwinTransformer3d:
encoder = SwinTransformer3d(
patch_size=[2, 4, 4],
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 7, 7],
stochastic_depth_prob=0.2,
norm_layer=nn.LayerNorm,
patch_embed=PatchEmbedOmnivore,
num_classes=None,
)
if pretrained:
common_utils.load_module_from_url(
encoder,
_OMNIVORE_PRETRAINED_URLS["swin_t_encoder"],
progress=progress,
)
return encoder
def omnivore_swin_s_encoder(
pretrained: bool = False, progress: bool = True
) -> SwinTransformer3d:
encoder = SwinTransformer3d(
patch_size=[2, 4, 4],
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 7, 7],
stochastic_depth_prob=0.3,
norm_layer=nn.LayerNorm,
patch_embed=PatchEmbedOmnivore,
num_classes=None,
)
if pretrained:
common_utils.load_module_from_url(
encoder,
_OMNIVORE_PRETRAINED_URLS["swin_s_encoder"],
progress=progress,
)
return encoder
def omnivore_swin_b_encoder(
pretrained: bool = False, progress: bool = True
) -> SwinTransformer3d:
encoder = SwinTransformer3d(
patch_size=[2, 4, 4],
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=[16, 7, 7],
stochastic_depth_prob=0.3,
norm_layer=nn.LayerNorm,
patch_embed=PatchEmbedOmnivore,
num_classes=None,
)
if pretrained:
common_utils.load_module_from_url(
encoder,
_OMNIVORE_PRETRAINED_URLS["swin_b_encoder"],
progress=progress,
)
return encoder
def omnivore_swin_t(pretrained: bool = False, progress: bool = True) -> nn.Module:
"""
Builder function to get omnivore model with swin_t variant encoder
Args:
        pretrained (bool): If true then it will load pretrained weights,
            otherwise it will use random weights (default: ``False``)
        progress (bool): If true then there will be a progress bar for downloading the weights (default: ``True``)
"""
encoder = omnivore_swin_t_encoder(pretrained=pretrained)
heads = _multimodal_head(input_dim=encoder.num_features)
if pretrained:
common_utils.load_module_from_url(
heads,
_OMNIVORE_PRETRAINED_URLS["swin_t_heads"],
progress=progress,
)
model = Omnivore(encoder, heads)
return model
def omnivore_swin_s(pretrained: bool = False, progress: bool = True) -> nn.Module:
"""
Builder function to get omnivore model with swin_s variant encoder
Args:
        pretrained (bool): If true then it will load pretrained weights,
            otherwise it will use random weights (default: ``False``)
        progress (bool): If true then there will be a progress bar for downloading the weights (default: ``True``)
"""
encoder = omnivore_swin_s_encoder(pretrained=pretrained)
heads = _multimodal_head(input_dim=encoder.num_features)
if pretrained:
common_utils.load_module_from_url(
heads,
_OMNIVORE_PRETRAINED_URLS["swin_s_heads"],
progress=progress,
)
model = Omnivore(encoder, heads)
return model
def omnivore_swin_b(pretrained: bool = False, progress: bool = True) -> nn.Module:
"""
Builder function to get omnivore model with swin_b variant encoder
Args:
        pretrained (bool): If true then it will load pretrained weights,
            otherwise it will use random weights (default: ``False``)
        progress (bool): If true then there will be a progress bar for downloading the weights (default: ``True``)
"""
encoder = omnivore_swin_b_encoder(pretrained=pretrained)
heads = _multimodal_head(input_dim=encoder.num_features)
if pretrained:
common_utils.load_module_from_url(
heads,
_OMNIVORE_PRETRAINED_URLS["swin_b_heads"],
progress=progress,
)
model = Omnivore(encoder, heads)
return model
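# Example (a minimal sketch, not part of the library): runs a randomly initialized swin_t
# Omnivore model on random video, image, and RGBD inputs. Input sizes are illustrative
# assumptions; images and depth maps use a singleton time dimension.
def _omnivore_usage_example():
    model = omnivore_swin_t(pretrained=False).eval()
    with torch.no_grad():
        video = torch.randn(1, 3, 16, 112, 112)  # B C D H W
        image = torch.randn(1, 3, 1, 112, 112)
        rgbd = torch.randn(1, 4, 1, 112, 112)
        video_logits = model(video, input_type="video")  # (1, 400), Kinetics-400 head
        image_logits = model(image, input_type="image")  # (1, 1000), ImageNet-1k head
        rgbd_logits = model(rgbd, input_type="rgbd")  # (1, 19), SUN RGB-D head
    return video_logits, image_logits, rgbd_logits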
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/omnivore.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Tuple, Union
from torch import nn, Size, Tensor
from torchmultimodal.modules.layers.codebook import Codebook, CodebookOutput
from torchmultimodal.utils.common import shift_dim
class VQVAEOutput(NamedTuple):
"""Outputs from :class:`~torchmultimodal.models.vqvae.VQVAE`.
Attributes:
decoded (Tensor): Output of the decoder.
codebook_output (CodebookOutput): Output of codebook layer to be used in loss calculations.
"""
decoded: Tensor
codebook_output: CodebookOutput
class VQVAE(nn.Module):
"""General model for VQVAE that provides codebook layer to link user specified
encoder and decoder.
Vector Quantized Variational Autoencoder is a type of autoencoder that defines
an embedding of discrete vectors as the latent variables in the bottleneck layer
instead of normally distributed latent variables as in a standard VAE. This enables
high-fidelity reconstruction of input data. It was first introduced in "Neural
Discrete Representation Learning" (Oord et al. 2017) and has since seen success in
tokenizing and generating high-resolution image, audio, and video data.
Args:
encoder (nn.Module): Model that accepts single Tensor as input in forward, ``encoder(x)``.
Will be used to project input into codebook layer. Expects channel
dim of encoder output to match ``embedding_dim`` of codebook.
See :class:`~torchmultimodal.modules.layers.codebook.Codebook`.
decoder (nn.Module): Model that accepts single Tensor as input in forward, ``decoder(x)``.
Should be able to accept output shape of codebook layer, which matches output shape of
the encoder.
num_embeddings (int): Number of embedding vectors in codebook.
embedding_dim (int): Dimensionality of embedding vectors in codebook.
"""
def __init__(
self,
encoder: nn.Module,
decoder: nn.Module,
num_embeddings: int,
embedding_dim: int,
) -> None:
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.codebook = Codebook(num_embeddings, embedding_dim)
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
def latent_shape(self, input_shape: Union[Size, Tuple]) -> Tuple[int, ...]:
"""Returns the downsampled shape of the encoder output: (d1, ..., dn)"""
if not hasattr(self.encoder, "get_latent_shape"):
raise AttributeError(
f"Missing attribute 'get_latent_shape' of the encoder {self.encoder}"
)
return self.encoder.get_latent_shape(input_shape) # type: ignore
def encode(
self, x: Tensor, return_embeddings: bool = False
) -> Union[Tuple[Tensor, Tensor], Tensor]:
"""Converts input data to token ids
Args:
x (Tensor): Input data of shape ``(b, c, d1, ..., dn)``.
return_embeddings (bool): Flag to return also the quantized embeddings. Defaults to ``False``.
Returns:
            * A tensor of token ids: ``(b, d1, ..., dn)``
* A tuple of token ids and quantized embeddings ``(b, emb_dim, d1, ..., dn)``.
"""
encoded = self.encoder(x)
out = self.codebook(encoded)
indices = out.codebook_indices
quantized = out.quantized
if return_embeddings:
return indices, quantized
return indices
def decode(self, indices: Tensor) -> Tensor:
"""Converts token ids back to data"""
quantized = self.lookup(indices) # (b, latent_shape, emb_dim)
quantized = shift_dim(quantized, -1, 1) # (b, emb_dim, latent_shape)
return self.decoder(quantized) # (b, c, input_shape)
def lookup(self, indices: Tensor) -> Tensor:
if not hasattr(self.codebook, "lookup"):
raise AttributeError(
f"Missing attribute 'lookup' of the codebook {self.codebook}"
)
return self.codebook.lookup(indices)
def forward(self, x: Tensor) -> VQVAEOutput:
"""
Args:
x (Tensor): Input data of shape ``(b, c, d1, ..., dn)``.
Returns:
An instance of :class:`~torchmultimodal.models.vqvae.VQVAEOutput`.
"""
encoded = self.encoder(x)
codebook_output = self.codebook(encoded)
decoded = self.decoder(codebook_output.quantized)
return VQVAEOutput(decoded, codebook_output)
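# Example (a minimal sketch, not part of the library): wraps a toy single-layer conv encoder
# and decoder in a VQVAE and round-trips a random image batch. The conv modules, channel
# sizes, and codebook size are illustrative assumptions; ``latent_shape``/``lookup`` would
# additionally require encoder/codebook support as documented above.
def _vqvae_usage_example() -> VQVAEOutput:
    import torch

    encoder = nn.Conv2d(3, 8, kernel_size=4, stride=2, padding=1)  # (b, 8, 16, 16)
    decoder = nn.ConvTranspose2d(8, 3, kernel_size=4, stride=2, padding=1)  # (b, 3, 32, 32)
    model = VQVAE(encoder, decoder, num_embeddings=16, embedding_dim=8)
    return model(torch.randn(2, 3, 32, 32))  # VQVAEOutput(decoded, codebook_output)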
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from typing import Any, Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn, Tensor
from torchmultimodal.modules.layers.activation import SiLU
from torchmultimodal.modules.layers.attention import MultiHeadAttention, SelfAttention
from torchmultimodal.modules.layers.mlp import MLP
from torchmultimodal.utils.common import checkpoint_wrapper, get_clones
class TransformerDecoderOutput(NamedTuple):
"""Outputs from :class:`~torchmultimodal.models.gpt.TransformerDecoder`.
Attributes:
last_hidden_states (Tensor): Output from the last layer of the transformer.
hidden_states (Tuple[Tensor, ...], optional): Outputs from all layers of the transformer.
Defaults to ``None``.
attention_weights (Tuple[Tensor, ...], optional): Attention probabilities from all layers of the
transformer. Defaults to ``None``.
        past_key_values (Tuple[Dict[str, Tensor], ...], optional): If ``use_cache`` is on, contains
key/value tensors prior to the current step along the sequence. Defaults to ``None``.
"""
last_hidden_states: Tensor
hidden_states: Optional[Tuple[Tensor, ...]] = None
attention_weights: Optional[Tuple[Tensor, ...]] = None
past_key_values: Optional[Tuple[Dict[str, Tensor], ...]] = None
class TransformerLayerOutput(NamedTuple):
"""Outputs from :class:`~torchmultimodal.models.gpt.TransformerDecoderLayer`.
Attributes:
hidden_states (Tensor): Output from the current layer.
attention_weights (Tensor, optional): Attention probability tensor of the current layer.
Defaults to ``None``.
past_key_values (Dict[str, Tensor], optional): If ``use_cache`` is on, contains key/value tensors
prior to the current step along the sequence. Defaults to ``None``.
"""
hidden_states: Tensor
attention_weights: Optional[Tensor] = None
past_key_values: Optional[Dict[str, Tensor]] = None
class MultimodalGPTOutput(NamedTuple):
"""Outputs from :meth:`~torchmultimodal.models.gpt.MultimodalGPT.forward`.
Attributes:
        decoder_output (TransformerDecoderOutput): Contains output from the multimodal transformer decoder.
See :class:`MultimodalTransformerDecoder`.
logits (Tensor): Logits computed from the last hidden state of the multimodal transformer decoder.
"""
decoder_output: TransformerDecoderOutput
logits: Tensor
class MultimodalGPT(nn.Module):
"""Extends the GPT (Generative Pre-Training) model for cross-modality generation.
This module implements the GPT model for generation of one modality given another
following the paper `"Improving Language Understanding by Generative Pre-Training
"<https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf>`_.
Args:
d_model (int): Embedding dimension of the transformer decoder.
num_in_tokens (int): Number of unique token states for the input modality.
num_out_tokens (int): Number of unique token states for the output modality.
        latent_shape (Tuple[int, ...]): Shape of the latent space of the output modality tokenizer. Used to reshape
sequence of generated tokens to be decoded back to data.
in_tokenizer (nn.Module): Tokenizer for the input modality. Must have methods ``encode``, ``lookup``.
out_tokenizer (nn.Module): Tokenizer for the output modality. Must have methods ``encode``, ``decode``.
        mm_decoder (nn.Module): Multimodal transformer decoder. An instance of
:py:class:`MultimodalTransformerDecoder`.
in_projection (nn.Module, optional): Projects the input modality token embeddings to match size of the
transformer decoder. Defaults to ``None``.
out_projection (nn.Module, optional): Projects the output modality token embeddings to match size of the
transformer decoder. Defaults to ``None``.
norm_layer (Callable[..., nn.Module], optional): Which normalization layer to use. Supports ``nn.Module`` or
partial. If ``None``, ``nn.LayerNorm`` will be used as the default.
use_gpt_init (bool): Whether to use GPT model specific initialization. Defaults to ``True``.
Raises:
AttributeError: If input tokenizer does not implement methods ``encode`` and ``lookup`` or if output
tokenizer does not implement methods ``encode``, ``lookup`` and ``decode``.
"""
def __init__(
self,
d_model: int,
num_in_tokens: int,
num_out_tokens: int,
latent_shape: Tuple[int, ...],
in_tokenizer: nn.Module,
out_tokenizer: nn.Module,
mm_decoder: nn.Module,
in_projection: Optional[nn.Module] = None,
out_projection: Optional[nn.Module] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
use_gpt_init: bool = True,
) -> None:
super().__init__()
if not all(
[hasattr(in_tokenizer, attr_name) for attr_name in ["encode", "lookup"]]
):
raise AttributeError(
"Input modality tokenizer must have methods 'encode' and 'lookup'."
)
if not all(
[
hasattr(out_tokenizer, attr_name)
for attr_name in ["encode", "lookup", "decode"]
]
):
raise AttributeError(
"Output modality tokenizer must have methods 'encode', 'lookup' and 'decode'."
)
num_tokens = num_in_tokens + num_out_tokens
self.num_in_tokens = num_in_tokens
self.num_out_tokens = num_out_tokens
self.latent_shape = latent_shape
self.in_tokenizer = in_tokenizer
self.out_tokenizer = out_tokenizer
self.mm_decoder = mm_decoder
self.in_projection = in_projection
self.out_projection = out_projection
if norm_layer is None:
norm_layer = partial(nn.LayerNorm, eps=1e-5)
self.norm = norm_layer(normalized_shape=d_model)
self.to_logit = nn.Linear(d_model, num_tokens, bias=False)
        # This will give us equal probabilities after the softmax layer initially to avoid biasing
# towards any particular prediction category
self.to_logit.weight.data.copy_(torch.zeros(num_tokens, d_model))
if use_gpt_init:
self.initialize_parameters()
def initialize_parameters(self) -> None:
# Initialize weights of the layers in question, e.g., after loading checkpoints
# Only do this when the layers have weights data, e.g., for text tokenizer the projection
# layer is dummy (nn.Identity)
if hasattr(self.in_projection, "weight"):
self.in_projection.weight.data.normal_(std=0.02) # type: ignore
if hasattr(self.out_projection, "weight"):
self.out_projection.weight.data.normal_(std=0.02) # type: ignore
def forward(
self,
in_tokens: Optional[Tensor] = None,
out_tokens: Optional[Tensor] = None,
in_pos_ids: Optional[Tensor] = None,
out_pos_ids: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
logits_mask: Optional[Tensor] = None,
use_cache: bool = False,
causal: bool = False,
right_shift: bool = False,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> MultimodalGPTOutput:
"""
Args:
in_tokens (Tensor, optional): Tensor of dimension ``(b, in_seq_len)`` containing tokens
for the input modality. Defaults to ``None``.
out_tokens (Tensor, optional): Tensor of dimension ``(b, out_seq_len)`` containing tokens
for the output modality. Defaults to ``None``.
in_pos_ids (Tensor, optional): Tensor of dimension ``(b, in_seq_len)`` containing indices for the
input modality position embeddings. Defaults to ``None``.
out_pos_ids (Tensor, optional): Tensor of dimension ``(b, out_seq_len)`` containing indices for the
output modality position embeddings. Defaults to ``None``.
attn_mask (Tensor, optional): Tensor of dimension ``(q_seq_len, k_seq_len)`` or
``(b, q_seq_len, k_seq_len)`` where prefixes ``q`` and ``k`` stand for query and key.
Contains 1s for positions to attend to and 0s for masked positions. Defaults to ``None``.
head_mask (Tensor, optional): Tensor of dimension ``(h, q_seq_len, k_seq_len)`` or
``(b, h, q_seq_len, k_seq_len)``. Masks need to be specified for each attention head.
Defaults to ``None``.
logits_mask (Tensor, optional): Tensor of dimension ``(seq_len, num_tokens)`` or
``(b, seq_len, num_tokens)`` to ensure we only calculate probabilities from tokens of the
corresponding modality sequence.
use_cache (bool, optional): If ``True``, caches past key/value tensors for faster decoding. If ``False``,
recomputes key and value for each decoding step. Defaults to ``False``.
causal (bool, optional): If ``True``, use causal attention. Defaults to ``False``.
right_shift (bool): If ``True``, shifts the embedding vectors to the right and prepends it with start of
sentence token. Defaults to ``False``. This option is disregarded during training mode
return_attn_weights (bool, optional): If ``True``, returns attention probabilities of each transformer
layer. Defaults to ``False``.
return_hidden_states (bool, optional): If ``True``, returns the embeddings of each transformer layer.
Defaults to ``False``.
Returns:
An instance of :class:`~torchmultimodal.models.gpt.MultimodalGPTOutput`.
"""
decoder_output = self.fwd(
in_tokens=in_tokens,
out_tokens=out_tokens,
in_pos_ids=in_pos_ids,
out_pos_ids=out_pos_ids,
attn_mask=attn_mask,
head_mask=head_mask,
use_cache=use_cache,
causal=causal,
right_shift=right_shift,
return_attn_weights=return_attn_weights,
return_hidden_states=return_hidden_states,
)
hidden_states = decoder_output.last_hidden_states
logits = self.logit_projection(hidden_states, logits_mask)
return MultimodalGPTOutput(decoder_output, logits)
def fwd(
self,
in_tokens: Optional[Tensor] = None,
out_tokens: Optional[Tensor] = None,
in_pos_ids: Optional[Tensor] = None,
out_pos_ids: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
use_cache: bool = False,
causal: bool = False,
right_shift: bool = False,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> TransformerDecoderOutput:
# During training this method is used in the forward pass to decode input- and
# output- tokens.
# During generation this method is used for autoregressive decoding.
if (in_tokens is None) and (out_tokens is None):
raise ValueError(
"input-modality token and output-modality token sequences cannot be both empty"
)
# Look up embeddings for the given tokens and project to fit the size of the
# transformer decoder
in_modality = out_modality = None
if in_tokens is not None:
# (b, in_seq_len, in_emb_dim)
in_modality = self.lookup(in_tokens, "in")
if self.in_projection is not None:
in_modality = self.in_projection(
in_modality
) # (b, in_seq_len, d_model)
if out_tokens is not None:
# (b, out_seq_len, out_emb_dim)
out_modality = self.lookup(out_tokens, "out")
if self.out_projection is not None:
out_modality = self.out_projection(
out_modality
) # (b, out_seq_len, d_model)
return self.mm_decoder(
in_modality=in_modality,
out_modality=out_modality,
in_pos_ids=in_pos_ids,
out_pos_ids=out_pos_ids,
attn_mask=attn_mask,
head_mask=head_mask,
use_cache=use_cache,
causal=causal,
right_shift=right_shift,
return_attn_weights=return_attn_weights,
return_hidden_states=return_hidden_states,
)
def logit_projection(
self, hidden_states: Tensor, logits_mask: Optional[Tensor] = None
) -> Tensor:
if logits_mask is not None and logits_mask.dim() == 2:
logits_mask = logits_mask.unsqueeze(
0
) # (seq_len, num_tokens) -> (1, seq_len, num_tokens)
hidden_states = self.norm(hidden_states)
logits = self.to_logit(hidden_states)
max_neg_value = -torch.finfo(logits.dtype).max
if logits_mask is not None:
logits.masked_fill_(logits_mask == 0, max_neg_value)
return logits # (b, seq_len, num_tokens)
def encode(self, x: Any, modality: str, **kwargs: Any) -> Tensor:
"""Converts data to token ids.
Although this is not part of the forward pass, it is used to generate labels for training
as well as inputs for autoregressive decoding.
Args:
x (Any): Data to be encoded, e.g., ``List[str]`` for text, ``Tensor`` of shape
``(b, c, d1, ..., dn)`` for audio/image/video.
modality (str): Input or output modality string used to select the encoder.
kwargs (Any): Other keyword arguments suitable for the encoder.
Returns:
A tensor of token ids of shape ``(b, seq_len)``.
Raises:
ValueError: If ``modality`` is neither ``in`` nor ``out``.
"""
if modality == "in":
encoder = self.in_tokenizer.encode
elif modality == "out":
encoder = self.out_tokenizer.encode
else:
raise ValueError(f"Invalid modality parameter: {modality}")
token_ids = encoder(x, **kwargs) # type: ignore
# For generation we need to flatten the tokens
return token_ids.flatten(
start_dim=1, end_dim=-1
) # (b, d1, ..., dn) -> (b, seq_len)
def decode(self, token_ids: Tensor, **kwargs: Any) -> Any:
"""Converts out-modality tokens ids back to data during generation.
Args:
token_ids (Tensor): Token ID sequence ``(b, seq_len)`` to be decoded.
kwargs (Any): Other keywords arguments suitable for the decoder.
Returns:
            The decoded data, e.g., ``List[str]`` for text, a tensor of shape ``(b, c, d1, ..., dn)`` for
audio/image/video.
Raises:
ValueError: If the shape of ``token_ids`` is not of dimension two.
ValueError: If the sequence dim of ``token_ids`` does not match that inferred from ``latent_shape``.
"""
if len(token_ids.shape) != 2:
raise ValueError(
f"Shape of token ids should be '(batch_size, sequence_length)' but got {token_ids.shape}"
)
# Check if the generated sequence length matches that inferred from the latent embedding space
latent_seq_len = torch.prod(torch.tensor(self.latent_shape)).item()
if token_ids.shape[1] != latent_seq_len:
raise ValueError(
f"Sequence to decode does not match that inferred from the tokenizer: {latent_seq_len}"
)
# Reshape the sequence of token ids back to dim of latent space
token_ids = token_ids.view(
token_ids.shape[0], *self.latent_shape
) # (b, seq_len) -> (b, d1, ..., dn)
return self.out_tokenizer.decode(token_ids, **kwargs) # type: ignore
def lookup(self, token_ids: Tensor, modality: str) -> Tensor:
"""Looks up the latent embeddings corresponding to the token ids during generation.
We ask each tokenizer to implement this method. An example is :class:`torchmultimodal.models.vqvae.VQVAE`.
Args:
token_ids (Tensor): Token ID sequence ``(b, seq_len)``.
modality (str): The modality at which this method is performed.
Returns:
A tensor of embeddings corresponding to the token ids.
Raises:
ValueError: If ``modality`` is neither ``in`` nor ``out``.
"""
if modality == "in":
tokenizer = self.in_tokenizer
elif modality == "out":
tokenizer = self.out_tokenizer
else:
raise ValueError(f"Invalid modality parameter: {modality}")
return tokenizer.lookup(token_ids) # type: ignore
class MultimodalTransformerDecoder(nn.Module):
"""A transformer decoder for two modalities
The token- and position- embedding layers are per modality:
* During training both modalities are fed into the module and concatenated as a single sequence of
tokenized embedding vectors
* During generation the future data points are predicted step-wise from the past. The input modality
is processed before the output modality (see ``torchmultimodal.utils.common.generate``). Therefore,
at any point in time the input data contains only one modality.
Args:
in_pos_emb (nn.Module): Input modality position embedding layer.
out_pos_emb (nn.Module): Output modality position embedding layer.
decoder (nn.Module): The transformer decoder. An instance of :py:class:`TransformerDecoder`.
right_shift (nn.Module): Layer that shifts the embedding vectors to the right and prepends it with
start of sentence token (SOS). An instance of :py:class:`RightShift`.
Note:
* During training mode, the SOS token is prepended to the left of the concatenated input and
output modality sequence;
* During generation mode, the SOS token is only required for the input modality sequence as
the initial token to be learnt from. Right shift should be turned off
(``right_shift = False``, see args) when we start to generate the output modality samples.
"""
def __init__(
self,
in_pos_emb: nn.Module,
out_pos_emb: nn.Module,
decoder: nn.Module,
right_shift: nn.Module,
) -> None:
super().__init__()
self.in_pos_emb = in_pos_emb
self.out_pos_emb = out_pos_emb
self.decoder = decoder
self.right_shift = right_shift
def forward(
self,
in_modality: Optional[Tensor] = None,
out_modality: Optional[Tensor] = None,
in_pos_ids: Optional[Tensor] = None,
out_pos_ids: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
use_cache: bool = False,
causal: bool = False,
right_shift: bool = False,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> TransformerDecoderOutput:
"""
Args:
in_modality (Tensor, optional): Tensor of dimension ``(b, in_seq_len, d_model)`` containing tokenized
embeddings for the input modality. Defaults to ``None``.
out_modality (Tensor, optional): Tensor of dimension ``(b, out_seq_len, d_model')`` containing tokenized
embeddings for the output modality. Defaults to ``None``.
in_pos_ids (Tensor, optional): Tensor of dimension ``(b, in_seq_len)`` containing indices for the
input modality position embeddings. Defaults to ``None``.
out_pos_ids (Tensor, optional): Tensor of dimension ``(b, out_seq_len)`` containing indices for the
output modality position embeddings. Defaults to ``None``.
attn_mask (Tensor, optional): Tensor of dimension ``(q_seq_len, k_seq_len)`` or
``(b, q_seq_len, k_seq_len)`` where prefixes ``q`` and ``k`` stand for query and key.
Contains 1s for positions to attend to and 0s for masked positions. Defaults to ``None``.
head_mask (Tensor, optional): Tensor of dimension ``(h, q_seq_len, k_seq_len)`` or
``(b, h, q_seq_len, k_seq_len)``. Masks need to be specified for each attention head.
Defaults to ``None``.
use_cache (bool, optional): If ``True``, caches past key/value tensors for faster decoding.
If ``False``, recomputes key and value for each decoding step. Defaults to ``False``.
causal (bool, optional): If ``True``, use causal attention. Defaults to ``False``.
right_shift (bool): If ``True``, shifts the embedding vectors to the right and prepends it with start of
sentence token. Defaults to ``False``. This option is disregarded during training mode
return_attn_weights (bool, optional): If ``True``, returns attention probabilities of each transformer
layer. Defaults to ``False``.
return_hidden_states (bool, optional): If ``True``, returns the embeddings of each transformer layer.
Defaults to ``False``.
Returns:
            An instance of :class:`~torchmultimodal.models.gpt.TransformerDecoderOutput`.
"""
if (in_modality is None) and (out_modality is None):
raise ValueError(
"in_modality and out_modality sequences cannot be both empty"
)
# Since generation is based on the previous data point (autoregressive) where
# only one modality is needed at any point along the sequence, either input
# or output modality can be None.
        # Whereas training is done in parallel over all data points, so both modalities
        # should be present. Position ids are optional as they can be derived from
        # the sequence length of each modality.
if in_modality is None:
out_pos_ids = self._norm_pos_ids(out_modality, out_pos_ids)
x = out_modality + self.out_pos_emb(out_pos_ids)
elif out_modality is None:
in_pos_ids = self._norm_pos_ids(in_modality, in_pos_ids)
x = in_modality + self.in_pos_emb(in_pos_ids)
else:
in_pos_ids = self._norm_pos_ids(in_modality, in_pos_ids)
out_pos_ids = self._norm_pos_ids(out_modality, out_pos_ids)
x_in = in_modality + self.in_pos_emb(in_pos_ids)
x_out = out_modality + self.out_pos_emb(out_pos_ids)
x = torch.cat((x_in, x_out), dim=1)
if self.training or right_shift:
x = self.right_shift(x)
return self.decoder(
x,
attn_mask,
head_mask,
use_cache,
causal,
return_attn_weights,
return_hidden_states,
)
def _norm_pos_ids(self, x: Tensor, pos_ids: Optional[Tensor] = None) -> Tensor:
_, seq_len, _ = x.shape
if pos_ids is None:
pos_ids = torch.arange(seq_len, dtype=torch.long, device=x.device)[
None, :
] # (1, seq_len)
if pos_ids.shape[1] != seq_len:
raise ValueError(
f"Input sequence and position ids must be equal in length: {pos_ids.shape[1]} != {seq_len}"
)
return pos_ids
class TransformerDecoder(nn.Module):
"""A transformer decoder.
Args:
decoder_layer (nn.Module): The transformer decoder layer.
An instance of :class:`TransformerDecoderLayer`.
num_layers (int): The number of transformer decoder layers to be stacked up.
"""
def __init__(
self,
decoder_layer: nn.Module,
num_layers: int = 12,
) -> None:
super().__init__()
self.layers = get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
def forward(
self,
hidden_states: Tensor,
attn_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
use_cache: bool = False,
causal: bool = False,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> TransformerDecoderOutput:
"""
Args:
hidden_states (Tensor): Tensor of the embedding vectors of dimension ``(b, seq_len, emb_dim)``.
attn_mask (Tensor, optional): Tensor of dimension ``(q_seq_len, k_seq_len)`` or
``(b, q_seq_len, k_seq_len)`` where prefixes ``q`` and ``k`` stand for query and key.
Contains 1s for positions to attend to and 0s for masked positions. Defaults to ``None``.
head_mask (Tensor, optional): Tensor of dimension ``(h, q_seq_len, k_seq_len)`` or
``(b, h, q_seq_len, k_seq_len)``. Masks need to be specified for each attention head.
Defaults to ``None``.
use_cache (bool, optional): If ``True``, caches past key/value tensors for faster decoding. If ``False``,
recomputes key and value for each decoding step. Defaults to ``False``.
causal (bool, optional): If ``True``, use causal attention. Defaults to ``False``.
return_attn_weights (bool, optional): If ``True``, returns attention probabilities of each transformer
layer. Defaults to ``False``.
return_hidden_states (bool, optional): If ``True``, returns the embeddings of each transformer layer.
Defaults to ``False``.
Returns:
An instance of :class:`~torchmultimodal.models.gpt.TransformerDecoderOutput`.
"""
if attn_mask is not None and attn_mask.dim() == 2:
attn_mask = attn_mask[
None, None, :, :
] # (q_seq_len, k_seq_len) -> (1, 1, q_seq_len, k_seq_len)
if head_mask is not None and head_mask.dim() == 3:
head_mask = head_mask[None, :, :, :]
all_hidden_states: Tuple[Tensor, ...] = () if return_hidden_states else None
all_attentions: Tuple[Tensor, ...] = () if return_attn_weights else None
all_past_key_values: Tuple[Dict[str, Tensor], ...] = () if use_cache else None
for layer in self.layers:
if return_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer(
hidden_states,
attn_mask,
head_mask,
use_cache,
causal,
return_attn_weights,
)
hidden_states = layer_outputs.hidden_states
if return_attn_weights:
all_attentions = all_attentions + (layer_outputs.attention_weights,)
if use_cache:
all_past_key_values = all_past_key_values + (
layer_outputs.past_key_values,
)
if return_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return TransformerDecoderOutput(
last_hidden_states=hidden_states,
hidden_states=all_hidden_states,
attention_weights=all_attentions,
past_key_values=all_past_key_values,
)
class TransformerDecoderLayer(nn.Module):
"""A single layer from a GPT transformer decoder
Layer norm is applied before the attention layer and the feedforward layer so that the gradients are
well-behaved at initialization for training stability. This is also called "Pre-LN Transformer" studied in
`"On Layer Normalization in the Transformer Architecture"<https://arxiv.org/pdf/2002.04745.pdf>`_
Args:
d_model (int): Dimension of the embeddings.
n_head (int): Number of attention heads.
dropout (float, optional): Dropout probability used in the dropout layers. Defaults to ``0.1``.
activation (Union[str, Callable], optional): Activation used by the feedforward layer. Defaults to
``SiLU``.
attn_module (nn.Module): Self attention module. Defaults to ``SelfAttention`` with dropout rate equal
to ``0.1``.
"""
def __init__(
self,
d_model: int = 768,
n_head: int = 12,
dropout: float = 0.1,
activation: Callable[..., nn.Module] = SiLU,
attn_module: nn.Module = SelfAttention(attn_dropout=0.1),
) -> None:
super().__init__()
self.norm_attn = nn.LayerNorm(d_model)
self.norm_mlp = nn.LayerNorm(d_model)
self.dropout_attn = nn.Dropout(dropout)
self.dropout_mlp = nn.Dropout(dropout)
# No bias when projecting q, k, v in GPT model
# https://github.com/openai/gpt-2/blob/master/src/model.py#L54
self.attention = MultiHeadAttention(
dim_q=d_model,
dim_kv=d_model,
n_head=n_head,
attn_module=attn_module,
add_bias=False,
)
self.mlp = MLP(
in_dim=d_model,
out_dim=d_model,
hidden_dims=[d_model * 4],
dropout=0.0,
activation=activation,
)
def forward(
self,
x: Tensor,
attn_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
use_cache: bool = False,
causal: bool = False,
return_attn_weights: bool = False,
) -> TransformerLayerOutput:
"""
Args:
x (Tensor): input embedding vectors.
attn_mask (Tensor, optional): Tensor of dimension ``(b, q_seq_len, k_seq_len)`` where prefixes ``q``
and ``k`` stand for query and key. Contains 1s for positions to attend to and 0s for masked positions.
Defaults to ``None``.
head_mask (Tensor, optional): Tensor of dimension ``(b, h, q_seq_len, k_seq_len)``. Masks need to be
specified for each attention head. Defaults to ``None``.
use_cache (bool, optional): If ``True``, caches past key/value tensors for faster decoding. If ``False``,
recomputes key and value for each decoding step. Defaults to ``False``.
causal (bool, optional): If ``True``, use causal attention. Defaults to ``False``.
return_attn_weights (bool, optional): If ``True``, returns attention probabilities of the layer.
Defaults to ``False``.
Returns:
An instance of :class:`~torchmultimodal.models.gpt.TransformerLayerOutput`.
"""
attn_probs = None
past_key_values = None
attn_out = self._attn(
self.norm_attn(x),
attn_mask,
head_mask,
return_attn_weights,
use_cache=use_cache,
causal=causal,
)
if return_attn_weights:
attn_hidden_states, attn_probs = attn_out
else:
attn_hidden_states = attn_out
if use_cache:
past_key_values = self.attention.cache
x = x + self.dropout_attn(attn_hidden_states)
mlp_hidden_states = self._mlp_block(self.norm_mlp(x))
x = x + self.dropout_mlp(mlp_hidden_states)
return TransformerLayerOutput(
hidden_states=x,
attention_weights=attn_probs,
past_key_values=past_key_values,
)
@checkpoint_wrapper
def _attn(
self,
x: Tensor,
attn_mask: Tensor,
head_mask: Tensor,
return_attn_weights: bool,
use_cache: bool,
causal: bool,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
return self.attention(
x,
attention_mask=attn_mask,
head_mask=head_mask,
return_attn_weights=return_attn_weights,
use_cache=use_cache,
causal=causal,
)
@checkpoint_wrapper
def _mlp_block(self, x: Tensor) -> Tensor:
return self.mlp(x)
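# Example (a minimal sketch, not part of the library): runs a single (untrained) decoder layer
# on random embeddings. ``d_model``, ``n_head``, and the batch shape are assumptions; eval mode
# is used so the activation-checkpointing wrappers above are bypassed.
def _transformer_decoder_layer_usage_example() -> TransformerLayerOutput:
    layer = TransformerDecoderLayer(d_model=64, n_head=4).eval()
    with torch.no_grad():
        x = torch.randn(2, 10, 64)  # (b, seq_len, d_model)
        return layer(x)  # TransformerLayerOutput with hidden_states of shape (2, 10, 64)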
class RightShift(nn.Module):
"""Shifts the embedding vectors along the sequence dimension to the right.
Since the decoder progresses by taking the token it generates in the previous step, before it
has generated anything it needs a token to start with. Hence, the start-of-sentence (SOS) token.
The SOS token is a learnable parameter of the decoder and the choice of its initialization is taken
from VideoGPT: https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/attention.py#L517
Args:
embedding_dim (int): Dimension of the embedding vector for each token along the sequence.
Attributes:
sos (nn.Parameter): The starting token to be prepended to the sequence.
"""
def __init__(self, embedding_dim: int) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.sos = nn.Parameter(torch.FloatTensor(embedding_dim).normal_(std=0.02))
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): An input tensor of shape ``(b, seq_len, emb_dim)``.
        Returns:
A tensor of the same shape as that of the input with the ``sos`` token prepended.
"""
x_shape = list(x.shape)
x = x.flatten(start_dim=1, end_dim=-2) # (batch, seq_len, emb)
sos = self.sos.unsqueeze(0).unsqueeze(1).repeat(x_shape[0], 1, 1) # (b, 1, emb)
# Shift one unit to the right along dim ``seq_len``
x = torch.cat(
(sos.data, x[:, :-1, :]), dim=1
) # (batch, seq_len, embedding_dim)
x = x.view(*x_shape)
return x
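# Example (a minimal sketch, not part of the library): shows that RightShift keeps the sequence
# length, prepending the learned SOS embedding and dropping the final position.
def _right_shift_usage_example() -> Tensor:
    right_shift = RightShift(embedding_dim=4)
    x = torch.randn(2, 3, 4)  # (b, seq_len, emb_dim)
    shifted = right_shift(x)  # shifted[:, 0] is SOS; shifted[:, 1:] == x[:, :-1]
    return shifted  # shape (2, 3, 4)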
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/gpt.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/flava/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Code for some of the transformers components in this file are initialized
# from their counterparts in Hugging Face Transformers library.
import math
from collections import namedtuple, OrderedDict
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
from torch import nn, Tensor
from torchmultimodal.models.flava.image_encoder import flava_image_encoder
from torchmultimodal.models.flava.text_encoder import flava_text_encoder
from torchmultimodal.models.flava.transformer import FLAVATransformerWithoutEmbeddings
from torchmultimodal.modules.layers.mlp import MLP
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
from torchmultimodal.modules.layers.transformer import (
TransformerEncoder,
TransformerOutput,
)
from torchmultimodal.modules.losses.flava import (
FLAVAPretrainingLoss,
FLAVAPretrainingLossOutput,
Pooler,
)
from torchmultimodal.utils.common import load_module_from_url, ModelOutput
from typing_extensions import Literal
EMBEDDING_OPTIONS = Literal["image", "text", "mm"]
FLAVAOutput = namedtuple(
"FLAVAOutput",
[
"image",
"image_masked",
"text",
"text_masked",
"multimodal",
"multimodal_masked",
"projected_image_embeddings",
"projected_text_embeddings",
],
defaults=(None, None, None, None, None, None, None, None),
)
FLAVAOutput.__annotations__ = {
"image": TransformerOutput,
"image_masked": TransformerOutput,
"text": TransformerOutput,
"text_masked": TransformerOutput,
"multimodal": TransformerOutput,
"multimodal_masked": TransformerOutput,
}
CKPT_KEY = "flava_full"
FLAVA_FOR_PRETRAINED_MAPPING = {
# This will no longer load with the updated model, but keeping here just in case
# "flava_full": "https://huggingface.co/aps/flava_full_pretrained_encoders_torchmm/resolve/main/pytorch_model.bin",
CKPT_KEY: "https://download.pytorch.org/models/multimodal/flava/flava_for_pretraining_unified_text_encoder.pt",
}
FLAVA_MODEL_MAPPING = {
CKPT_KEY: "https://download.pytorch.org/models/multimodal/flava/flava_model_unified_text_encoder.pt",
}
def flava_multimodal_encoder(
hidden_size: int = 768,
num_attention_heads: int = 12,
num_hidden_layers: int = 12,
dropout: float = 0.0,
intermediate_size: int = 3072,
intermediate_activation: Callable[..., nn.Module] = nn.GELU,
layer_norm_eps: float = 1e-12,
) -> FLAVATransformerWithoutEmbeddings:
encoder = TransformerEncoder(
n_layer=num_hidden_layers,
d_model=hidden_size,
n_head=num_attention_heads,
dim_feedforward=intermediate_size,
activation=intermediate_activation,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
norm_first=True,
)
layernorm = Fp32LayerNorm(hidden_size, eps=layer_norm_eps)
pooler = Pooler(hidden_size=hidden_size)
return FLAVATransformerWithoutEmbeddings(
encoder=encoder, layernorm=layernorm, pooler=pooler, hidden_size=hidden_size
)
@dataclass
class FLAVAForClassificationOutput(ModelOutput):
logits: Tensor
loss: Tensor
class FLAVAModel(nn.Module):
def __init__(
self,
image_encoder: nn.Module,
text_encoder: nn.Module,
mm_encoder: nn.Module,
image_to_mm_projection: nn.Module,
text_to_mm_projection: nn.Module,
text_projection: nn.Module,
image_projection: nn.Module,
**kwargs: Any,
) -> None:
super().__init__()
self.image_encoder = image_encoder
self.text_encoder = text_encoder
self.mm_encoder = mm_encoder
self.image_to_mm_projection = image_to_mm_projection
self.text_to_mm_projection = text_to_mm_projection
self.text_projection = text_projection
self.image_projection = image_projection
def forward(
self,
image: Optional[Tensor] = None,
text: Optional[Tensor] = None,
image_patches_mask: Optional[Tensor] = None,
text_masked: Optional[Tensor] = None,
required_embedding: Optional[EMBEDDING_OPTIONS] = None,
skip_unmasked_mm_encoder: bool = True,
) -> FLAVAOutput:
if required_embedding is None:
if image is not None and text is not None:
required_embedding = "mm"
elif image is not None:
required_embedding = "image"
else:
required_embedding = "text"
image_encoding_out = self._encode_data_to_embeddings(
image,
required_embedding,
["image", "mm"],
partial(self.encode_image, projection=True),
)
if len(image_encoding_out) == 2:
image_outputs, projected_image_embeddings = (
image_encoding_out[0],
image_encoding_out[1],
)
else:
image_outputs = image_encoding_out # type: ignore
projected_image_embeddings = None
text_encoding_out = self._encode_data_to_embeddings(
text,
required_embedding,
["text", "mm"],
partial(self.encode_text, projection=True),
)
if len(text_encoding_out) == 2:
text_outputs, projected_text_embeddings = (
text_encoding_out[0],
text_encoding_out[1],
)
else:
text_outputs = text_encoding_out # type: ignore
projected_text_embeddings = None
image_masked_outputs = self._encode_data_to_embeddings(
image,
required_embedding,
["image", "mm"],
partial(self.encode_image, image_patches_mask=image_patches_mask),
)
assert type(image_masked_outputs) == TransformerOutput
text_masked_outputs = self._encode_data_to_embeddings(
text_masked,
required_embedding,
["text", "mm"],
self.encode_text,
)
assert type(text_masked_outputs) == TransformerOutput
multimodal_outputs = TransformerOutput()
multimodal_masked_outputs = TransformerOutput()
if required_embedding == "mm":
            # Take the last entry of hidden_states rather than last_hidden_state because
            # for flava we want the hidden state without the final layernorm.
if not skip_unmasked_mm_encoder:
# Unmasked multimodal embedding is not currently used by any of the FLAVA losses.
multimodal_outputs = self.encode_mm(
image_outputs.hidden_states[-1] # type: ignore
if image_outputs.hidden_states # type: ignore
else None,
text_outputs.hidden_states[-1] # type: ignore
if text_outputs.hidden_states # type: ignore
else None,
)
multimodal_masked_outputs = self.encode_mm(
image_masked_outputs.hidden_states[-1]
if image_masked_outputs.hidden_states
else None,
text_masked_outputs.hidden_states[-1]
if text_masked_outputs.hidden_states
else None,
)
return FLAVAOutput(
image=image_outputs,
image_masked=image_masked_outputs,
text=text_outputs,
text_masked=text_masked_outputs,
multimodal=multimodal_outputs,
multimodal_masked=multimodal_masked_outputs,
projected_image_embeddings=projected_image_embeddings,
projected_text_embeddings=projected_text_embeddings,
)
def encode_image(
self,
image: Tensor,
image_patches_mask: Optional[Tensor] = None,
projection: bool = False,
) -> Union[Tuple[TransformerOutput, Tensor], Optional[TransformerOutput]]:
if image_patches_mask is not None:
encoded_image = self.image_encoder(image, image_patches_mask)
else:
encoded_image = self.image_encoder(image)
if projection:
projected_embeddings = self.image_projection(
encoded_image.last_hidden_state[:, 0, :]
)
return encoded_image, projected_embeddings
return encoded_image
def encode_text(
self, text: Tensor, text_mask: Optional[Tensor] = None, projection: bool = False
) -> Union[Tuple[TransformerOutput, Tensor], Optional[TransformerOutput]]:
# TODO(asg): Give proper parameter names when implementing text encoder
encoded_text = self.text_encoder(
input_ids=text,
attention_mask=text_mask,
return_attn_weights=True,
return_hidden_states=True,
)
if projection:
projected_embeddings = self.text_projection(
encoded_text.last_hidden_state[:, 0, :]
)
return encoded_text, projected_embeddings
return encoded_text
def _encode_data_to_embeddings(
self,
data: Optional[Tensor],
selected_head_encoder: EMBEDDING_OPTIONS,
encoder_options: List[EMBEDDING_OPTIONS],
encode_callable: Callable[
...,
Union[Tuple[TransformerOutput, Tensor], Optional[TransformerOutput]],
],
) -> Union[Tuple[TransformerOutput, Tensor], Optional[TransformerOutput]]:
output: Union[
Tuple[TransformerOutput, Tensor], TransformerOutput
] = TransformerOutput()
if data is not None and selected_head_encoder in encoder_options:
output = encode_callable(data)
return output
def encode_mm(
self,
image_embedding: Tensor,
text_embedding: Tensor,
) -> TransformerOutput:
if image_embedding is None or text_embedding is None:
# One of the modalities is missing (e.g. there is no masked data
# for it), so skip the multimodal encoder.
return TransformerOutput()
image_embedding = self.image_to_mm_projection(image_embedding)
text_embedding = self.text_to_mm_projection(text_embedding)
fused_state = torch.cat([image_embedding, text_embedding], dim=1)
return self.mm_encoder(fused_state)
class FLAVAForPreTraining(nn.Module):
# TODOs:
# 1. Expose logit scale
# 2. For FLAVA model, allow interpolating the position embeddings
#    for patch embeddings
def __init__(
self, model: FLAVAModel, image_codebook: nn.Module, loss: FLAVAPretrainingLoss
) -> None:
super().__init__()
self.model = model
self.image_codebook = image_codebook
self.loss = loss
def encode_image(
self,
image: Tensor,
cls_index: int = 0,
) -> Tensor:
encoded_result = self.model.encode_image(image, projection=True)
encoded_image = encoded_result[1]
return encoded_image
def encode_text(
self,
text: Tensor,
text_mask: Optional[Tensor] = None,
cls_index: int = 0,
) -> Tensor:
encoded_result = self.model.encode_text(text, text_mask, projection=True)
encoded_text = encoded_result[1]
return encoded_text
# TODO: Add options to enable losses selectively
def forward(
self,
image: Optional[Tensor] = None,
text: Optional[Tensor] = None,
image_for_codebook: Optional[Tensor] = None,
image_patches_mask: Optional[Tensor] = None,
text_masked: Optional[Tensor] = None,
required_embedding: Optional[EMBEDDING_OPTIONS] = None,
skip_unmasked_mm_encoder: bool = True,
itm_labels: Optional[Tensor] = None,
mlm_labels: Optional[Tensor] = None,
) -> FLAVAPretrainingLossOutput:
image_labels = None
if image_for_codebook is not None:
image_labels = self.image_codebook(image_for_codebook).flatten(1)
image_patches_mask = image_patches_mask.flatten(1).to(torch.bool)
image_labels[~image_patches_mask] = -1
flava_output: FLAVAOutput = self.model(
image=image,
text=text,
image_patches_mask=image_patches_mask,
text_masked=text_masked,
required_embedding=required_embedding,
skip_unmasked_mm_encoder=skip_unmasked_mm_encoder,
)
return self.loss(
image_sequence=flava_output.image.last_hidden_state,
text_sequence=flava_output.text.last_hidden_state,
image_masked_sequence=flava_output.image_masked.last_hidden_state,
text_masked_sequence=flava_output.text_masked.last_hidden_state,
multimodal_sequence=flava_output.multimodal.last_hidden_state
if not skip_unmasked_mm_encoder
else None,
multimodal_masked_sequence=flava_output.multimodal_masked.last_hidden_state,
itm_labels=itm_labels,
mim_labels=image_labels,
mlm_labels=mlm_labels,
projected_image_embeddings=flava_output.projected_image_embeddings,
projected_text_embeddings=flava_output.projected_text_embeddings,
)
class FLAVAForClassification(nn.Module):
def __init__(
self,
model: FLAVAModel,
classifier: nn.Module,
loss: Union[nn.Module, Callable[[Tensor, Tensor], Tensor]],
**kwargs: Any,
) -> None:
super().__init__()
self.model = model
self.classifier = classifier
self.loss = loss
def forward(
self,
image: Optional[Tensor] = None,
text: Optional[Tensor] = None,
required_embedding: Optional[EMBEDDING_OPTIONS] = None,
labels: Optional[Tensor] = None,
cls_index: int = 0,
) -> FLAVAForClassificationOutput:
flava_output: FLAVAOutput = self.model(
image=image,
text=text,
required_embedding=required_embedding,
# Don't skip the encoder for classification
skip_unmasked_mm_encoder=False,
)
hidden_state: Optional[Tensor] = None
if required_embedding == "image":
hidden_state = flava_output.image.last_hidden_state
elif required_embedding == "text":
hidden_state = flava_output.text.last_hidden_state
else:
hidden_state = flava_output.multimodal.last_hidden_state
scores = self.classifier(hidden_state[:, cls_index])
loss = self.loss(scores, labels)
return FLAVAForClassificationOutput(
logits=scores,
loss=loss,
)
# NOTE:
# 1) A dataclass could be used to group the similar encoder kwargs;
#    this was not done here to keep the signatures readable.
def flava_model(
# Image encoder specific parameters
image_hidden_size: int = 768,
image_num_attention_heads: int = 12,
image_num_hidden_layers: int = 12,
image_dropout: float = 0.0,
image_intermediate_size: int = 3072,
image_intermediate_activation: Callable[..., nn.Module] = nn.GELU,
image_layer_norm_eps: float = 1e-12,
use_image_masking: bool = True,
image_size: int = 224,
patch_size: int = 16,
num_channels: int = 3,
# Text encoder specific parameters
text_hidden_size: int = 768,
text_num_attention_heads: int = 12,
text_num_hidden_layers: int = 12,
text_dropout: float = 0.0,
text_intermediate_size: int = 3072,
text_intermediate_activation: Callable[..., nn.Module] = nn.GELU,
text_layer_norm_eps: float = 1e-12,
vocab_size: int = 30522,
pad_token_id: int = 0,
type_vocab_size: int = 2,
max_position_embeddings: int = 512,
# Multimodal encoder specific parameters
multimodal_hidden_size: int = 768,
multimodal_num_attention_heads: int = 12,
multimodal_num_hidden_layers: int = 6,
multimodal_dropout: float = 0.0,
multimodal_intermediate_size: int = 3072,
multimodal_intermediate_activation: Callable[..., nn.Module] = nn.GELU,
multimodal_layer_norm_eps: float = 1e-12,
# projection
text_and_image_proj_size: int = 768,
pretrained: bool = False,
**kwargs: Any,
) -> FLAVAModel:
image_encoder = flava_image_encoder(
hidden_size=image_hidden_size,
num_attention_heads=image_num_attention_heads,
num_hidden_layers=image_num_hidden_layers,
use_image_masking=use_image_masking,
dropout=image_dropout,
intermediate_size=image_intermediate_size,
intermediate_activation=image_intermediate_activation,
layer_norm_eps=image_layer_norm_eps,
image_size=image_size,
patch_size=patch_size,
num_channels=num_channels,
)
text_encoder = flava_text_encoder(
hidden_size=text_hidden_size,
num_attention_heads=text_num_attention_heads,
num_hidden_layers=text_num_hidden_layers,
dropout=text_dropout,
intermediate_size=text_intermediate_size,
intermediate_activation=text_intermediate_activation,
layer_norm_eps=text_layer_norm_eps,
vocab_size=vocab_size,
pad_token_id=pad_token_id,
type_vocab_size=type_vocab_size,
max_position_embeddings=max_position_embeddings,
)
mm_encoder = flava_multimodal_encoder(
hidden_size=multimodal_hidden_size,
num_attention_heads=multimodal_num_attention_heads,
num_hidden_layers=multimodal_num_hidden_layers,
dropout=multimodal_dropout,
intermediate_size=multimodal_intermediate_size,
intermediate_activation=multimodal_intermediate_activation,
layer_norm_eps=multimodal_layer_norm_eps,
)
image_to_mm_projection = nn.Linear(image_hidden_size, multimodal_hidden_size)
text_to_mm_projection = nn.Linear(text_hidden_size, multimodal_hidden_size)
image_projection = nn.Linear(image_hidden_size, text_and_image_proj_size)
text_projection = nn.Linear(text_hidden_size, text_and_image_proj_size)
flava = FLAVAModel(
image_encoder=image_encoder,
text_encoder=text_encoder,
mm_encoder=mm_encoder,
image_to_mm_projection=image_to_mm_projection,
text_to_mm_projection=text_to_mm_projection,
text_projection=text_projection,
image_projection=image_projection,
)
if pretrained:
load_module_from_url(flava, FLAVA_MODEL_MAPPING[CKPT_KEY])
return flava
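# --- Editor's illustrative sketch (not part of the original library code) ---
# A minimal sketch of how the constructed FLAVAModel can be used to obtain the
# projected unimodal embeddings. The input shapes and vocab size are assumptions
# based on the defaults above (224x224 images, 16x16 patches, BERT-style token
# ids with vocab size 30522); random tensors stand in for real preprocessed data.
def _example_flava_unimodal_embeddings() -> Tuple[torch.Size, torch.Size]:
    flava = flava_model(pretrained=False)
    image = torch.randn(2, 3, 224, 224)
    text = torch.randint(0, 30522, (2, 32))
    # encode_image / encode_text return (transformer output, projected embedding)
    # when projection=True; the projections feed the contrastive objective.
    _, image_proj = flava.encode_image(image, projection=True)
    _, text_proj = flava.encode_text(text, projection=True)
    return image_proj.shape, text_proj.shape  # expected: (2, 768) each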
def flava_model_for_pretraining(
codebook_image_size: int = 112,
pretrained: bool = False,
**flava_model_kwargs: Any,
# TODO: Add parameters for loss here
) -> FLAVAForPreTraining:
model = flava_model(**flava_model_kwargs)
hidden_size = flava_model_kwargs.get("multimodal_hidden_size", 768)
losses = FLAVAPretrainingLoss(hidden_size=hidden_size)
codebook = DalleVAEEncoder(image_size=codebook_image_size)
flava = FLAVAForPreTraining(
model=model,
image_codebook=codebook,
loss=losses,
)
if pretrained:
load_module_from_url(flava, FLAVA_FOR_PRETRAINED_MAPPING[CKPT_KEY])
return flava
def flava_model_for_classification(
num_classes: int,
classifier_in_dim: int = 768,
classifier_hidden_sizes: Union[int, List[int]] = 768,
classifier_dropout: float = 0.5,
classifier_activation: Callable[..., nn.Module] = nn.ReLU,
classifier_normalization: Optional[Callable[..., nn.Module]] = None,
loss_fn: Optional[Callable[..., Tensor]] = None,
pretrained: bool = True,
**flava_model_kwargs: Any,
) -> FLAVAForClassification:
classifier = MLP(
in_dim=classifier_in_dim,
out_dim=num_classes,
hidden_dims=classifier_hidden_sizes,
dropout=classifier_dropout,
activation=classifier_activation,
normalization=classifier_normalization,
)
model = flava_model(**flava_model_kwargs)
if loss_fn is None:
loss_fn = nn.CrossEntropyLoss()
classification_model = FLAVAForClassification(
model=model, classifier=classifier, loss=loss_fn
)
if pretrained:
load_module_from_url(
classification_model,
FLAVA_FOR_PRETRAINED_MAPPING[CKPT_KEY],
strict=False,
)
return classification_model
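# --- Editor's illustrative sketch (not part of the original library code) ---
# A minimal end-to-end sketch of the classification wrapper built above, using
# random stand-ins for a batch of two image-text pairs and three classes.
# pretrained=False avoids any checkpoint download; all shapes are assumptions
# based on the default encoder sizes.
def _example_flava_classification_step() -> FLAVAForClassificationOutput:
    model = flava_model_for_classification(num_classes=3, pretrained=False)
    image = torch.randn(2, 3, 224, 224)
    text = torch.randint(0, 30522, (2, 32))
    labels = torch.tensor([0, 2])
    # With both modalities present, the multimodal [CLS] state is classified.
    output = model(image=image, text=text, labels=labels)
    return output  # output.logits has shape (2, 3); output.loss is a scalar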
class DalleConv2d(nn.Module):
def __init__(self, n_in: int, n_out: int, kw: int) -> None:
super().__init__()
w = torch.empty((n_out, n_in, kw, kw), dtype=torch.float32)
w.normal_(std=1 / math.sqrt(n_in * kw**2))
b = torch.zeros((n_out,), dtype=torch.float32)
self.w, self.b = nn.Parameter(w), nn.Parameter(b)
self.kw = kw
def forward(self, x: torch.Tensor) -> torch.Tensor:
return nn.functional.conv2d(x, self.w, self.b, padding=(self.kw - 1) // 2)
class DalleEncoderBlock(nn.Module):
def __init__(self, n_in: int, n_out: int, n_layers: int) -> None:
super().__init__()
n_hid = n_out // 4
self.post_gain = 1 / (n_layers**2)
self.id_path = DalleConv2d(n_in, n_out, 1) if n_in != n_out else nn.Identity()
self.res_path = nn.Sequential(
OrderedDict(
[
("relu_1", nn.ReLU()),
("conv_1", DalleConv2d(n_in, n_hid, 3)),
("relu_2", nn.ReLU()),
("conv_2", DalleConv2d(n_hid, n_hid, 3)),
("relu_3", nn.ReLU()),
("conv_3", DalleConv2d(n_hid, n_hid, 3)),
("relu_4", nn.ReLU()),
("conv_4", DalleConv2d(n_hid, n_out, 1)),
]
)
)
def forward(self, x: Tensor) -> Tensor:
return self.id_path(x) + self.post_gain * self.res_path(x)
class DalleEncoder(nn.Module):
def __init__(
self,
group_count: int = 4,
n_hid: int = 256,
n_blk_per_group: int = 2,
input_channels: int = 3,
vocab_size: int = 8192,
**kwargs: Any,
) -> None:
super().__init__()
self.input_channels = input_channels
n_layers = group_count * n_blk_per_group
output_conv = DalleConv2d(8 * n_hid, vocab_size, 1)
self.blocks = nn.Sequential(
OrderedDict(
[
("input", DalleConv2d(input_channels, 1 * n_hid, 7)),
(
"group_1",
self._create_group(
n_layers, n_blk_per_group, 1 * n_hid, 1 * n_hid
),
),
(
"group_2",
self._create_group(
n_layers, n_blk_per_group, 1 * n_hid, 2 * n_hid
),
),
(
"group_3",
self._create_group(
n_layers, n_blk_per_group, 2 * n_hid, 4 * n_hid
),
),
(
"group_4",
self._create_group(
n_layers,
n_blk_per_group,
4 * n_hid,
8 * n_hid,
use_pool=False,
),
),
(
"output",
nn.Sequential(
OrderedDict([("relu", nn.ReLU()), ("conv", output_conv)])
),
),
]
)
)
def _create_group(
self,
n_layers: int,
n_blk_per_group: int,
n_in: int,
n_hid: int,
use_pool: bool = True,
) -> nn.Module:
make_blk = partial(DalleEncoderBlock, n_layers=n_layers)
blk_range = range(n_blk_per_group)
blocks: OrderedDict[str, nn.Module] = OrderedDict()
for i in blk_range:
if i == 0:
blocks[f"block_{i+1}"] = make_blk(n_in, n_hid)
else:
blocks[f"block_{i+1}"] = make_blk(n_hid, n_hid)
if use_pool:
blocks["pool"] = nn.MaxPool2d(kernel_size=2)
return nn.Sequential(blocks)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError(f"input shape {x.shape} is not 4d")
if x.shape[1] != self.input_channels:
raise ValueError(
f"input has {x.shape[1]} channels but model built for {self.input_channels}"
)
# if x.dtype != torch.float32:
# raise ValueError('input must have dtype torch.float32')
return self.blocks(x)
class DalleVAEEncoder(nn.Module):
def __init__(
self, image_size: Union[int, Tuple[int, int]] = 112, pretrained: bool = True
):
super().__init__()
self.image_size = image_size
self.encoder = DalleEncoder()
if pretrained:
self.load_model()
def load_model(self) -> Any: # type: ignore
# TODO (T116682215): Network error due to FLAVA model relying on access to openAI
encoder_state_dict = torch.hub.load_state_dict_from_url(
"https://cdn.openai.com/dall-e/encoder.pkl"
)
self.encoder.load_state_dict(encoder_state_dict.state_dict()) # type: ignore
return self.state_dict()
def get_codebook_indices(self, images: Tensor) -> Tensor:
z_logits = self.encoder(images)
return torch.argmax(z_logits, axis=1) # type: ignore
def get_codebook_probs(self, images: Tensor) -> Tensor:
z_logits = self.encoder(images)
return nn.Softmax(dim=1)(z_logits)
def forward(self, img_seq_prob: Tensor) -> Tensor:
return self.get_codebook_indices(img_seq_prob)
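# --- Editor's illustrative sketch (not part of the original library code) ---
# Shows the codebook-index shape produced by DalleVAEEncoder for the default
# FLAVA codebook image size. pretrained=False is used so nothing is downloaded;
# random weights are enough to check shapes: the three max-pooled groups
# downsample a 112x112 input by a factor of 8 to a 14x14 grid of indices in
# [0, 8192).
def _example_codebook_indices_shape() -> torch.Size:
    vae = DalleVAEEncoder(image_size=112, pretrained=False)
    images = torch.randn(2, 3, 112, 112)
    indices = vae.get_codebook_indices(images)
    return indices.shape  # expected: torch.Size([2, 14, 14])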
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/flava/model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from typing import Callable
from torch import nn
from torchmultimodal.models.flava.transformer import init_transformer_weights
from torchmultimodal.modules.encoders.bert_text_encoder import BERTTextEncoder
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
from torchmultimodal.modules.layers.text_embedding import BERTTextEmbeddings
from torchmultimodal.modules.layers.transformer import TransformerEncoder
from torchmultimodal.modules.losses.flava import Pooler
def flava_text_encoder(
# TransformerEncoder params
num_hidden_layers: int = 12,
hidden_size: int = 768,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
intermediate_activation: Callable[..., nn.Module] = nn.GELU,
layer_norm_eps: float = 1e-12,
dropout: float = 0.0,
# TextEmbeddings params
vocab_size: int = 30522,
pad_token_id: int = 0,
type_vocab_size: int = 2,
max_position_embeddings: int = 512,
# TextEncoder params
initializer_range: float = 0.02,
) -> BERTTextEncoder:
embeddings = BERTTextEmbeddings(
hidden_size=hidden_size,
vocab_size=vocab_size,
pad_token_id=pad_token_id,
type_vocab_size=type_vocab_size,
max_position_embeddings=max_position_embeddings,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
)
encoder = TransformerEncoder(
n_layer=num_hidden_layers,
d_model=hidden_size,
n_head=num_attention_heads,
dim_feedforward=intermediate_size,
activation=intermediate_activation,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
norm_first=True,
)
layernorm = Fp32LayerNorm(hidden_size, eps=layer_norm_eps)
pooler = Pooler(hidden_size=hidden_size)
weight_init_fn = partial(
init_transformer_weights, initializer_range=initializer_range
)
return BERTTextEncoder(
embeddings=embeddings,
encoder=encoder,
layernorm=layernorm,
pooler=pooler,
weight_init_fn=weight_init_fn,
)
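# --- Editor's illustrative sketch (not part of the original library code) ---
# Assumes the returned BERTTextEncoder accepts a batch of token ids as its first
# argument and exposes `last_hidden_state` on its output; the ids below are
# random placeholders, not real tokenizer output.
def _example_flava_text_encoder_shapes():
    import torch  # local import: this module otherwise only needs torch.nn

    encoder = flava_text_encoder()
    tokens = torch.randint(0, 30522, (2, 16))
    out = encoder(tokens)
    return out.last_hidden_state.shape  # expected: torch.Size([2, 16, 768])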
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/flava/text_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from typing import Any, Callable, Optional
import torch
from torch import nn, Tensor
from torchmultimodal.modules.layers.transformer import TransformerOutput
class FLAVATransformerWithoutEmbeddings(nn.Module):
# TODO(asg): Add support for pretrained checkpoint loading
def __init__(
self,
encoder: nn.Module,
layernorm: nn.Module,
pooler: nn.Module,
hidden_size: int = 768,
weight_init_fn: Optional[Callable] = None,
initializer_range: float = 0.02,
use_cls_token: bool = True,
**kwargs: Any,
):
super().__init__()
self.encoder = encoder
self.layernorm = layernorm
self.pooler = pooler
if use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size))
else:
self.cls_token = None
if weight_init_fn is None:
weight_init_fn = partial(
init_transformer_weights, initializer_range=initializer_range
)
self.apply(weight_init_fn)
def forward(
self,
hidden_states: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
) -> TransformerOutput:
if hidden_states is None:
raise ValueError("You have to specify hidden_states")
if self.cls_token is not None:
batch_size = hidden_states.shape[0]
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
encoder_output = self.encoder(
hidden_states,
attention_mask=attention_mask,
return_hidden_states=True,
return_attn_weights=True,
)
sequence_output = encoder_output.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
return TransformerOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_output.hidden_states,
attentions=encoder_output.attentions,
)
def init_transformer_weights(module: nn.Module, initializer_range: float) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
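# --- Editor's illustrative sketch (not part of the original library code) ---
# init_transformer_weights is meant to be applied recursively with
# nn.Module.apply, usually through functools.partial as in the modules above.
def _example_apply_transformer_init() -> nn.Module:
    block = nn.Sequential(nn.Linear(768, 768), nn.LayerNorm(768))
    block.apply(partial(init_transformer_weights, initializer_range=0.02))
    return block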
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/flava/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
from functools import partial
from typing import Any, Callable, Dict, Optional, Tuple
import torch
from torch import nn, Tensor
from torchmultimodal.models.flava.transformer import init_transformer_weights
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
from torchmultimodal.modules.layers.transformer import (
TransformerEncoder,
TransformerOutput,
)
from torchmultimodal.modules.losses.flava import Pooler
def to_2tuple(x: int) -> Tuple[int, int]:
return (x, x)
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
class PatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(
self,
image_size: int = 224,
patch_size: int = 16,
num_channels: int = 3,
embed_dim: int = 768,
) -> None:
super().__init__()
image_size = to_2tuple(image_size)
patch_size = to_2tuple(patch_size)
num_patches = (image_size[1] // patch_size[1]) * (
image_size[0] // patch_size[0]
)
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(
num_channels, embed_dim, kernel_size=self.patch_size, stride=self.patch_size
)
def forward(
self, pixel_values: Tensor, interpolate_pos_encoding: bool = False
) -> Tensor:
_, _, height, width = pixel_values.shape
if not interpolate_pos_encoding:
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
)
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
return x
class ImageEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings.
"""
def __init__(
self,
image_size: int = 224,
patch_size: int = 16,
num_channels: int = 3,
hidden_size: int = 768,
hidden_dropout_prob: float = 0.0,
use_image_masking: bool = True,
) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size))
self.patch_embeddings = PatchEmbeddings(
image_size=image_size,
patch_size=patch_size,
num_channels=num_channels,
embed_dim=hidden_size,
)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(
torch.zeros(1, num_patches + 1, hidden_size)
)
self.dropout = nn.Dropout(hidden_dropout_prob)
if use_image_masking:
self.mask_token = nn.Parameter(torch.zeros(1, 1, hidden_size))
else:
self.mask_token = None
def interpolate_pos_encoding(
self, embeddings: Tensor, height: int, width: int
) -> Tensor:
"""
Interpolates the pre-trained position encodings so that the model can be used
on higher-resolution images.
Source:
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
"""
npatch = embeddings.shape[1] - 1
n = self.position_embeddings.shape[1] - 1
if npatch == n and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
h0 = height // self.patch_embeddings.patch_size[0]
w0 = width // self.patch_embeddings.patch_size[1]
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
h0, w0 = h0 + 0.1, w0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(
1, int(math.sqrt(n)), int(math.sqrt(n)), dim
).permute(0, 3, 1, 2),
scale_factor=(h0 / math.sqrt(n), w0 / math.sqrt(n)),
mode="bicubic",
align_corners=False,
)
assert (
int(h0) == patch_pos_embed.shape[-2]
and int(w0) == patch_pos_embed.shape[-1]
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(
self,
pixel_values: Tensor,
image_patches_mask: Optional[Tensor] = None,
interpolate_pos_encoding: bool = False,
) -> Tensor:
batch_size, num_channels, height, width = pixel_values.shape
embeddings = self.patch_embeddings(
pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
)
_, seq_len, _ = embeddings.size()
if image_patches_mask is not None:
if self.mask_token is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
w = image_patches_mask.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1 - w) + mask_tokens * w
else:
warnings.warn(
"image_patches_mask passed but use_image_masking in init was false. Ignoring."
)
# add the [CLS] token to the embedded patch tokens
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(
embeddings, height, width
)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
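# --- Editor's illustrative sketch (not part of the original library code) ---
# With the default 224x224 images and 16x16 patches there are 14 * 14 = 196
# patch tokens plus one [CLS] token, so both the output sequence length and the
# position-embedding table are 197 entries long. Larger inputs require
# interpolate_pos_encoding=True, which resizes the 14x14 grid bicubically.
def _example_image_embedding_shapes() -> Tensor:
    embeddings = ImageEmbeddings(image_size=224, patch_size=16, hidden_size=768)
    pixel_values = torch.randn(2, 3, 224, 224)
    out = embeddings(pixel_values)
    return out  # expected shape: (2, 197, 768)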
class ImageTransformer(nn.Module):
# TODO(asg): Add support for pretrained checkpoint loading
def __init__(
self,
embeddings: nn.Module,
encoder: nn.Module,
layernorm: nn.Module,
pooler: nn.Module,
weight_init_fn: Optional[Callable] = None,
initializer_range: float = 0.02,
**kwargs: Any,
) -> None:
super().__init__()
self.embeddings = embeddings
self.encoder = encoder
self.layernorm = layernorm
self.pooler = pooler
if weight_init_fn is None:
weight_init_fn = partial(
init_transformer_weights, initializer_range=initializer_range
)
self.apply(weight_init_fn)
def forward(
self,
pixel_values: Optional[Tensor] = None,
image_patches_mask: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
) -> TransformerOutput:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(
pixel_values, image_patches_mask=image_patches_mask
)
encoder_output = self.encoder(
embedding_output,
attention_mask=attention_mask,
return_attn_weights=True,
return_hidden_states=True,
)
sequence_output = encoder_output.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
return TransformerOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_output.hidden_states,
attentions=encoder_output.attentions,
)
def flava_image_encoder(
hidden_size: int = 768,
num_attention_heads: int = 12,
num_hidden_layers: int = 12,
use_image_masking: bool = False,
dropout: float = 0.0,
intermediate_size: int = 3072,
intermediate_activation: Callable[..., nn.Module] = nn.GELU,
layer_norm_eps: float = 1e-12,
image_size: int = 224,
patch_size: int = 16,
num_channels: int = 3,
) -> ImageTransformer:
embeddings = ImageEmbeddings(
image_size=image_size,
patch_size=patch_size,
num_channels=num_channels,
hidden_size=hidden_size,
hidden_dropout_prob=dropout,
use_image_masking=use_image_masking,
)
encoder = TransformerEncoder(
n_layer=num_hidden_layers,
d_model=hidden_size,
n_head=num_attention_heads,
dim_feedforward=intermediate_size,
activation=intermediate_activation,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
norm_first=True,
)
layernorm = Fp32LayerNorm(hidden_size, eps=layer_norm_eps)
pooler = Pooler(hidden_size=hidden_size)
return ImageTransformer(
embeddings=embeddings,
encoder=encoder,
layernorm=layernorm,
pooler=pooler,
)
class ImageTransformerWithVAE(nn.Module):
def __init__(
self,
image_transformer: nn.Module,
vae: nn.Module,
**kwargs: Dict[str, Any],
) -> None:
super().__init__()
self.image_transformer = image_transformer
self.vae = vae
def forward(
self,
pixel_values: Optional[Tensor] = None,
image_patches_mask: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
) -> TransformerOutput:
image_labels = self.vae(pixel_values).flatten(1)
image_patches_mask = image_patches_mask.flatten(1).to(torch.bool)
image_labels[image_patches_mask == False] = -1 # noqa
output = self.image_transformer(
pixel_values=pixel_values,
image_patches_mask=image_patches_mask,
attention_mask=attention_mask,
)
return TransformerOutput(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
hidden_states=output.hidden_states,
attentions=output.attentions,
)
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/flava/image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/clip/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple
import torch
import torch.nn.functional as F
from torch import nn
from torchmultimodal.models.clip.image_encoder import CLIPViTEncoder, ResNetForCLIP
from torchmultimodal.models.clip.text_encoder import CLIPTextEncoder
from torchmultimodal.utils.common import load_module_from_url
from torchvision.models.resnet import Bottleneck, ResNet
class CLIPOutput(NamedTuple):
embeddings_a: torch.Tensor
embeddings_b: torch.Tensor
CLIP_MODEL_MAPPING = {
"vit_b16": "https://download.pytorch.org/models/multimodal/clip/clip_vit_b16.pt",
"vit_b32": "https://download.pytorch.org/models/multimodal/clip/clip_vit_b32.pt",
"vit_l14": "https://download.pytorch.org/models/multimodal/clip/clip_vit_l14.pt",
"rn50": "https://download.pytorch.org/models/multimodal/clip/clip_rn50.pt",
"rn101": "https://download.pytorch.org/models/multimodal/clip/clip_rn101.pt",
"rn50x4": "https://download.pytorch.org/models/multimodal/clip/clip_rn50x4.pt",
"rn50x16": "https://download.pytorch.org/models/multimodal/clip/clip_rn50x16.pt",
"rn50x64": "https://download.pytorch.org/models/multimodal/clip/clip_rn50x64.pt",
}
class CLIP(nn.Module):
"""CLIP is a model for contrastive pretraining between two modalities.
CLIP (https://arxiv.org/pdf/2103.00020.pdf) jointly trains an image encoder
(either ResNet or ViT) and a text encoder (Transformer) to predict correct
(image, text) pairings via a contrastive loss function. This module contains the
encoders, while the loss is implemented in ContrastiveLossWithTemperature.
Args: encoder_a (nn.Module): Instantiated encoder for modality A.
See e.g. ResNetForCLIP class.
encoder_b (nn.Module): Instantiated encoder for modality B.
See e.g. CLIPTextEncoder class.
Inputs: features_a (Tensor): Tensor containing features of modality A.
features_b (Tensor): Tensor containing features of modality B.
"""
def __init__(
self,
encoder_a: nn.Module,
encoder_b: nn.Module,
):
super().__init__()
torch._C._log_api_usage_once(f"torchmultimodal.{self.__class__.__name__}")
self.encoder_a = encoder_a
self.encoder_b = encoder_b
def forward(
self,
features_a: torch.Tensor,
features_b: torch.Tensor,
) -> CLIPOutput:
embeddings_a = self.encoder_a(features_a)
embeddings_b = self.encoder_b(features_b)
embeddings_a = F.normalize(embeddings_a)
embeddings_b = F.normalize(embeddings_b)
return CLIPOutput(embeddings_a=embeddings_a, embeddings_b=embeddings_b)
def clip_vit_b16(pretrained: bool = False) -> CLIP:
vision_encoder = CLIPViTEncoder(
image_size=224, patch_size=16, layers=12, heads=12, width=768, embedding_dim=512
)
text_encoder = CLIPTextEncoder(embedding_dim=512)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["vit_b16"])
return clip
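# --- Editor's illustrative sketch (not part of the original library code) ---
# Random tensors stand in for preprocessed 224x224 images and 77-token BPE ids;
# only the shapes matter. Because both embeddings are L2-normalized by
# CLIP.forward, a plain matrix product gives pairwise cosine similarities.
def _example_clip_similarity() -> torch.Tensor:
    model = clip_vit_b16(pretrained=False)
    images = torch.randn(2, 3, 224, 224)
    texts = torch.randint(0, 49408, (2, 77))
    out = model(images, texts)
    similarity = out.embeddings_a @ out.embeddings_b.t()
    return similarity  # expected shape: (2, 2)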
def clip_vit_b32(pretrained: bool = False) -> CLIP:
vision_encoder = CLIPViTEncoder(
image_size=224, patch_size=32, layers=12, heads=12, width=768, embedding_dim=512
)
text_encoder = CLIPTextEncoder(embedding_dim=512)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["vit_b32"])
return clip
def clip_vit_l14(pretrained: bool = False) -> CLIP:
vision_encoder = CLIPViTEncoder(
image_size=224,
patch_size=14,
layers=24,
heads=16,
width=1024,
embedding_dim=768,
)
text_encoder = CLIPTextEncoder(
embedding_dim=768, width=768, dim_feedforward=3072, heads=12
)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["vit_l14"])
return clip
def clip_rn50(pretrained: bool = False) -> CLIP:
vision_encoder = ResNetForCLIP(
layers=(3, 4, 6, 3),
output_dim=1024,
heads=32,
width=64,
)
text_encoder = CLIPTextEncoder(embedding_dim=1024)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["rn50"])
return clip
def clip_rn101(pretrained: bool = False) -> CLIP:
vision_encoder = ResNetForCLIP(
layers=(3, 4, 23, 3),
output_dim=512,
heads=32,
width=64,
)
text_encoder = CLIPTextEncoder(embedding_dim=512)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["rn101"])
return clip
# Note: these models require larger image sizes
def clip_rn50x4(pretrained: bool = False) -> CLIP:
vision_encoder = ResNetForCLIP(
layers=(4, 6, 10, 6),
output_dim=640,
heads=40,
input_resolution=288,
width=80,
)
text_encoder = CLIPTextEncoder(
embedding_dim=640, width=640, dim_feedforward=2560, heads=10
)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["rn50x4"])
return clip
def clip_rn50x16(pretrained: bool = False) -> CLIP:
vision_encoder = ResNetForCLIP(
layers=(6, 8, 18, 8),
output_dim=768,
heads=48,
input_resolution=384,
width=96,
)
text_encoder = CLIPTextEncoder(
embedding_dim=768, width=768, dim_feedforward=3072, heads=12
)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["rn50x16"])
return clip
def clip_rn50x64(pretrained: bool = False) -> CLIP:
vision_encoder = ResNetForCLIP(
layers=(3, 15, 36, 10),
output_dim=1024,
heads=64,
input_resolution=448,
width=128,
)
text_encoder = CLIPTextEncoder(
embedding_dim=1024, width=1024, dim_feedforward=4096, heads=16
)
clip = CLIP(vision_encoder, text_encoder)
if pretrained:
load_module_from_url(clip, CLIP_MODEL_MAPPING["rn50x64"])
return clip
# Note: these models use torchvision's ResNet
def clip_rn50_tv() -> CLIP:
vision_encoder = ResNet(
block=Bottleneck,
layers=(3, 4, 6, 3),
num_classes=1024,
)
text_encoder = CLIPTextEncoder()
return CLIP(vision_encoder, text_encoder)
def clip_rn101_tv() -> CLIP:
vision_encoder = ResNet(
block=Bottleneck,
layers=(3, 4, 23, 3),
num_classes=512,
)
text_encoder = CLIPTextEncoder()
return CLIP(vision_encoder, text_encoder)
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/clip/model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn, Tensor
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torchmultimodal.modules.layers.activation import SiLU
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
class CLIPTextEncoder(nn.Module):
"""CLIP text encoder class. Should be instantiated and passed to
CLIP (models/clip.py)
As in CLIP, the text encoder follows a Transformer architecture.
Args:
embedding_dim (int): Embedding dimension for text and image encoders projections.
context_length (int): Maximum sequence length for the Transformer.
vocab_size (int): Vocab size.
width (int): Embedding dimension for Transformer encoder.
dim_feedforward (int): Dimension of the feedforward networks.
heads (int): Number of heads in Transformer encoder.
layers (int): Number of layers in Transformer encoder.
use_clip_init (bool): Whether to use CLIP-specific initialization.
Inputs: text (Tensor): Tensor containing text features.
"""
TOKEN_EMBEDDING_INIT_STD = 0.02
POS_EMBEDDING_INIT_STD = 0.01
def __init__(
self,
embedding_dim: int = 512,
context_length: int = 77,
vocab_size: int = 49408,
width: int = 512,
dim_feedforward: int = 2048,
heads: int = 8,
layers: int = 12,
use_clip_init: bool = True,
):
super().__init__()
torch._C._log_api_usage_once(f"torchmultimodal.{self.__class__.__name__}")
self.token_embedding = torch.nn.Embedding(vocab_size, width)
self.positional_embedding = torch.nn.Parameter(
torch.empty(context_length, width)
)
encoder_layer = TransformerEncoderLayer(
d_model=width,
dim_feedforward=dim_feedforward,
nhead=heads,
dropout=0.0,
activation=SiLU(),
norm_first=True,
)
self.encoder = TransformerEncoder(encoder_layer, num_layers=layers)
self.width = width
self.context_length = context_length
self.ln_final = Fp32LayerNorm(width)
self.projection = nn.Linear(width, embedding_dim, bias=False)
if use_clip_init:
self.initialize_parameters()
def initialize_parameters(self) -> None:
# Initialize token and positional embeddings
nn.init.normal_(self.token_embedding.weight, std=self.TOKEN_EMBEDDING_INIT_STD)
nn.init.normal_(
self.positional_embedding,
std=self.POS_EMBEDDING_INIT_STD,
)
proj_std = (self.width**-0.5) * ((2 * self.encoder.num_layers) ** -0.5)
attn_std = self.width**-0.5
fc_std = (2 * self.width) ** -0.5
for layer in self.encoder.layers:
nn.init.normal_(layer.self_attn.in_proj_weight, std=attn_std)
nn.init.normal_(layer.self_attn.out_proj.weight, std=proj_std)
# c_fc in CLIP corresponds to the first residual MLP layer
nn.init.normal_(layer.linear1.weight, std=fc_std)
# c_proj in CLIP corresponds to the last residual MLP layer
nn.init.normal_(layer.linear2.weight, std=proj_std)
# Initialize projection
nn.init.normal_(self.projection.weight, std=self.width**-0.5)
def build_attention_mask(self) -> Tensor:
# Causal attention mask: positions above the diagonal are filled with -inf
# so they are ignored by attention
mask = torch.full(
(self.context_length, self.context_length), float("-inf")
).triu(1)
return mask
def forward(self, text: Tensor) -> Tensor:
if text.size(1) != self.context_length:
raise ValueError(
f"length of input should be {self.context_length} but found {text.size(1)}"
)
embeddings = self.token_embedding(text)
embeddings = embeddings + self.positional_embedding
embeddings = embeddings.permute(1, 0, 2)
embeddings = self.encoder(
embeddings, mask=self.build_attention_mask().to(device=text.device)
)
# [n_ctx, bs, transformer.width] -> [bs, n_ctx, transformer.width]
embeddings = torch.permute(embeddings, (1, 0, 2))
embeddings = self.ln_final(embeddings)
# take features from the eot embedding (the highest number in each sequence)
embeddings = self.projection(
embeddings[torch.arange(embeddings.shape[0]), text.argmax(dim=-1)]
)
# embeddings now has size [bs, embedding_dim]
return embeddings
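# --- Editor's illustrative sketch (not part of the original library code) ---
# The encoder pools at the position of the highest token id, which in CLIP's
# BPE vocabulary is the end-of-text token. The ids below are placeholders (the
# assumed end-of-text id 49407 is simply the largest value in the sequence),
# not real tokenizer output.
def _example_eot_pooling() -> Tensor:
    encoder = CLIPTextEncoder()
    text = torch.randint(1, 400, (1, 77))
    text[0, 10] = 49407
    out = encoder(text)
    return out  # expected shape: (1, 512)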
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/clip/text_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This code is based on https://github.com/openai/CLIP/blob/main/clip/model.py
from collections import OrderedDict
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchmultimodal.modules.layers.activation import SiLU
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
EXPANSION = 4
class CLIPViTEncoder(nn.Module):
"""
Vision transformer encoder for CLIP.
Args:
embedding_dim (int): Embedding dimension for text and image encoders projections.
patch_size (int): The dimension of each patch
image_size(int): The size (width==height) of input image
width (int): Dimensionality of the encoder layers and the pooler layer
heads (int): Number of attention heads for each attention layer in the Transformer encoder
layers (int): Number of hidden layers in the Transformer encoder
Inputs:
x (Tensor): image tensor with dimensions B x C(3) x image_size x image_size
"""
def __init__(
self,
embedding_dim: int,
patch_size: int,
image_size: int,
width: int,
heads: int,
layers: int,
):
super().__init__()
torch._C._log_api_usage_once(f"torchmultimodal.{self.__class__.__name__}")
self.conv = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False,
)
self.image_size = image_size
scale = width**-0.5
self.cls_token_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(
scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
)
self.ln_pre = Fp32LayerNorm(width)
encoder_layer = nn.TransformerEncoderLayer(
d_model=width,
nhead=heads,
dropout=0.0,
activation=SiLU(),
norm_first=True,
dim_feedforward=4 * width,
batch_first=True,
)
self.encoder = nn.TransformerEncoder(
encoder_layer,
num_layers=layers,
)
self.ln_post = Fp32LayerNorm(width)
self.projection = nn.Parameter(scale * torch.randn(width, embedding_dim))
def forward(self, x: Tensor) -> Tensor:
if x.size(2) != self.image_size or x.size(3) != self.image_size:
raise ValueError(
f"Expected input with width and height as {self.image_size}, found {x.size(2)} by {x.size(3)} "
)
if x.size(1) != 3:
raise ValueError(f"Expected 3 channels found {x.size(1)}")
# B x C x image_size x image_size => B x width (out_channel) x patch_size x patch_size
x = self.conv(x)
# B x width x patch_size x patch_size => B x width x patch_size ** 2
x = torch.flatten(x, start_dim=2)
# B x width x patch_size ** 2 => B x patch_size ** 2 x width
x = x.permute(0, 2, 1)
x = torch.cat(
[
self.cls_token_embedding.unsqueeze(0).expand(x.shape[0], -1, -1),
x,
],
dim=1,
)
x = x + self.positional_embedding
x = self.ln_pre(x)
x = self.encoder(x)
# Take embedding of the cls token
x = self.ln_post(x[:, 0, :])
x = x @ self.projection
return x
class ResNetForCLIPBottleneck(nn.Module):
def __init__(self, inplanes: int, planes: int, stride: int = 1):
super().__init__()
torch._C._log_api_usage_once(f"torchmultimodal.{self.__class__.__name__}")
# all conv layers have stride 1.
# an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * EXPANSION, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * EXPANSION)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * EXPANSION:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict(
[
("-1", nn.AvgPool2d(stride)),
(
"0",
nn.Conv2d(
inplanes,
planes * EXPANSION,
1,
stride=1,
bias=False,
),
),
("1", nn.BatchNorm2d(planes * EXPANSION)),
]
)
)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(
self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: Optional[int] = None
):
super().__init__()
torch._C._log_api_usage_once(f"torchmultimodal.{self.__class__.__name__}")
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x: Tensor) -> Tensor:
x = x.reshape(x.shape[0], x.shape[1], -1).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False,
)
return x[0]
class ResNetForCLIP(nn.Module):
"""Modified ResNet used by CLIP.
Based on https://github.com/openai/CLIP/blob/main/clip/model.py#L93, this class
differs from Torchvision's ResNet in the following ways:
- There are now 3 "stem" convolutions as opposed to 1, with an
average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is
prepended to convolutions with stride > 1.
- The final pooling layer is a QKV attention instead of an average pool.
Args:
layers (Tuple[int, int, int, int]): number of residual blocks in each of the four stages
output_dim (int): dimension of output tensor
heads (int): number of heads in the attention pooling layer
input_resolution (int): resolution of image input to encoder
width (int): ResNet width
use_clip_init (bool): Whether to use CLIP-specific initialization.
Inputs:
x (Tensor): Tensor containing image features
"""
def __init__(
self,
layers: Tuple[int, int, int, int] = (3, 4, 6, 3),
output_dim: int = 512,
heads: int = 1024,
input_resolution: int = 224,
width: int = 64,
use_clip_init: bool = True,
):
super().__init__()
torch._C._log_api_usage_once(f"torchmultimodal.{self.__class__.__name__}")
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(
3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(width // 2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
width // 2, width // 2, kernel_size=3, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(width // 2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.relu3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(
input_resolution // 32, embed_dim, heads, output_dim
)
if use_clip_init:
self.initialize_parameters()
def _make_layer(self, planes: int, blocks: int, stride: int = 1) -> nn.Module:
layers = [ResNetForCLIPBottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * EXPANSION
for _ in range(1, blocks):
layers.append(ResNetForCLIPBottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def initialize_parameters(self) -> None:
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features**-0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
# Zero-initialize each block's third batch normalization weights
# Based on CLIP initialization in https://git.io/JDbGX
for resnet_block in [
self.layer1,
self.layer2,
self.layer3,
self.layer4,
]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def forward(self, x: Tensor) -> Tensor:
def stem(x: Tensor) -> Tensor:
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
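# --- Editor's illustrative sketch (not part of the original library code) ---
# The RN50-style configuration below mirrors clip_rn50 in models/clip/model.py:
# a 224x224 input is downsampled 32x by the stem and the four stages, and the
# attention pool maps the resulting 7x7 grid to a single output_dim-sized embedding.
def _example_resnet_for_clip_shape() -> Tensor:
    encoder = ResNetForCLIP(layers=(3, 4, 6, 3), output_dim=1024, heads=32, width=64)
    x = torch.randn(1, 3, 224, 224)
    return encoder(x)  # expected shape: (1, 1024)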
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/clip/image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
from torch import nn, Tensor
from torchmultimodal.modules.layers.transformer import TransformerCrossAttentionLayer
from torchmultimodal.utils.attention import get_extended_attention_mask
class ALBEFMultimodalEncoder(nn.Module):
"""
Construct multimodal embeddings from image embeddings, text embeddings, and text attention mask.
The ALBEFMultimodalEncoder is similar to ALBEFTextEncoder, with the addition of image-text cross attention in encoder layers.
Args:
hidden_size (int): Dimensionality of the encoder layers.
Default is 768.
num_hidden_layers (int): Number of hidden layers in the Transformer encoder.
Default is 6.
num_attention_heads (int): Number of attention heads for each attention layer in the Transformer encoder.
Default is 12.
intermediate_size (int): Dimensionality of the “intermediate” (i.e., feed-forward) layer in the Transformer encoder.
Default is 3072.
layer_norm_eps (float): The epsilon used by the layer normalization layers.
Default is 1e-12.
transform_act_fn (Callable[..., nn.Module]): The activation function for the Transformer encoder layer.
Default is GELU.
Inputs:
hidden_states (Tensor of shape (batch_size, seq_len, hidden_size)):
Unimodal input hidden states.
attention_mask (Tensor of shape (batch_size, seq_len)):
Input attention mask to avoid performing attention on padding token indices.
encoder_hidden_states (Tensor of shape (batch_size, encoder_seq_len, hidden_size)):
The encoder hidden states.
encoder_attention_mask (Optional[Tensor] of shape (batch_size, encoder_seq_len)):
The attention mask for encoder hidden states.
is_decoder (bool): Whether this module is used as a decoder. Default is False.
"""
def __init__(
self,
hidden_size: int = 768,
num_hidden_layers: int = 6,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
layer_norm_eps: float = 1e-12,
transform_act_fn: Callable[..., nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.layer = nn.ModuleList(
[
TransformerCrossAttentionLayer(
d_model=hidden_size,
n_head=num_attention_heads,
dim_feedforward=intermediate_size,
activation=transform_act_fn,
layer_norm_eps=layer_norm_eps,
)
for _ in range(num_hidden_layers)
]
)
def forward(
self,
hidden_states: Tensor,
attention_mask: Tensor,
encoder_hidden_states: Tensor,
encoder_attention_mask: Optional[Tensor] = None,
) -> Tensor:
attention_mask = get_extended_attention_mask(attention_mask)
if encoder_attention_mask is not None:
encoder_attention_mask = get_extended_attention_mask(encoder_attention_mask)
for layer_module in self.layer:
hidden_states = layer_module(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_mask=encoder_attention_mask,
)
return hidden_states
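# --- Editor's illustrative sketch (not part of the original library code) ---
# Text hidden states attend over image hidden states via cross attention, so the
# output keeps the text sequence length. The sizes below are arbitrary
# placeholders (e.g. 197 image tokens for a ViT with a [CLS] token).
def _example_multimodal_encoder_shapes() -> Tensor:
    import torch  # local import: this module otherwise only needs torch.nn

    encoder = ALBEFMultimodalEncoder(hidden_size=768, num_hidden_layers=2)
    text_hidden = torch.randn(2, 16, 768)
    text_mask = torch.ones(2, 16)
    image_hidden = torch.randn(2, 197, 768)
    out = encoder(text_hidden, text_mask, image_hidden)
    return out  # expected shape: (2, 16, 768)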
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/albef/multimodal_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/albef/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from collections import namedtuple
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchmultimodal.utils.common import momentum_update, remove_grad
ALBEFOutput = namedtuple(
"ALBEFOutput",
[
"image_embeddings",
"image_embeddings_m",
"text_embeddings",
"text_embeddings_m",
"multimodal_embeddings",
"multimodal_embeddings_m",
],
defaults=(None, None, None, None, None, None),
)
ALBEFSimilarity = namedtuple(
"ALBEFSimilarity",
[
"sim_i2t", # image to text similarity
"sim_t2i", # text to image similarity
"sim_i2t_m", # image to text similarity for momentum embeddings
"sim_t2i_m", # text to image similarity for momentum embeddings
],
defaults=(None, None, None, None),
)
ALBEFWithSimilarityOutput = namedtuple(
"ALBEFWithSimilarityOutput",
[
"image_embeddings",
"text_embeddings",
"multimodal_embeddings",
"multimodal_embeddings_neg",
"similarity",
"sim_targets",
],
defaults=(None, None, None, None, None, None),
)
class ALBEFModel(nn.Module):
"""
ALBEF is a model to ALign the image and text representations BEfore Fusing
(ALBEF) them through cross-modal attention, which enables more grounded vision
and language representation learning. (https://arxiv.org/pdf/2107.07651.pdf)
Args: vision_encoder (nn.Module): Instantiated vision encoder.
text_encoder (nn.Module): Instantiated text encoder.
multimodal_encoder (nn.Module): Instantiated multimodal encoder.
momentum (float): Momentum parameter. Default is 0.995.
Inputs: image (Tensor): Tensor of shape (B, C, H, W) containing image features.
text (Tensor): Tensor of shape (B, L) containing text features.
text_atts (Tensor): Tensor of shape (B, L) containing text attention mask.
"""
def __init__(
self,
vision_encoder: nn.Module,
text_encoder: nn.Module,
multimodal_encoder: nn.Module,
momentum: float = 0.995,
) -> None:
super().__init__()
self.vision_encoder = vision_encoder
self.text_encoder = text_encoder
self.multimodal_encoder = multimodal_encoder
self.vision_encoder_m = copy.deepcopy(vision_encoder)
self.text_encoder_m = copy.deepcopy(text_encoder)
self.multimodal_encoder_m = copy.deepcopy(multimodal_encoder)
remove_grad(self.vision_encoder_m)
remove_grad(self.text_encoder_m)
remove_grad(self.multimodal_encoder_m)
self.momentum = momentum
def forward(
self,
image: Tensor,
text: Tensor,
text_atts: Tensor,
) -> ALBEFOutput:
image_embeds = self.vision_encoder(image)
text_embeds = self.text_encoder(text, text_atts)
multimodal_embeddings = self.multimodal_encoder(
hidden_states=text_embeds.last_hidden_state,
attention_mask=text_atts,
encoder_hidden_states=image_embeds,
)
with torch.no_grad():
momentum_update(self.vision_encoder, self.vision_encoder_m, self.momentum)
momentum_update(self.text_encoder, self.text_encoder_m, self.momentum)
momentum_update(
self.multimodal_encoder, self.multimodal_encoder_m, self.momentum
)
image_embeds_m = self.vision_encoder_m(image)
text_embeds_m = self.text_encoder_m(text, text_atts)
multimodal_embeddings_m = self.multimodal_encoder_m(
hidden_states=text_embeds_m.last_hidden_state,
attention_mask=text_atts,
encoder_hidden_states=image_embeds_m,
)
return ALBEFOutput(
image_embeddings=image_embeds,
image_embeddings_m=image_embeds_m,
text_embeddings=text_embeds.last_hidden_state,
text_embeddings_m=text_embeds_m.last_hidden_state,
multimodal_embeddings=multimodal_embeddings,
multimodal_embeddings_m=multimodal_embeddings_m,
)
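# --- Editor's illustrative sketch (not part of the original library code) ---
# The momentum encoders above are updated with an exponential moving average;
# this sketch spells out the update rule that momentum_update is assumed to
# apply to each parameter pair: param_m <- m * param_m + (1 - m) * param.
def _example_momentum_update_rule() -> nn.Module:
    momentum = 0.995
    online = nn.Linear(4, 4)
    target = copy.deepcopy(online)
    with torch.no_grad():
        for p, p_m in zip(online.parameters(), target.parameters()):
            p_m.mul_(momentum).add_(p, alpha=1 - momentum)
    return target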
class ALBEFModelWithSimilarity(nn.Module):
"""
ALBEFModelWithSimilarity outputs image embeddings, text embeddings, multimodal embeddings,
negative image-text pairs multimodal embeddings, and image-text similarity, as used in ITC
and ITM losses.
Args: albef_model (ALBEFModel): Instantiated ALBEF model.
vision_proj (nn.Module): Instantiated vision projection layer.
text_proj (nn.Module): Instantiated text projection layer.
embed_size (int): Embedding size of the vision and text projection layers. Default is 256.
queue_size (int): Size of image and text queues for momentum distillation. Default is 65536.
mask_token_id (int): The token id indicating a masked token. Default is -100.
temp (float): Temperature parameter. Default is 0.07.
Inputs: image (Tensor): Tensor of shape (B, C, H, W) containing image features.
text (Tensor): Tensor of shape (B, L) containing text features.
text_atts (Tensor): Tensor of shape (B, L) containing text attention mask.
idx (Tensor): Tensor of shape (B) containing unique identifiers for each sample.
"""
def __init__(
self,
albef_model: ALBEFModel,
vision_proj: nn.Module,
text_proj: nn.Module,
embed_size: int = 256,
queue_size: int = 65536,
mask_token_id: int = -100,
temp: float = 0.07,
) -> None:
super().__init__()
self.albef_model = albef_model
self.vision_proj = vision_proj
self.text_proj = text_proj
self.vision_proj_m = copy.deepcopy(vision_proj)
self.text_proj_m = copy.deepcopy(text_proj)
remove_grad(self.vision_proj_m)
remove_grad(self.text_proj_m)
self.queue_size = queue_size
self.temp = nn.Parameter(torch.ones([]) * temp)
# queues keep track of the most recent M image and text representations for momentum distillation
# the queues decouple M from the batch size, allowing M to be much larger than a single batch
self.register_buffer("image_queue", torch.randn(embed_size, queue_size))
self.register_buffer("text_queue", torch.randn(embed_size, queue_size))
self.register_buffer(
"idx_queue", torch.full((1, self.queue_size), mask_token_id)
)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
self.image_queue: Tensor
self.text_queue: Tensor
self.idx_queue: Tensor
self.queue_ptr: Tensor
self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
def forward(
self,
image: Tensor,
text: Tensor,
text_atts: Tensor,
idx: Tensor,
) -> ALBEFWithSimilarityOutput:
outputs = self.albef_model(image, text, text_atts)
# reshape idx to (B, 1)
idx = idx.view(-1, 1)
# get identifiers for the most recent M samples
idx_all = torch.cat([idx.t(), self.idx_queue.detach().clone()], dim=1)
# check for seen identifiers in the most recent M samples
pos_idx = torch.eq(idx, idx_all).float()
sim_targets = pos_idx / pos_idx.sum(1, keepdim=True)
similarity = self._similarity(
outputs.image_embeddings,
outputs.image_embeddings_m,
outputs.text_embeddings,
outputs.text_embeddings_m,
idx,
)
image_embeds_neg, text_embeds_neg, text_atts_neg = self._neg_embeddings(
outputs.image_embeddings, outputs.text_embeddings, text_atts, similarity
)
multimodal_embeddings_neg = self.albef_model.multimodal_encoder(
torch.cat([outputs.text_embeddings, text_embeds_neg], dim=0),
torch.cat([text_atts, text_atts_neg], dim=0),
torch.cat([image_embeds_neg, outputs.image_embeddings], dim=0),
)
return ALBEFWithSimilarityOutput(
image_embeddings=outputs.image_embeddings,
text_embeddings=outputs.text_embeddings,
multimodal_embeddings=outputs.multimodal_embeddings,
multimodal_embeddings_neg=multimodal_embeddings_neg,
similarity=similarity,
sim_targets=sim_targets,
)
@torch.no_grad()
def _dequeue_and_enqueue(
self, image_feat_m: Tensor, text_feat_m: Tensor, idx: Tensor
) -> None:
# gather keys before updating queue
image_feats = _gather_embeddings(image_feat_m)
text_feats = _gather_embeddings(text_feat_m)
idxs = _gather_embeddings(idx)
batch_size = image_feats.shape[0]
ptr = int(self.queue_ptr)
assert (
self.queue_size % batch_size == 0
), "queue_size should be divisible by batch_size"
# replace the keys at ptr (dequeue and enqueue)
self.image_queue[:, ptr : ptr + batch_size] = image_feats.T
self.text_queue[:, ptr : ptr + batch_size] = text_feats.T
self.idx_queue[:, ptr : ptr + batch_size] = idxs.T
ptr = (ptr + batch_size) % self.queue_size
self.queue_ptr[0] = ptr
def _similarity(
self,
image_embeds: Tensor,
image_embeds_m: Tensor,
text_embeds: Tensor,
text_embeds_m: Tensor,
idx: Tensor,
) -> ALBEFSimilarity:
# transform the [CLS] embeddings to normalized lower-dimensional representations
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_feat = F.normalize(self.text_proj(text_embeds[:, 0, :]), dim=-1)
with torch.no_grad():
momentum_update(
self.vision_proj, self.vision_proj_m, self.albef_model.momentum
)
momentum_update(self.text_proj, self.text_proj_m, self.albef_model.momentum)
image_feat_m = F.normalize(
self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1
)
text_feat_m = F.normalize(self.text_proj_m(text_embeds_m[:, 0, :]), dim=-1)
image_feat_all = torch.cat(
[image_feat_m.t(), self.image_queue.detach().clone()], dim=1
)
text_feat_all = torch.cat(
[text_feat_m.t(), self.text_queue.detach().clone()], dim=1
)
sim_i2t_m = image_feat_m @ text_feat_all / self.temp
sim_t2i_m = text_feat_m @ image_feat_all / self.temp
sim_i2t = image_feat @ text_feat_all / self.temp
sim_t2i = text_feat @ image_feat_all / self.temp
self._dequeue_and_enqueue(image_feat_m, text_feat_m, idx)
return ALBEFSimilarity(
sim_i2t=sim_i2t,
sim_t2i=sim_t2i,
sim_i2t_m=sim_i2t_m,
sim_t2i_m=sim_t2i_m,
)
def _neg_embeddings(
self,
image_embeds: Tensor,
text_embeds: Tensor,
text_atts: Tensor,
similarity: ALBEFSimilarity,
) -> Tuple[Tensor, Tensor, Tensor]:
with torch.no_grad():
bs = image_embeds.size(0)
weights_i2t = F.softmax(similarity.sim_i2t[:, :bs], dim=1)
weights_t2i = F.softmax(similarity.sim_t2i[:, :bs], dim=1)
weights_i2t.fill_diagonal_(0)
weights_t2i.fill_diagonal_(0)
image_embeds_neg, text_embeds_neg, text_atts_neg = [], [], []
for b in range(bs):
neg_idx = int(torch.multinomial(weights_t2i[b], 1).item())
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
for b in range(bs):
neg_idx = int(torch.multinomial(weights_i2t[b], 1).item())
text_embeds_neg.append(text_embeds[neg_idx])
text_atts_neg.append(text_atts[neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
return image_embeds_neg, text_embeds_neg, text_atts_neg
def _gather_embeddings(embeddings: torch.Tensor) -> torch.Tensor:
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return embeddings
embeddings_all_gpus = [
torch.zeros_like(embeddings) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(embeddings_all_gpus, embeddings)
return torch.cat(embeddings_all_gpus)
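# Illustrative sketch (not part of the library): the circular-queue update performed
# by _dequeue_and_enqueue above, shown with plain tensors. The helper name and the
# sizes below are made up for the example; it only assumes queue_size is divisible
# by batch_size, as the assert in _dequeue_and_enqueue does.
def _example_queue_update() -> None:
    import torch

    embed_size, queue_size, batch_size = 4, 8, 2
    queue = torch.zeros(embed_size, queue_size)
    ptr = 0
    for _ in range(3):
        feats = torch.randn(batch_size, embed_size)  # momentum features for one batch
        # enqueue: overwrite the oldest column block with the new batch (transposed)
        queue[:, ptr : ptr + batch_size] = feats.T
        # advance the pointer modulo the queue size
        ptr = (ptr + batch_size) % queue_size
    assert queue.shape == (embed_size, queue_size) and ptr == (3 * batch_size) % queue_size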
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/albef/model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from torch import nn, Tensor
from torchvision.models.feature_extraction import create_feature_extractor
from torchvision.models.vision_transformer import VisionTransformer
class ALBEFVisionEncoder(nn.Module):
"""
Modified VisionTransformer used by ALBEF.
This class returns the output of the encoder ('encoder.ln'), without passing it to the heads.
Args:
image_size (int): The size (resolution) of each image.
Default is 256.
patch_size (int): The size (resolution) of each patch.
Default is 16.
num_hidden_layers (int): Number of hidden layers in the Transformer encoder.
Default is 12.
num_attention_heads (int): Number of attention heads for each attention layer in the Transformer encoder.
Default is 12.
hidden_size (int): Dimensionality of the encoder layers and the pooler layer.
Default is 768.
mlp_dim (int): Dimensionality of the MLP Block in the encoder layers.
Default is 3072.
dropout (float): The dropout ratio for the encoder layers.
Default is 0.
attention_dropout (float): The dropout ratio for the attention probabilities.
Default is 0.
layer_norm_eps (float): The epsilon used by the layer normalization layers.
Default is 1e-6.
Inputs:
x (Tensor): Tensor of size (n, c, image_size, image_size) containing image features
"""
def __init__(
self,
image_size: int = 256,
patch_size: int = 16,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
hidden_size: int = 768,
mlp_dim: int = 3072,
dropout: float = 0.0,
attention_dropout: float = 0.0,
layer_norm_eps: float = 1e-6,
) -> None:
super().__init__()
vision_transformer = VisionTransformer(
image_size,
patch_size,
num_hidden_layers,
num_attention_heads,
hidden_size,
mlp_dim,
dropout,
attention_dropout,
norm_layer=partial(nn.LayerNorm, eps=layer_norm_eps),
)
self.encoder_layer_name = "encoder.ln"
self.encoder = create_feature_extractor(
vision_transformer, [self.encoder_layer_name]
)
def forward(self, x: Tensor) -> Tensor:
return self.encoder(x)[self.encoder_layer_name]
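# Illustrative sketch (not part of the library): a minimal forward pass through
# ALBEFVisionEncoder defined above. num_hidden_layers is reduced to 2 only to keep
# the example light; the output keeps the ViT token layout of
# (batch, (image_size // patch_size) ** 2 + 1, hidden_size).
def _example_albef_vision_encoder() -> None:
    import torch

    encoder = ALBEFVisionEncoder(num_hidden_layers=2)
    out = encoder(torch.randn(2, 3, 256, 256))
    assert out.shape == (2, 257, 768)  # 257 = 16 * 16 patches + 1 class token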
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/albef/image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/mdetr/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import OrderedDict
from typing import Dict, List, NamedTuple, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchmultimodal.models.mdetr.image_encoder import (
mdetr_resnet101_backbone,
PositionEmbedding2D,
)
from torchmultimodal.models.mdetr.text_encoder import (
FeatureResizer,
mdetr_roberta_text_encoder,
)
from torchmultimodal.models.mdetr.transformer import (
mdetr_transformer,
MDETRTransformerOutput,
)
from torchmultimodal.modules.layers.mlp import MLP
class MDETRModelOutput(NamedTuple):
transformer_output: MDETRTransformerOutput
pred_logits: torch.Tensor
pred_boxes: torch.Tensor
extra_embeddings: Optional[torch.Tensor]
class MDETR(nn.Module):
"""
MDETR (https://arxiv.org/abs/2104.12763) is a modulated detection model
used to detect objects in an image conditioned on text or captions.
This class contains the entire MDETR architecture, including the
image backbone, text encoder, and multimodal transformer. (Note that the
matcher and losses are provided elsewhere.)
Args: image_backbone (nn.Module): Torch module of the backbone to be used.
See image_encoder.py.
text_encoder (nn.Module): Torch module of the text encoder to be used.
See text_encoder.py.
transformer (nn.Module): The multimodal transformer module. See the
Transformer class in this file.
pos_embed (nn.Module): Module for positional embedding of images.
text_projection (nn.Module): Module to resize text encoder outputs before feeding
them to the multimodal transformer.
image_projection (nn.Module): Projection module applied to image embeddings
prior to the multimodal transformer.
query_embed (nn.Module): Learned object query embeddings (used in
transformer decoder).
bbox_embed (nn.Module): Embedding mapping transformer outputs to
bounding boxes.
class_embed (nn.Module): Embedding mapping transformer outputs to classes.
extra_query_embeddings (Optional[nn.Embedding]): Additional query embeddings,
as used in e.g. VQA. Default: None
Inputs: images (List[Tensor]): A list of image Tensors (possibly of different sizes).
text (List[Tensor]): A list of Tensors of tokenized texts (possibly of different lengths).
Returns:
An MDETRModelOutput (NamedTuple) whose elements include:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
"""
def __init__(
self,
image_backbone: nn.Module,
text_encoder: nn.Module,
transformer: nn.Module,
pos_embed: nn.Module,
text_projection: nn.Module,
image_projection: nn.Module,
query_embed: nn.Embedding,
bbox_embed: nn.Module,
class_embed: nn.Module,
extra_query_embeddings: Optional[nn.Embedding] = None,
):
super().__init__()
self.image_backbone = image_backbone
self.text_encoder = text_encoder
self.text_projection = text_projection
self.transformer = transformer
self.pos_embed = pos_embed
self.image_projection = image_projection
self.query_embed = query_embed
self.bbox_embed = bbox_embed
self.class_embed = class_embed
self.extra_query_embeddings = extra_query_embeddings
def _pad_images(self, images: List[Tensor]) -> Tuple[Tensor, Tensor]:
max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
batch_shape = (len(images),) + max_size
b, _, h, w = batch_shape
dtype = images[0].dtype
device = images[0].device
padded_images = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(images, padded_images, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
return padded_images, mask
def _pad_text(
self, text: List[Tensor], padding_idx: int = 1
) -> Tuple[Tensor, Tensor]:
padded_text = nn.utils.rnn.pad_sequence(
text, batch_first=True, padding_value=padding_idx
)
mask = padded_text == padding_idx
return padded_text, mask
def forward(self, images: List[Tensor], text: List[Tensor]) -> MDETRModelOutput:
images, image_mask = self._pad_images(images)
text, text_attention_mask = self._pad_text(text)
encoded_text = self.text_encoder(text, text_attention_mask)
# Transpose memory because pytorch's attention expects sequence first
text_memory = encoded_text.last_hidden_state.transpose(0, 1)
image_embeddings, image_mask = self.image_backbone(images, image_mask)
pos = self.pos_embed(image_mask).to(image_embeddings.dtype)
query_embed = self.query_embed.weight
# If extra embeddings are provided for VQA, we concatenate them with
# the other query embeddings prior to the transformer
if self.extra_query_embeddings is not None:
n_extra_embeddings = self.extra_query_embeddings.num_embeddings
query_embed = torch.cat([query_embed, self.extra_query_embeddings.weight])
text_memory_resized = self.text_projection(text_memory)
transformer_output = self.transformer(
self.image_projection(image_embeddings),
image_mask,
query_embed,
pos,
text_memory=text_memory_resized,
text_attention_mask=text_attention_mask,
)
# Detach the extra embeddings from the hidden states returned by the decoder
if self.extra_query_embeddings is not None:
extra_embeddings = transformer_output.decoder_hidden_states[
0, :, -n_extra_embeddings:
]
decoder_hidden_states_truncated = transformer_output.decoder_hidden_states[
:, :, :-n_extra_embeddings
]
transformer_output = transformer_output._replace(
decoder_hidden_states=decoder_hidden_states_truncated
)
else:
extra_embeddings = None
final_hidden_state = transformer_output.decoder_hidden_states[-1]
outputs_class = self.class_embed(final_hidden_state)
outputs_coord = self.bbox_embed(final_hidden_state).sigmoid()
return MDETRModelOutput(
transformer_output, outputs_class, outputs_coord, extra_embeddings
)
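# Illustrative sketch (not part of the library): the padding scheme used by
# MDETR._pad_images above, written standalone with dummy tensors. Images of
# different spatial sizes are copied into a zero tensor of the per-dimension
# maximum size, and the boolean mask is True exactly on padded positions.
def _example_pad_images() -> None:
    import torch

    images = [torch.randn(3, 4, 6), torch.randn(3, 5, 3)]
    max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
    padded = torch.zeros((len(images),) + max_size)
    mask = torch.ones((len(images),) + max_size[1:], dtype=torch.bool)
    for img, pad_img, m in zip(images, padded, mask):
        pad_img[:, : img.shape[1], : img.shape[2]].copy_(img)
        m[: img.shape[1], : img.shape[2]] = False
    assert padded.shape == (2, 3, 5, 6) and mask.shape == (2, 5, 6)
    assert bool(mask[1, 0, 5])  # columns beyond width 3 of the second image are padding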
def mdetr_resnet101(
num_queries: int = 100,
num_classes: int = 255,
embedding_dim: int = 768,
transformer_d_model: int = 256,
transformer_num_heads: int = 8,
transformer_encoder_layers: int = 6,
transformer_decoder_layers: int = 6,
transformer_dim_feedforward: int = 2048,
transformer_dropout: float = 0.1,
return_intermediate_dec: bool = True,
num_extra_query_embeddings: Optional[int] = None,
) -> MDETR:
image_backbone = mdetr_resnet101_backbone()
pos_embed = PositionEmbedding2D(128, scale=2 * math.pi)
# Size of layer4 output in ResNet101. See
# https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py#L204
image_backbone_num_channels = 2048
text_encoder = mdetr_roberta_text_encoder()
transformer = mdetr_transformer(
transformer_d_model,
transformer_num_heads,
transformer_encoder_layers,
transformer_decoder_layers,
transformer_dim_feedforward,
transformer_dropout,
return_intermediate_dec,
)
hidden_dim = transformer_d_model
text_projection = FeatureResizer(embedding_dim, hidden_dim)
image_projection = nn.Conv2d(image_backbone_num_channels, hidden_dim, kernel_size=1)
query_embed = nn.Embedding(num_queries, hidden_dim)
# 4 gives the number of coordinates that represent the bounding box
bbox_embed = MLP(hidden_dim, 4, [hidden_dim] * 2, dropout=0.0)
# The + 1 here corresponds to the "no class" label
class_embed = nn.Linear(hidden_dim, num_classes + 1)
if num_extra_query_embeddings is not None:
extra_query_embeddings = nn.Embedding(num_extra_query_embeddings, hidden_dim)
else:
extra_query_embeddings = None
mdetr = MDETR(
image_backbone,
text_encoder,
transformer,
pos_embed,
text_projection,
image_projection,
query_embed,
bbox_embed,
class_embed,
extra_query_embeddings,
)
return mdetr
def mdetr_gqa_heads(hidden_dim: int = 256) -> nn.ModuleDict:
answer_type_head = nn.Linear(hidden_dim, 5) # Number of answer types
answer_obj_head = nn.Linear(hidden_dim, 3)
answer_attr_head = nn.Linear(hidden_dim, 403)
answer_rel_head = nn.Linear(hidden_dim, 1594)
answer_global_head = nn.Linear(hidden_dim, 111)
answer_cat_head = nn.Linear(hidden_dim, 678)
heads = nn.ModuleDict(
{
"answer_type": answer_type_head,
"answer_obj": answer_obj_head,
"answer_rel": answer_rel_head,
"answer_attr": answer_attr_head,
"answer_cat": answer_cat_head,
"answer_global": answer_global_head,
}
)
return heads
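# Illustrative sketch (not part of the library): applying the GQA answer heads from
# mdetr_gqa_heads above to dummy per-head embeddings. In MDETRForVQA below, one
# extra query embedding is reserved per head, so after transposing there is one row
# of shape (batch_size, hidden_dim) per head.
def _example_gqa_heads() -> None:
    import torch

    heads = mdetr_gqa_heads(hidden_dim=256)
    batch_size = 2
    vqa_embeddings = torch.randn(len(heads), batch_size, 256)  # (num_heads, B, hidden)
    preds = {
        name: head(embedding)
        for (name, head), embedding in zip(heads.items(), vqa_embeddings)
    }
    assert preds["answer_type"].shape == (batch_size, 5)  # 5 answer types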
class ContrastiveEmbeddingsOutput(NamedTuple):
query_embeddings: Tensor
token_embeddings: Tensor
class MDETRVQAOutput(NamedTuple):
model_output: MDETRModelOutput
vqa_preds: Dict[str, Tensor]
contrastive_embeddings: ContrastiveEmbeddingsOutput
class MDETRForVQA(nn.Module):
def __init__(
self,
model: MDETR,
vqa_heads: nn.ModuleDict,
contrastive_alignment_image_projection: nn.Module,
contrastive_alignment_text_projection: nn.Module,
):
super().__init__()
self.model = model
self.vqa_heads = vqa_heads
if self.model.extra_query_embeddings is None:
raise ValueError("MDETRForVQA requires extra query embeddings ")
if self.model.extra_query_embeddings.num_embeddings != len(
self.vqa_heads.keys()
):
raise ValueError("Number of heads must match number of QA embeddings")
self.contrastive_alignment_image_projection = (
contrastive_alignment_image_projection
)
self.contrastive_alignment_text_projection = (
contrastive_alignment_text_projection
)
def forward(
self,
images: List[Tensor],
text: List[Tensor],
) -> MDETRVQAOutput:
# Calculate MDETR model outputs
model_output = self.model(images, text)
final_hidden_state = model_output.transformer_output.decoder_hidden_states[-1]
# Perform projections for contrastive alignment loss.
contrastive_query_embeddings = F.normalize(
self.contrastive_alignment_image_projection(final_hidden_state),
p=2,
dim=-1,
)
contrastive_token_embeddings = F.normalize(
self.contrastive_alignment_text_projection(
model_output.transformer_output.text_memory
).transpose(0, 1),
p=2,
dim=-1,
)
contrastive_outputs = ContrastiveEmbeddingsOutput(
contrastive_query_embeddings, contrastive_token_embeddings
)
# Apply VQA heads to get answer predictions
answer_preds = OrderedDict()
vqa_embeddings = model_output.extra_embeddings.transpose(0, 1)
for (head_name, head), embedding in zip(self.vqa_heads.items(), vqa_embeddings):
answer_preds[head_name] = head(embedding)
return MDETRVQAOutput(model_output, answer_preds, contrastive_outputs)
def mdetr_for_vqa(
num_queries: int = 100,
num_classes: int = 255,
embedding_dim: int = 768,
transformer_d_model: int = 256,
transformer_num_heads: int = 8,
transformer_encoder_layers: int = 6,
transformer_decoder_layers: int = 6,
transformer_dim_feedforward: int = 2048,
transformer_dropout: float = 0.1,
return_intermediate_dec: bool = True,
vqa_heads: Optional[nn.ModuleDict] = None,
contrastive_dim: int = 64,
) -> MDETRForVQA:
if vqa_heads is None:
vqa_heads = mdetr_gqa_heads()
hidden_dim = transformer_d_model
num_heads = len(vqa_heads.keys())
model = mdetr_resnet101(
num_queries,
num_classes,
embedding_dim,
transformer_d_model,
transformer_num_heads,
transformer_encoder_layers,
transformer_decoder_layers,
transformer_dim_feedforward,
transformer_dropout,
return_intermediate_dec,
num_extra_query_embeddings=num_heads,
)
contrastive_alignment_image_projection = nn.Linear(hidden_dim, contrastive_dim)
contrastive_alignment_text_projection = nn.Linear(hidden_dim, contrastive_dim)
return MDETRForVQA(
model,
vqa_heads,
contrastive_alignment_image_projection,
contrastive_alignment_text_projection,
)
class MDETRPhraseGroundingOutput(NamedTuple):
model_output: MDETRModelOutput
contrastive_embeddings: ContrastiveEmbeddingsOutput
class MDETRForPhraseGrounding(nn.Module):
def __init__(
self,
model: MDETR,
contrastive_alignment_image_projection: nn.Module,
contrastive_alignment_text_projection: nn.Module,
):
super().__init__()
self.model = model
self.contrastive_alignment_image_projection = (
contrastive_alignment_image_projection
)
self.contrastive_alignment_text_projection = (
contrastive_alignment_text_projection
)
def forward(
self,
images: List[Tensor],
text: List[Tensor],
) -> MDETRPhraseGroundingOutput:
model_output = self.model(images, text)
final_hidden_state = model_output.transformer_output.decoder_hidden_states[-1]
contrastive_query_embeddings = F.normalize(
self.contrastive_alignment_image_projection(final_hidden_state),
p=2,
dim=-1,
)
contrastive_token_embeddings = F.normalize(
self.contrastive_alignment_text_projection(
model_output.transformer_output.text_memory
).transpose(0, 1),
p=2,
dim=-1,
)
contrastive_outputs = ContrastiveEmbeddingsOutput(
contrastive_query_embeddings, contrastive_token_embeddings
)
return MDETRPhraseGroundingOutput(model_output, contrastive_outputs)
def mdetr_for_phrase_grounding(
num_queries: int = 100,
num_classes: int = 255,
embedding_dim: int = 768,
transformer_d_model: int = 256,
transformer_num_heads: int = 8,
transformer_encoder_layers: int = 6,
transformer_decoder_layers: int = 6,
transformer_dim_feedforward: int = 2048,
transformer_dropout: float = 0.1,
return_intermediate_dec: bool = True,
contrastive_dim: int = 64,
) -> MDETRForPhraseGrounding:
model = mdetr_resnet101(
num_queries,
num_classes,
embedding_dim,
transformer_d_model,
transformer_num_heads,
transformer_encoder_layers,
transformer_decoder_layers,
transformer_dim_feedforward,
transformer_dropout,
return_intermediate_dec,
)
hidden_dim = transformer_d_model
contrastive_alignment_image_projection = nn.Linear(hidden_dim, contrastive_dim)
contrastive_alignment_text_projection = nn.Linear(hidden_dim, contrastive_dim)
return MDETRForPhraseGrounding(
model,
contrastive_alignment_image_projection,
contrastive_alignment_text_projection,
)
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/mdetr/model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from torch import nn, Tensor
from torchmultimodal.modules.encoders.bert_text_encoder import BERTTextEncoder
from torchmultimodal.modules.layers.text_embedding import BERTTextEmbeddings
from torchmultimodal.modules.layers.transformer import TransformerOutput
class ModifiedTransformerEncoder(nn.Module):
"""
Modified version of TorchText's RoBERTa transformer encoder
taking in embeddings instead of input IDs.
Args: embedding_dim (int): Number of features in the input.
num_encoder_layers (int): Number of layers in the encoder.
num_attention_heads (int): Number of heads in multi-head attention.
ffn_dimension (int): Dimension of feedforward network inside
attention layers.
dropout (float): dropout value in each layer. Default: 0.1.
normalize_before (bool): Whether to do PreNorm in encoder layers.
Default: False
Inputs: embeddings (Tensor): Tensor of embeddings of a batch of input IDs.
attention_mask (Optional[Tensor]): Batch attention mask returned from
tokenizer (applied as padding mask inside self-attention).
Default: None
"""
def __init__(
self,
embedding_dim: int,
num_encoder_layers: int,
num_attention_heads: int,
ffn_dimension: int,
dropout: float = 0.1,
normalize_before: bool = False,
):
super().__init__()
layer = torch.nn.TransformerEncoderLayer(
d_model=embedding_dim,
nhead=num_attention_heads,
dim_feedforward=ffn_dimension,
dropout=dropout,
activation="gelu",
batch_first=True,
norm_first=normalize_before,
)
self.layers = torch.nn.TransformerEncoder(
encoder_layer=layer, num_layers=num_encoder_layers
)
self.embedding_dim = embedding_dim
def forward(
self,
embeddings: Tensor,
attention_mask: Optional[Tensor] = None,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> TransformerOutput:
encoded = embeddings
batch_size, seq_len = embeddings.size()[:2]
mask = attention_mask.reshape(batch_size, seq_len)
# Do this in a loop because otherwise it can cause OOM
for layer in self.layers.layers:
encoded = layer(encoded, src_key_padding_mask=mask)
return TransformerOutput(last_hidden_state=encoded)
class FeatureResizer(nn.Module):
"""
This class takes as input a set of embeddings of dimension C1 and outputs a set of
embeddings of dimension C2, after a linear transformation, dropout and normalization (LN).
Args: input_feat_size (int): Dimension of input features.
output_feat_size (int): Dimension of output features.
dropout (float): Dropout probability for final features. Default: 0.1
do_ln (bool): Whether to perform layer normalization after the linear layer.
Inputs: encoder_features (Tensor): Features to be resized.
"""
def __init__(
self,
input_feat_size: int,
output_feat_size: int,
dropout: float = 0.1,
do_ln: bool = True,
):
super().__init__()
self.do_ln = do_ln
# Object feature encoding
self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) if do_ln else None
self.dropout = nn.Dropout(dropout)
def forward(self, encoder_features: Tensor) -> Tensor:
x = self.fc(encoder_features)
if self.do_ln:
x = self.layer_norm(x)
output = self.dropout(x)
return output
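# Illustrative sketch (not part of the library): FeatureResizer defined above maps
# text encoder features (e.g. 768-dim RoBERTa outputs) to the multimodal transformer
# width (e.g. 256) via linear -> LayerNorm -> dropout. The sizes here are examples
# matching the defaults used elsewhere in this file.
def _example_feature_resizer() -> None:
    import torch

    resizer = FeatureResizer(input_feat_size=768, output_feat_size=256)
    text_memory = torch.randn(20, 2, 768)  # (seq_len, batch_size, embedding_dim)
    assert resizer(text_memory).shape == (20, 2, 256)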
def mdetr_roberta_text_encoder(
embedding_dim: int = 768,
vocab_size: int = 50265,
pad_token_id: int = 1,
type_vocab_size: int = 1,
max_position_embeddings: int = 514,
layer_norm_eps: float = 1e-05,
embedding_dropout_prob: float = 0.1,
ffn_dimension: int = 3072,
num_attention_heads: int = 12,
num_encoder_layers: int = 12,
encoder_dropout_prob: float = 0.1,
normalize_before: bool = False,
) -> BERTTextEncoder:
embeddings = BERTTextEmbeddings(
hidden_size=embedding_dim,
vocab_size=vocab_size,
pad_token_id=pad_token_id,
type_vocab_size=type_vocab_size,
max_position_embeddings=max_position_embeddings,
layer_norm_eps=layer_norm_eps,
dropout=embedding_dropout_prob,
offset_pos_ids=True,
)
modified_transformer_encoder = ModifiedTransformerEncoder(
embedding_dim=embedding_dim,
ffn_dimension=ffn_dimension,
num_attention_heads=num_attention_heads,
num_encoder_layers=num_encoder_layers,
dropout=encoder_dropout_prob,
normalize_before=normalize_before,
)
text_encoder = BERTTextEncoder(
embeddings=embeddings, encoder=modified_transformer_encoder
)
return text_encoder
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/mdetr/text_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, NamedTuple, Optional
import torch
from torch import nn, Tensor
from torchmultimodal.modules.layers.mlp import MLP
from torchmultimodal.utils.common import get_clones
class MDETRTransformerOutput(NamedTuple):
decoder_hidden_states: torch.Tensor
text_memory: torch.Tensor
class MDETRTransformer(nn.Module):
"""
Transformer class for MDETR model.
Args: d_model (int): Number of features in the input.
num_heads (int): Number of heads in multi-head attention.
num_encoder_layers (int): Number of layers in the encoder. Default: 6
num_decoder_layers (int): Number of layers in the decoder. Default: 6
dim_feedforward (int): Dimension of feedforward network. Default: 2048
dropout (float): Dropout value. Default: 0.1.
activation (Callable[..., nn.Module]): The activation function of the
intermediate layer. Default: nn.ReLU
normalize_before (bool): Whether to do PreNorm. Default: False
return_intermediate_dec (bool): Whether to return intermediate decoder outputs.
Default: True
Inputs: image_embeddings Tensor: The image input.
image_mask (Tensor): The mask for the image sequence.
query_embed (Tensor): Positional embeddings applied to Q
cross-attention matrix in decoder.
pos_embed (Tensor): Positional embeddings applied to Q and K
self-attention matrices in decoder.
text_memory (Tensor): Text input.
text_attention_mask (Tensor): Attention mask for text input.
"""
def __init__(
self,
d_model: int = 512,
num_heads: int = 8,
num_encoder_layers: int = 6,
num_decoder_layers: int = 6,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Callable[..., nn.Module] = nn.ReLU,
normalize_before: bool = False,
return_intermediate_dec: bool = True,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, num_heads, dim_feedforward, dropout, activation, normalize_before
)
encoder_final_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers, encoder_final_norm
)
decoder_layer = TransformerDecoderLayer(
d_model, num_heads, dim_feedforward, dropout, activation
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self.d_model = d_model
self._init_parameters()
# Initialize all (non-normalization-layer) weights
# Biases will be unaffected
def _init_parameters(self) -> None:
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(
self,
image_embeddings: Tensor,
image_mask: Tensor,
query_embed: Tensor,
pos_embed: Tensor,
text_memory: Tensor,
text_attention_mask: Tensor,
) -> MDETRTransformerOutput:
# flatten NxCxHxW to HWxNxC
bs = image_embeddings.size(0)
image_embeddings = image_embeddings.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
# Object query embeddings for each sample in the batch
# Size: (num_queries, batch_size, hidden_dim)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
image_mask = image_mask.flatten(1)
tgt = torch.zeros_like(query_embed)
# Concat on the sequence dimension
mm_embeddings = torch.cat([image_embeddings, text_memory], dim=0)
# For mask, sequence dimension is second
image_mask = torch.cat([image_mask, text_attention_mask], dim=1)
# Pad the pos_embed with 0 so that the addition will be a no-op for the text tokens
pos_embed = torch.cat([pos_embed, torch.zeros_like(text_memory)], dim=0)
mm_memory = self.encoder(
mm_embeddings, src_key_padding_mask=image_mask, pos=pos_embed
)
text_memory = mm_memory[-len(text_memory) :]
assert mm_memory.shape[1] == text_memory.shape[1] == tgt.shape[1]
hs = self.decoder(
tgt,
mm_memory,
memory_key_padding_mask=image_mask,
pos=pos_embed,
query_pos=query_embed,
)
return MDETRTransformerOutput(
decoder_hidden_states=hs.transpose(1, 2), text_memory=text_memory
)
class TransformerEncoder(nn.Module):
"""
A transformer encoder.
Args: encoder_layer (nn.Module): Module for an individual encoder layer.
num_layers (int): Number of encoder layers.
norm (Optional[nn.Module]): Normalization applied after last encoder layer.
Default: None
Inputs: src (Tensor): The sequence to the encoder layer.
mask (Optional[Tensor]): The mask for the src sequence. Default: None
src_key_padding_mask (Optional[Tensor]): The mask for the src keys per batch.
Default: None
pos (Optional[Tensor]): Positional embeddings applied to Q and K
self-attention matrices. Default: None
"""
def __init__(
self,
encoder_layer: nn.Module,
num_layers: int,
norm: Optional[nn.Module] = None,
):
super().__init__()
self.layers = get_clones(encoder_layer, num_layers)
self.norm = norm
def forward(
self,
src: Tensor,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
) -> Tensor:
output = src
for layer in self.layers:
output = layer(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
pos=pos,
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
"""
A transformer decoder.
Args: decoder_layer (nn.Module): Module for an individual decoder layer.
num_layers (int): Number of decoder layers.
norm (Optional[nn.Module]): Normalization applied after last decoder layer.
Default: None
return_intermediate (bool): Whether to return intermediate decoder outputs.
Default: True
Inputs: tgt (Tensor): The sequence to the decoder layer.
memory (Tensor): The sequence from the last layer of the encoder.
tgt_mask (Optional[Tensor]): The mask for the tgt sequence. Default: None
memory_mask (Optional[Tensor]): The mask for the memory sequence.
Default: None
tgt_key_padding_mask (Optional[Tensor]): The mask for the tgt keys per batch.
Default: None
memory_key_padding_mask (Optional[Tensor]): The mask for the memory keys per batch.
Default: None
pos (Optional[Tensor]): Positional embeddings applied to Q and K
self-attention matrices. Default: None
query_pos (Optional[Tensor]): Positional embeddings applied to Q
cross-attention matrix. Default: None
"""
def __init__(
self,
decoder_layer: nn.Module,
num_layers: int,
norm: Optional[nn.Module] = None,
return_intermediate: bool = True,
):
super().__init__()
self.layers = get_clones(decoder_layer, num_layers)
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
) -> Tensor:
output = tgt
intermediate = []
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
if self.norm is not None:
intermediate.append(self.norm(output))
else:
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
if self.norm is not None:
return self.norm(output)
return output
class TransformerEncoderLayer(nn.Module):
"""
A single layer from a transformer encoder.
Args: d_model (int): Number of features in the input.
num_heads (int): Number of heads in multi-head attention.
dim_feedforward (int): Dimension of feedforward network. Default: 2048
dropout (float): Dropout value. Default: 0.1.
activation (Callable[..., nn.Module]): The activation function of the
intermediate layer. Default: nn.ReLU
normalize_before (bool): Whether to do PreNorm. Default: False
Inputs: src (Tensor): The sequence to the encoder layer.
src_mask (Optional[Tensor]): The mask for the src sequence. Default: None
src_key_padding_mask (Optional[Tensor]): The mask for the src keys per batch.
Default: None
pos (Optional[Tensor]): Positional embeddings applied to Q and K
self-attention matrices. Default: None
"""
def __init__(
self,
d_model: int,
num_heads: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Callable[..., nn.Module] = nn.ReLU,
normalize_before: bool = False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, num_heads, dropout=dropout)
self.mlp = MLP(d_model, d_model, [dim_feedforward], dropout, activation)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = activation
self.normalize_before = normalize_before
def with_pos_embed(self, tensor: Tensor, pos: Optional[Tensor]) -> Tensor:
return tensor if pos is None else tensor + pos
def forward_post(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
) -> Tensor:
x = src
q = k = self.with_pos_embed(x, pos)
self_attention_outputs = self.self_attn(
q, k, value=x, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
x = x + self.dropout1(self_attention_outputs)
x = self.norm1(x)
mlp_outputs = self.mlp(x)
x = x + self.dropout2(mlp_outputs)
x = self.norm2(x)
return x
def forward_pre(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
) -> Tensor:
x = src
x = self.norm1(x)
q = k = self.with_pos_embed(x, pos)
self_attention_outputs = self.self_attn(
q, k, value=x, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
x = x + self.dropout1(self_attention_outputs)
x = self.norm2(x)
mlp_outputs = self.mlp(x)
x = x + self.dropout2(mlp_outputs)
return x
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
) -> Tensor:
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
"""
A single layer from a transformer decoder.
Args: d_model (int): Number of features in the input.
num_heads (int): Number of heads in multi-head attention.
dim_feedforward (int): Dimension of feedforward network. Default: 2048
dropout (float): Dropout value. Default: 0.1.
activation (Callable[..., nn.Module]): The activation function of the
intermediate layer. Default: nn.ReLU
Inputs: tgt (Tensor): The sequence to the decoder layer.
memory (Tensor): The sequence from the last layer of the encoder.
tgt_mask (Optional[Tensor]): The mask for the tgt sequence. Default: None
memory_mask (Optional[Tensor]): The mask for the memory sequence.
Default: None
tgt_key_padding_mask (Optional[Tensor]): The mask for the tgt keys per batch.
Default: None
memory_key_padding_mask (Optional[Tensor]): The mask for the memory keys per batch.
Default: None
pos (Optional[Tensor]): Positional embeddings applied to Q and K
self-attention matrices. Default: None
query_pos (Optional[Tensor]): Positional embeddings applied to Q
cross-attention matrix. Default: None
"""
def __init__(
self,
d_model: int,
num_heads: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Callable[..., nn.Module] = nn.ReLU,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, num_heads, dropout=dropout)
self.cross_attn_image = nn.MultiheadAttention(
d_model, num_heads, dropout=dropout
)
self.mlp = MLP(d_model, d_model, [dim_feedforward], dropout, activation)
self.norm1 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.norm4 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.dropout4 = nn.Dropout(dropout)
self.activation = activation
def with_pos_embed(self, tensor: Tensor, pos: Optional[Tensor]) -> Tensor:
return tensor if pos is None else tensor + pos
def forward(
self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
) -> Tensor:
x = tgt
q = k = self.with_pos_embed(x, query_pos)
# Self attention
self_attention_outputs = self.self_attn(
q, k, value=x, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
x = x + self.dropout1(self_attention_outputs)
x = self.norm1(x)
# Cross attention to image
cross_attention_outputs = self.cross_attn_image(
query=self.with_pos_embed(x, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
x = x + self.dropout3(cross_attention_outputs)
x = self.norm3(x)
# FFN
mlp_outputs = self.mlp(x)
x = x + self.dropout4(mlp_outputs)
x = self.norm4(x)
return x
def mdetr_transformer(
d_model: int = 256,
num_heads: int = 8,
num_encoder_layers: int = 6,
num_decoder_layers: int = 6,
dim_feedforward: int = 2048,
dropout: float = 0.1,
return_intermediate_dec: bool = True,
) -> MDETRTransformer:
return MDETRTransformer(
d_model=d_model,
num_heads=num_heads,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
dim_feedforward=dim_feedforward,
dropout=dropout,
return_intermediate_dec=return_intermediate_dec,
)
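# Illustrative sketch (not part of the library): the NxCxHxW -> HWxNxC reshaping done
# at the top of MDETRTransformer.forward, shown on a dummy backbone feature map so
# the intended sequence layout is explicit.
def _example_flatten_image_features() -> None:
    import torch

    n, c, h, w = 2, 256, 7, 7
    feature_map = torch.randn(n, c, h, w)
    sequence = feature_map.flatten(2).permute(2, 0, 1)  # (H*W, N, C)
    assert sequence.shape == (h * w, n, c)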
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/mdetr/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchvision.models._api import Weights
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.models.resnet import resnet101, ResNet101_Weights
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copied from torchvision.ops.misc with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans. This module is a useful replacement for BatchNorm2d in the
case of very small batches, see https://bit.ly/3xQvmiJ.
Args: n (int): Number of features ``C`` from expected input size ``(N, C, H, W)``
eps (float): Value added to denominator for numerical stability.
Default = 1e-5
Inputs: x (Tensor): Tensor to be normalized
"""
def __init__(self, n: int, eps: float = 1e-5):
super().__init__()
self.eps = eps
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def forward(self, x: Tensor) -> Tensor:
# move reshapes to the beginning to make it fuser-friendly
# ignore mypy errors because fixing them would break checkpoint loading
w = self.weight.reshape(1, -1, 1, 1) # type: ignore
b = self.bias.reshape(1, -1, 1, 1) # type: ignore
rv = self.running_var.reshape(1, -1, 1, 1) # type: ignore
rm = self.running_mean.reshape(1, -1, 1, 1) # type: ignore
scale = w * (rv + self.eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
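# Illustrative sketch (not part of the library): FrozenBatchNorm2d above is just a
# fixed per-channel affine transform and, given the same statistics and affine
# parameters, matches a standard BatchNorm2d in eval mode. The random values below
# only serve to check that equivalence numerically.
def _example_frozen_batchnorm_equivalence() -> None:
    import torch
    from torch import nn

    num_channels = 8
    frozen = FrozenBatchNorm2d(num_channels)
    frozen.weight.normal_()
    frozen.bias.normal_()
    frozen.running_mean.normal_()
    frozen.running_var.uniform_(0.5, 2.0)

    reference = nn.BatchNorm2d(num_channels).eval()
    reference.weight.data.copy_(frozen.weight)
    reference.bias.data.copy_(frozen.bias)
    reference.running_mean.copy_(frozen.running_mean)
    reference.running_var.copy_(frozen.running_var)

    x = torch.randn(2, num_channels, 4, 4)
    assert torch.allclose(frozen(x), reference(x), atol=1e-5)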
class PositionEmbedding2D(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention Is All You Need paper (https://arxiv.org/abs/1706.03762),
generalized to work on images.
Args: num_pos_feats (int): Number of positional features
(should be half the output embedding size). Default = 64
temperature (int): Base for generating frequency mesh. Default = 10000
scale (float): Scaling factor when performing normalization. Setting
scale = s will rescale values to fall in [0, s].
Default = None (no normalization)
Inputs: mask (Tensor): Padding mask (used to infer size of each image in batch).
Input size: (batch_size, height, width)
Returns: Tensor of size (batch_size, 2 * num_pos_feats, height, width)
"""
def __init__(
self,
num_pos_feats: int = 64,
temperature: int = 10000,
scale: Optional[float] = None,
):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.scale = scale
def forward(self, mask: Tensor) -> Tensor:
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.scale is not None:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(
self.num_pos_feats, dtype=torch.float32, device=mask.device
)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
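# Illustrative sketch (not part of the library): calling PositionEmbedding2D above on
# a dummy, unpadded mask. With num_pos_feats=128 the output has 256 channels, which
# matches the transformer d_model used by mdetr_resnet101.
def _example_position_embedding() -> None:
    import math

    import torch

    pos_embed = PositionEmbedding2D(num_pos_feats=128, scale=2 * math.pi)
    mask = torch.zeros(2, 7, 7, dtype=torch.bool)  # False everywhere: no padding
    assert pos_embed(mask).shape == (2, 256, 7, 7)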
class MaskedIntermediateLayer(nn.Module):
"""
This class wraps a backbone returning an intermediate layer (e.g. a ResNet
where we do not want to perform pooling) while casting masks to the appropriate
sizes.
Note: for simplicity we only support returning a single intermediate layer.
Args: body (nn.Module): The module to return the intermediate layer from.
intermediate_layer (str): Name of the layer to return from body.
Inputs: images (Tensor): Batch of images to pass to the backbone.
image_masks (Tensor): Masks to cast to backbone output size.
"""
def __init__(self, body: nn.Module, intermediate_layer: str):
super().__init__()
# Note that we need this to skip pooler, flatten, and FC layers in
# the standard ResNet implementation.
self.body = IntermediateLayerGetter(body, return_layers={intermediate_layer: 0})
def forward(
self, images: torch.Tensor, image_masks: torch.Tensor
) -> Tuple[Tensor, Tensor]:
out = self.body(images)
tensor = out[next(iter(out))]
mask = F.interpolate(image_masks[None].float(), size=tensor.shape[-2:]).bool()[
0
]
return tensor, mask
def mdetr_resnet101_backbone(
weights: Weights = ResNet101_Weights.IMAGENET1K_V1,
norm_layer: Callable[..., nn.Module] = FrozenBatchNorm2d,
freeze_weights: bool = True,
) -> MaskedIntermediateLayer:
body = resnet101(
replace_stride_with_dilation=[False, False, False],
weights=weights,
norm_layer=norm_layer,
)
if freeze_weights:
for name, parameter in body.named_parameters():
if "layer2" not in name and "layer3" not in name and "layer4" not in name:
parameter.requires_grad_(False)
backbone = MaskedIntermediateLayer(body, intermediate_layer="layer4")
return backbone
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/models/mdetr/image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torchtext.transforms import (
AddToken,
BERTTokenizer,
Sequential,
StrToIntTransform,
ToTensor,
)
class BertTextTransform:
"""Transform raw text into a Tensor of token ids for using BERT models.
Args:
vocab_file (str): local or URL path to pre-trained vocab file.
Defaults to HuggingFace BERT base (uncased) model's vocab file.
do_lower_case (bool): whether to convert input text to lowercase.
Defaults to True.
start_token (int): value to represent the start of each text.
Defaults to 101, Hugging Face's BERT start token.
end_token (int): value to represent the end of each text.
Defaults to 102, Hugging Face's BERT end token.
padding_value (int): value with which to pad each text so that all texts are the same length.
Defaults to 0, Hugging Face's BERT pad token.
Inputs:
raw_text (List[str]): list of raw texts to transform
Returns:
Tensor: Token IDs representing the input text, of dimensions
(len(raw_text), max length of input text)
"""
def __init__(
self,
vocab_file: str = "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
do_lower_case: bool = True,
start_token: int = 101,
end_token: int = 102,
padding_value: int = 0,
):
self.tokenizer = Sequential(
BERTTokenizer(
vocab_path=vocab_file, do_lower_case=do_lower_case, return_tokens=False
),
StrToIntTransform(),
AddToken(start_token, begin=True),
AddToken(end_token, begin=False),
ToTensor(padding_value=padding_value),
)
def __call__(self, raw_text: List[str]) -> torch.Tensor:
input_ids = self.tokenizer(raw_text)
return input_ids
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/transforms/bert_text_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
import warnings
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image
from torch import Tensor
from torchvision import transforms
IMAGE_PRETRAINING_MEAN = (0.48145466, 0.4578275, 0.40821073)
IMAGE_PRETRAINING_STD = (0.26862954, 0.26130258, 0.27577711)
LOGIT_LAPLACE_EPS: float = 0.1
def map_pixels(x: torch.Tensor) -> torch.Tensor:
if x.dtype != torch.float:
raise ValueError("expected input to have type float")
return (1 - 2 * LOGIT_LAPLACE_EPS) * x + LOGIT_LAPLACE_EPS
class ImageMaskingGenerator:
def __init__(
self,
input_size: Union[Tuple[int, int], int],
num_masking_patches: int,
min_num_patches: int = 4,
max_num_patches: Optional[int] = None,
min_aspect: float = 0.3,
max_aspect: Optional[float] = None,
) -> None:
if not isinstance(input_size, tuple):
input_size = (input_size,) * 2
self.height, self.width = input_size
self.num_patches = self.height * self.width
self.num_masking_patches = num_masking_patches
self.min_num_patches = min_num_patches
self.max_num_patches = (
num_masking_patches if max_num_patches is None else max_num_patches
)
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
def __repr__(self) -> str:
repr_str = "Generator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
self.height,
self.width,
self.min_num_patches,
self.max_num_patches,
self.num_masking_patches,
self.log_aspect_ratio[0],
self.log_aspect_ratio[1],
)
return repr_str
def get_shape(self) -> Tuple[int, int]:
return self.height, self.width
def _mask(self, mask: np.ndarray, max_mask_patches: int) -> int:
delta = 0
for _attempt in range(10):
target_area = random.uniform(self.min_num_patches, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < self.width and h < self.height:
top = random.randint(0, self.height - h)
left = random.randint(0, self.width - w)
num_masked = mask[top : top + h, left : left + w].sum()
# Overlap
if 0 < h * w - num_masked <= max_mask_patches:
for i in range(top, top + h):
for j in range(left, left + w):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
def __call__(self) -> np.ndarray:
mask = np.zeros(shape=self.get_shape(), dtype=np.int64) # type: ignore
mask_count = 0
while mask_count < self.num_masking_patches:
max_mask_patches = self.num_masking_patches - mask_count
max_mask_patches = min(max_mask_patches, self.max_num_patches)
delta = self._mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return mask
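# Illustrative sketch (not part of the library): drawing one BEiT-style block mask on
# a 14x14 patch grid with the defaults used by FLAVAImageTransform below. The
# generator keeps adding rectangular blocks and may stop slightly short of
# num_masking_patches, so only an upper bound is asserted.
def _example_image_masking() -> None:
    generator = ImageMaskingGenerator(
        input_size=14, num_masking_patches=75, min_num_patches=16
    )
    mask = generator()  # numpy array of 0/1 values with shape (14, 14)
    assert mask.shape == (14, 14) and int(mask.sum()) <= 75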
class TwoWayResize(transforms.Resize):
def __init__(
self,
size: Union[int, Tuple[int, int]],
second_size: Optional[Union[int, Tuple[int, int]]] = None,
second_interpolation: transforms.InterpolationMode = transforms.InterpolationMode.LANCZOS,
**kwargs: Any,
) -> None:
if not isinstance(size, (list, tuple)):
size = (size, size)
super().__init__(size, **kwargs)
# Backward compatibility with integer value
if isinstance(second_interpolation, int):
warnings.warn(
"Argument second_interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
second_interpolation = transforms._interpolation_modes_from_int(
second_interpolation
)
if not isinstance(second_size, (list, tuple)):
second_size = (second_size, second_size)
self.second_size = second_size
self.second_interpolation = second_interpolation
def forward(self, img: Image.Image) -> Tuple[Image.Image, Image.Image]:
img = F.resize(
img, self.size, self.interpolation, self.max_size, self.antialias
)
second_img = F.resize(
img,
self.second_size,
self.second_interpolation,
self.max_size,
self.antialias,
)
return img, second_img
class TwoWayRandomResizedCrop(transforms.RandomResizedCrop):
"""
Similar to RandomResizedCrop but returns two versions of the
random crop with different sizings and interpolations.
Note that the crop is the same, but the two returned images
have different final sizes and interpolations.
"""
def __init__(
self,
size: Union[int, Tuple[int, int]],
second_size: Optional[Union[int, Tuple[int, int]]] = None,
second_interpolation: transforms.InterpolationMode = transforms.InterpolationMode.LANCZOS,
**kwargs: Any,
) -> None:
super().__init__(size, **kwargs)
# Backward compatibility with integer value
if isinstance(second_interpolation, int):
warnings.warn(
"Argument second_interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
second_interpolation = transforms._interpolation_modes_from_int(
second_interpolation
)
if not isinstance(second_size, (list, tuple)):
second_size = (second_size, second_size)
self.second_size = second_size
self.second_interpolation = second_interpolation
def __call__(
self, img: Image.Image
) -> Union[Image.Image, Tuple[Image.Image, Image.Image]]:
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
if self.second_size is None:
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
else:
return (
F.resized_crop(img, i, j, h, w, self.size, interpolation),
F.resized_crop(
img, i, j, h, w, self.second_size, self.second_interpolation
),
)
class FLAVAImageTransform:
"""FLAVA image transform which does basic transforms like resize etc on images,
randomly masks patches in an image based on scheme from Beit https://arxiv.org/pdf/2106.08254.pdf
and generates codebook tokens
Args:
is_train (bool): whether transform is applied during training or not. Random crop and interpolation is enabled for training.
Defaults to True.
encoder_input_size (int): size of image that is input to the image encoder. Default is 224.
codebook_input_size (int): size of image that is input to the visual codebook. Default is 112.
scale (Tuple[float, float]): scale passed to RandomResizedCrop transform. Default is (0.9, 1.0).
encoder_interpolation(str): interpolation for RandomResizedCrop or Resize transform for image passed to encoder.\
Default is BICUBIC
codebook_interpolation(str): interpolation for RandomResizedCrop or Resize transform for image passed to visual codebook. \
Default is LANCZOS
image_mean (Tuple[float, float, float]): mean for image normalization. Default is (0.48145466, 0.4578275, 0.40821073)
image_std (Tuple[float, float, float]): standard deviation for image normalization. \
Default is (0.26862954, 0.26130258, 0.27577711)
mask_window_size (int): dimension of mask. Default is 14.
mask_num_patches (int): number of patches to mask. Default is 75.
mask_max_patches (Optional[int]): max number of patches to mask. Default is None.
mask_min_patches (int): min number of patches to mask. Default is 16.
Inputs:
images (Union[List[Image.Image], Image.Image]): input image / list of images
"""
def __init__(
self,
is_train: bool = True,
encoder_input_size: int = 224,
codebook_input_size: int = 112,
scale: Tuple[float, float] = (0.9, 1.0),
encoder_interpolation: str = transforms.InterpolationMode.BICUBIC,
codebook_interpolation: str = transforms.InterpolationMode.LANCZOS,
image_mean: Tuple[float, float, float] = IMAGE_PRETRAINING_MEAN,
image_std: Tuple[float, float, float] = IMAGE_PRETRAINING_STD,
mask_window_size: int = 14,
mask_num_patches: int = 75,
mask_max_patches: Optional[int] = None,
mask_min_patches: int = 16,
) -> None:
if is_train:
resize_func = TwoWayRandomResizedCrop(
size=encoder_input_size,
second_size=codebook_input_size,
scale=scale,
interpolation=encoder_interpolation,
second_interpolation=codebook_interpolation,
)
else:
resize_func = TwoWayResize(
size=encoder_input_size,
second_size=codebook_input_size,
interpolation=encoder_interpolation,
second_interpolation=codebook_interpolation,
)
self.common_transform = transforms.Compose(
[
transforms.Lambda(
lambda img: img.convert("RGB") if img.mode != "RGB" else img
),
resize_func,
]
)
self.image_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(image_mean),
std=torch.tensor(image_std),
),
]
)
self.codebook_transform = transforms.Compose(
[
transforms.ToTensor(),
map_pixels,
]
)
self.masked_position_generator = ImageMaskingGenerator(
mask_window_size,
num_masking_patches=mask_num_patches,
max_num_patches=mask_max_patches,
min_num_patches=mask_min_patches,
)
def transform(self, image: Image.Image) -> Dict[str, Tensor]:
image, image_for_codebook = self.common_transform(image)
return {
"image": self.image_transform(image),
"image_for_codebook": self.codebook_transform(image_for_codebook),
"image_patches_mask": torch.from_numpy(self.masked_position_generator()),
}
def __call__(
self, images: Union[List[Image.Image], Image.Image]
) -> Mapping[str, Union[Tensor, List[Tensor]]]:
if isinstance(images, list):
output: Dict[str, List[Tensor]] = {}
for image in images:
transformed_output = self.transform(image)
for key in transformed_output:
if key not in output:
output[key] = []
output[key].append(transformed_output[key])
return output
else:
return self.transform(images)
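# Illustrative sketch (not part of the library): running FLAVAImageTransform above on
# a dummy PIL image. With the defaults, the encoder image is 224x224, the codebook
# image is 112x112, and the patch mask covers a 14x14 grid. is_train=False is used so
# the example is deterministic apart from the random mask.
def _example_flava_image_transform() -> None:
    from PIL import Image

    transform = FLAVAImageTransform(is_train=False)
    out = transform(Image.new("RGB", (300, 280)))
    assert out["image"].shape == (3, 224, 224)
    assert out["image_for_codebook"].shape == (3, 112, 112)
    assert out["image_patches_mask"].shape == (14, 14)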
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/transforms/flava_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/transforms/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.nn.functional as F
from torchmultimodal.utils.common import shift_dim
from torchvision.transforms.functional import normalize, resize
MUGEN_DEFAULT_TIME_SAMPLES = 32
DEFAULT_MEAN = (0.43216, 0.394666, 0.37645)
DEFAULT_STD = (0.22803, 0.22145, 0.216989)
DEFAULT_RESIZE_SHAPE = (224, 224)
class VideoTransform:
"""Transform videos for encoding
Args:
time_samples (int): number of frames to sample in the time dimension
mean (Tuple[float, float, float]): sequence of means of each channel
std (Tuple[float, float, float]): sequence of standard deviations of each channel
resize_shape (Tuple[int, int]): shape to resize each frame to
Inputs:
video (Tensor): batch of videos with dimensions (batch, time, height, width, channel)
Returns:
Tensor: processed batch of videos with dimensions
(batch, channel, time_samples, resize_shape[0], resize_shape[1])
"""
def __init__(
self,
time_samples: int = MUGEN_DEFAULT_TIME_SAMPLES,
mean: Tuple[float, float, float] = DEFAULT_MEAN,
std: Tuple[float, float, float] = DEFAULT_STD,
resize_shape: Tuple[int, int] = DEFAULT_RESIZE_SHAPE,
):
self.time_samples = time_samples
self.mean = mean
self.std = std
self.resize_shape = resize_shape
def __call__(self, video: torch.Tensor) -> torch.Tensor:
if video.shape[-1] != 3:
raise ValueError("Video must have 3 channels")
video = self.sample_frames(video)
video = self.resize_hw(video)
video = self.normalize(video)
video = shift_dim(video, -1, 1)
return video
def sample_frames(self, video: torch.Tensor) -> torch.Tensor:
"""Samples frames from video of dims (b, t, h, w, c)"""
_, t, h, w, _ = video.shape
if t != self.time_samples:
video = F.interpolate(
shift_dim(video, -1, 1), size=[self.time_samples, h, w]
) # "b t h w c -> b c t h w"
video = shift_dim(video, 1, -1) # "b c t h w -> b t h w c"
return video
def resize_hw(self, video: torch.Tensor) -> torch.Tensor:
"""Resizes height and width of video of dims (b, t, h, w, c)"""
b, t, h, w, _ = video.shape
video = video.flatten(start_dim=0, end_dim=1) # "b t h w c -> (b t) h w c"
video = shift_dim(video, -1, 1) # "(b t) h w c -> (b t) c h w"
video = (
resize(video, self.resize_shape) if (h, w) != self.resize_shape else video
)
video = video.unflatten(dim=0, sizes=(b, t)) # "(b t) c h w -> b t c h w"
video = shift_dim(video, 2, -1) # "b t c h w -> b t h w c"
return video
def normalize(self, video: torch.Tensor) -> torch.Tensor:
"""Normalizes video of dims (b, t, h, w, c) to mean 0, std 1"""
b, t, _, _, _ = video.shape
video = video.flatten(start_dim=0, end_dim=1) # "b t h w c -> (b t) h w c"
video = shift_dim(video, -1, 1) # "(b t) h w c -> (b t) c h w"
video = video.float() / 255.0
video = normalize(video, mean=self.mean, std=self.std)
video = video.unflatten(dim=0, sizes=(b, t)) # "(b t) c h w -> b t c h w"
video = shift_dim(video, 2, -1) # "b t c h w -> b t h w c"
return video
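# Minimal usage sketch: assumes a random uint8 video batch of shape
# (batch, time, height, width, channel) is representative of real inputs.
if __name__ == "__main__":
    transform = VideoTransform()
    # 2 videos, 32 frames (matches MUGEN_DEFAULT_TIME_SAMPLES), 64x64, RGB
    dummy_videos = torch.randint(0, 256, (2, 32, 64, 64, 3), dtype=torch.uint8)
    processed = transform(dummy_videos)
    print(processed.shape)  # expected: torch.Size([2, 3, 32, 224, 224])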
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/transforms/video_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Optional, Tuple, Union
import torch
from PIL.Image import Image
from torch import nn, Tensor
from torchmultimodal import _PATH_MANAGER
from torchtext import transforms as text_transforms
from torchtext.transforms import CLIPTokenizer
from torchvision import transforms as image_transforms
from torchvision.transforms import InterpolationMode
CLIP_DEFAULT_MEAN = (0.48145466, 0.4578275, 0.40821073)
CLIP_DEFAULT_STD = (0.26862954, 0.26130258, 0.27577711)
CLIP_DEFAULT_VOCAB_BPE_PATH = "http://download.pytorch.org/models/text/clip_merges.bpe"
def convert_to_rgb(img: Image) -> Image:
return img.convert("RGB")
class CLIPTextTransform(nn.Module):
"""CLIP text transform
CLIP BPE tokenizer transform, adds start and end tokens, then pads/truncates to text_max_length as necessary.
This transform is torch scriptable.
Args:
text_max_length (int): Maximum length of text token sequences.
text_start_token (str): Special start token passed to BPE tokenizer.
text_end_token (str): Special end token passed to BPE tokenizer.
text_bpe_merges_path (str): Location of BPE merges file for text transform.
text_encoder_json_path (str, optional): Location of BPE encoder JSON file.
num_merges (int, optional): Number of merges to use from BPE merges file.
Default: 48894 = 49152 (vocab size) - 256 (# bytes) - 2 (bos/eos tokens)
Inputs:
text (Union[List[str],str]): Text or batch of texts upon which to apply
the transform.
"""
def __init__(
self,
text_max_length: int = 77,
text_start_token: str = "<|startoftext|>",
text_end_token: str = "<|endoftext|>",
text_bpe_merges_path: str = CLIP_DEFAULT_VOCAB_BPE_PATH,
text_encoder_json_path: Optional[str] = None,
num_merges: Optional[int] = 48894,
) -> None:
super().__init__()
local_merges_path = _PATH_MANAGER.get_local_path(text_bpe_merges_path)
tokenizer = CLIPTokenizer(
local_merges_path, text_encoder_json_path, num_merges=num_merges
)
text_start_token = tokenizer([text_start_token])[0][0]
text_end_token = tokenizer([text_end_token])[0][0]
text_max_length = text_max_length
self.text_transform = text_transforms.Sequential(
*[
tokenizer,
text_transforms.Truncate(text_max_length - 2),
text_transforms.AddToken(text_start_token, begin=True),
text_transforms.AddToken(text_end_token, begin=False),
text_transforms.StrToIntTransform(),
text_transforms.ToTensor(padding_value=0),
text_transforms.PadTransform(max_length=text_max_length, pad_value=0),
]
)
def forward(self, text: Union[List[str], str]) -> Tensor:
text_result = self.text_transform(text)
assert torch.jit.isinstance(text_result, Tensor)
return text_result
class CLIPImageTransform(nn.Module):
"""CLIP image transform
random resized crop (train mode) or resize and center crop, followed by RGB conversion, tensor conversion, and normalization.
Args:
        image_size (Union[int, Tuple[int, int]]): desired output image size.
image_interpolation (torchvision.transforms.InterpolationMode):
Torchvision interpolation mode used during resizing. Defaults to bicubic.
image_mean (Tuple[float]): mean of images, used for normalization.
image_std (Tuple[float]): std of images, used for normalization.
is_train (bool): Whether transform is run in train mode.
Inputs:
image (Union[List[Image], Image]): Image or batch of images upon which
to apply the transform.
"""
def __init__(
self,
image_size: Union[int, Tuple[int, int]] = 224,
image_interpolation: InterpolationMode = InterpolationMode.BICUBIC,
image_mean: Tuple[float, float, float] = CLIP_DEFAULT_MEAN,
image_std: Tuple[float, float, float] = CLIP_DEFAULT_STD,
is_train: bool = True,
) -> None:
super().__init__()
joint_transforms: List[Callable] = [
convert_to_rgb,
image_transforms.ToTensor(),
image_transforms.Normalize(image_mean, image_std),
]
base_transform: List[Callable]
if is_train:
base_transform = [
image_transforms.RandomResizedCrop(
image_size, interpolation=image_interpolation
)
]
else:
base_transform = [
image_transforms.Resize(image_size, interpolation=image_interpolation),
image_transforms.CenterCrop(image_size),
]
self.image_transform = image_transforms.Compose(
base_transform + joint_transforms
)
def forward(self, image: Union[List[Image], Image]) -> Tensor:
if isinstance(image, Image):
return self.image_transform(image)
image_result = torch.stack([self.image_transform(x) for x in image])
return image_result
class CLIPTransform(nn.Module):
"""Image and text transform for CLIP model.
Image transform: either random resized crop (train mode) or resize and center
crop, followed by RGB conversion, tensor conversion, and normalization.
Text transform: applies CLIP's BPE tokenizer transform, adds start and end
tokens, then pads/truncates to text_max_length as necessary.
Args:
        image_size (Union[int, Tuple[int, int]]): desired output image size.
image_interpolation (torchvision.transforms.InterpolationMode):
Torchvision interpolation mode used during resizing. Defaults to bicubic.
image_mean (Tuple[float]): mean of images, used for normalization.
image_std (Tuple[float]): std of images, used for normalization.
text_max_length (int): Maximum length of text token sequences.
is_train (bool): Whether transform is run in train mode.
text_start_token (str): Special start token passed to BPE tokenizer.
text_end_token (str): Special end token passed to BPE tokenizer.
text_bpe_merges_path (str): Location of BPE merges file for text transform.
text_encoder_json_path (str, optional): Location of BPE encoder JSON file.
num_merges (int, optional): Number of merges to use from BPE merges file.
Default: 48894 = 49152 (vocab size) - 256 (# bytes) - 2 (bos/eos tokens)
Inputs:
image (Union[List[Image], Image]): Image or batch of images upon which
to apply the transform.
text (Union[List[str],str]): Text or batch of texts upon which to apply
the transform.
"""
def __init__(
self,
image_size: Union[int, Tuple[int, int]] = 224,
image_interpolation: InterpolationMode = InterpolationMode.BICUBIC,
image_mean: Tuple[float, float, float] = CLIP_DEFAULT_MEAN,
image_std: Tuple[float, float, float] = CLIP_DEFAULT_STD,
text_max_length: int = 77,
is_train: bool = True,
text_start_token: str = "<|startoftext|>",
text_end_token: str = "<|endoftext|>",
text_bpe_merges_path: str = CLIP_DEFAULT_VOCAB_BPE_PATH,
text_encoder_json_path: Optional[str] = None,
num_merges: Optional[int] = 48894,
) -> None:
super().__init__()
self.image_transform = CLIPImageTransform(
image_size, image_interpolation, image_mean, image_std, is_train
)
self.text_transform = CLIPTextTransform(
text_max_length,
text_start_token,
text_end_token,
text_bpe_merges_path,
text_encoder_json_path,
num_merges,
)
def forward(
self, image: Union[List[Image], Image], text: Union[List[str], str]
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.image_transform(image), self.text_transform(text)
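# Minimal usage sketch for the image transform only: assumes a dummy 256x256 RGB
# PIL image; the text transform is omitted here because constructing it fetches
# the default BPE merges file.
if __name__ == "__main__":
    import PIL.Image

    dummy_image = PIL.Image.new("RGB", (256, 256))
    image_transform = CLIPImageTransform(is_train=False)
    image_tensor = image_transform(dummy_image)
    print(image_tensor.shape)  # expected: torch.Size([3, 224, 224])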
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/transforms/clip_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
from dataclasses import dataclass, field
from typing import Any, Callable, Optional, OrderedDict, Union
import torch
from torch import nn, Tensor
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
from torchmultimodal.modules.losses.contrastive_loss_with_temperature import (
contrastive_loss_with_temperature,
)
from torchmultimodal.utils.common import ModelOutput
def assert_labels_are_present(
labels: Optional[Tensor], category: str = "labels"
) -> None:
assert (
labels is not None
), f"Model is in training model but {category} are not passed"
@dataclass
class ITMLossOutput(ModelOutput):
logits: Tensor
loss: Tensor
@dataclass
class MaskedPredictionLossOutput(ModelOutput):
logits: Tensor
loss: Tensor
@dataclass
class FLAVAGlobalContrastiveLossOutput(OrderedDict):
text_embedding: Tensor
image_embedding: Tensor
logit_scale: Tensor
image_logits: Tensor
text_logits: Tensor
image_loss: Tensor
text_loss: Tensor
loss: Tensor
@dataclass
class FLAVAPretrainingLossesCollection(ModelOutput):
mmm_text_loss: Optional[Tensor] = None
mmm_image_loss: Optional[Tensor] = None
mim_loss: Optional[Tensor] = None
mlm_loss: Optional[Tensor] = None
itm_loss: Optional[Tensor] = None
global_contrastive_loss: Optional[Tensor] = None
@dataclass
class FLAVAPretrainingLossOutput(ModelOutput):
losses: FLAVAPretrainingLossesCollection = field(
default_factory=FLAVAPretrainingLossesCollection
)
mlm_output: Optional[MaskedPredictionLossOutput] = None
mim_output: Optional[MaskedPredictionLossOutput] = None
mmm_text_output: Optional[MaskedPredictionLossOutput] = None
mmm_image_output: Optional[MaskedPredictionLossOutput] = None
itm_output: Optional[ITMLossOutput] = None
global_contrastive_output: Optional[FLAVAGlobalContrastiveLossOutput] = None
image_sequence: Optional[Tensor] = None
text_sequence: Optional[Tensor] = None
image_masked_sequence: Optional[Tensor] = None
text_masked_sequence: Optional[Tensor] = None
multimodal_sequence: Optional[Tensor] = None
multimodal_masked_sequence: Optional[Tensor] = None
# TODO(asg): Replace later with MLP classifier if checkpoint permits
class Pooler(nn.Module):
def __init__(self, hidden_size: int = 768, **kwargs: Any):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: Tensor) -> Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class TwoWayHead(nn.Module):
def __init__(self, hidden_size: int = 768, **kwargs: Any):
super().__init__()
self.seq_relationship = nn.Linear(hidden_size, 2)
def forward(self, pooled_output: Tensor) -> Tensor:
return self.seq_relationship(pooled_output)
class ITMLoss(nn.Module):
def __init__(
self,
hidden_size: int = 768,
ignore_index: int = -1,
**kwargs: Any,
):
super().__init__()
self.pooler = Pooler(hidden_size=hidden_size)
self.cls = TwoWayHead(hidden_size=hidden_size)
self.ce_loss = nn.CrossEntropyLoss(ignore_index=ignore_index)
def forward(
self,
hidden_states: Tensor,
labels: Tensor,
) -> ITMLossOutput:
if self.training:
assert_labels_are_present(labels, "itm labels")
pooled_output = self.pooler(hidden_states)
scores = self.cls(pooled_output)
if labels is None:
loss = pooled_output.sum() * 0
else:
loss = self.ce_loss(
scores.view(-1, 2),
labels.view(-1),
)
return ITMLossOutput(logits=scores, loss=loss)
class MaskedPredictionHead(nn.Module):
def __init__(
self,
hidden_size: int = 768,
vocab_size: int = 30522,
transform_act_fn: Callable[[Tensor], Tensor] = nn.functional.gelu,
layer_norm_eps: float = 1e-5,
use_fp32_layer_norm: bool = True,
**kwargs: Any,
):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.transform_act_fn = transform_act_fn
self.layer_norm: nn.LayerNorm
if use_fp32_layer_norm:
self.layer_norm = Fp32LayerNorm(hidden_size, eps=layer_norm_eps)
else:
self.layer_norm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(hidden_size, vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(vocab_size))
# Need a link between the two variables so that the bias is
# correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class MaskedPredictionLoss(nn.Module):
def __init__(
self,
hidden_size: int = 768,
vocab_size: int = 30522,
transform_act_fn: Callable[[Tensor], Tensor] = nn.functional.gelu,
layer_norm_eps: float = 1e-5,
ignore_index: int = -1,
ignore_nan: bool = False,
**kwargs: Any,
):
super().__init__()
self.cls = MaskedPredictionHead(
hidden_size=hidden_size,
vocab_size=vocab_size,
transform_act_fn=transform_act_fn,
layer_norm_eps=layer_norm_eps,
)
self.ignore_index = ignore_index
self.vocab_size = vocab_size
self.ce_loss = nn.CrossEntropyLoss(ignore_index=ignore_index)
self.ignore_nan = ignore_nan
def forward(
self, hidden_states: Tensor, masked_labels: Optional[Tensor] = None
) -> MaskedPredictionLossOutput:
if self.training:
assert_labels_are_present(masked_labels, "masked labels")
if masked_labels is not None:
masked_tokens = masked_labels.ne(self.ignore_index)
masked_labels = masked_labels[masked_tokens]
sequence_output = hidden_states[masked_tokens, :]
else:
sequence_output = hidden_states
prediction = self.cls(sequence_output)
if masked_labels is None:
masked_loss = prediction.sum() * 0
else:
masked_loss = self.ce_loss(
prediction.view(-1, self.vocab_size),
masked_labels.view(-1),
)
# When masked_labels are all ignore_index then masked_lm_loss is NaN,
# so we replace NaN with 0.
if torch.isnan(masked_loss) and self.ignore_nan:
warnings.warn("NaN detected in masked_loss. Replacing it with 0.")
masked_loss = torch.nan_to_num(masked_loss, nan=0.0)
return MaskedPredictionLossOutput(
logits=prediction,
loss=masked_loss,
)
class FLAVAGlobalContrastiveLoss(nn.Module):
def __init__(
self,
        logit_scale: Optional[Union[float, nn.Parameter]] = None,
image_embedding_size: int = 768,
text_embedding_size: int = 768,
projection_size: int = 768,
image_embedding_index: int = 0,
text_embedding_index: int = 0,
):
super().__init__()
if logit_scale is None:
logit_scale = math.log(1 / 0.07)
# If already initialized, set to what was passed
if isinstance(logit_scale, nn.Parameter):
self.logit_scale = logit_scale
else:
self.logit_scale = nn.Parameter(logit_scale * torch.ones([]))
def forward(
self,
image_sequence: Tensor,
text_sequence: Tensor,
mask: Tensor,
) -> FLAVAGlobalContrastiveLossOutput:
text_embedding = nn.functional.normalize(text_sequence, dim=-1)
image_embedding = nn.functional.normalize(
image_sequence,
dim=-1,
)
self.logit_scale.data.clamp_(0, 4.6052)
output = contrastive_loss_with_temperature(
embeddings_a=image_embedding,
embeddings_b=text_embedding,
logit_scale=self.logit_scale,
mask=mask,
# Always true for FLAVA global contrastive loss
backprop_in_gather=True,
)
return FLAVAGlobalContrastiveLossOutput(
loss=output.loss,
image_logits=output.logits_a,
text_logits=output.logits_b,
image_loss=output.loss_a,
text_loss=output.loss_b,
text_embedding=text_embedding,
image_embedding=image_embedding,
logit_scale=self.logit_scale.data,
)
class FLAVAPretrainingLoss(nn.Module):
def __init__(
self,
        logit_scale: Optional[Union[float, nn.Parameter]] = None,
hidden_size: int = 768,
text_vocab_size: int = 30522,
image_vocab_size: int = 8192,
transform_act_fn: Callable[[Tensor], Tensor] = nn.functional.gelu,
layer_norm_eps: float = 1e-5,
ignore_index: int = -1,
mlm_weight: float = 1.0,
mim_weight: float = 1.0,
contrastive_loss_weight: float = 1.0,
mmm_image_loss_weight: float = 1.0,
mmm_text_loss_weight: float = 1.0,
itm_loss_weight: float = 1.0,
**kwargs: Any,
):
super().__init__()
self.contrastive_loss = FLAVAGlobalContrastiveLoss(
logit_scale=logit_scale,
image_embedding_size=hidden_size,
text_embedding_size=hidden_size,
projection_size=hidden_size,
)
self.mlm_loss = MaskedPredictionLoss(
hidden_size=hidden_size,
vocab_size=text_vocab_size,
transform_act_fn=transform_act_fn,
layer_norm_eps=layer_norm_eps,
ignore_index=ignore_index,
)
self.mim_loss = MaskedPredictionLoss(
hidden_size=hidden_size,
vocab_size=image_vocab_size,
transform_act_fn=transform_act_fn,
layer_norm_eps=layer_norm_eps,
ignore_index=ignore_index,
)
# Create separate weights for MMM loss
self.mmm_loss = nn.ModuleDict(
{
"mlm": MaskedPredictionLoss(
hidden_size=hidden_size,
vocab_size=text_vocab_size,
transform_act_fn=transform_act_fn,
layer_norm_eps=layer_norm_eps,
ignore_index=ignore_index,
),
"mim": MaskedPredictionLoss(
hidden_size=hidden_size,
vocab_size=image_vocab_size,
transform_act_fn=transform_act_fn,
layer_norm_eps=layer_norm_eps,
ignore_index=ignore_index,
),
}
)
self.itm_loss = ITMLoss(
hidden_size=hidden_size,
ignore_index=ignore_index,
)
self.mim_weight = mim_weight
self.mlm_weight = mlm_weight
self.contrastive_loss_weight = contrastive_loss_weight
self.mmm_image_loss_weight = mmm_image_loss_weight
self.mmm_text_loss_weight = mmm_text_loss_weight
self.itm_loss_weight = itm_loss_weight
# TODO: Some refactoring is needed in this function to make it look better
# TODO: Possibly refactor this into functional and class component
# for better usability
def forward(
self,
image_sequence: Optional[Tensor] = None,
text_sequence: Optional[Tensor] = None,
image_masked_sequence: Optional[Tensor] = None,
text_masked_sequence: Optional[Tensor] = None,
multimodal_sequence: Optional[Tensor] = None,
multimodal_masked_sequence: Optional[Tensor] = None,
itm_labels: Optional[Tensor] = None,
mim_labels: Optional[Tensor] = None,
mlm_labels: Optional[Tensor] = None,
projected_image_embeddings: Optional[Tensor] = None,
projected_text_embeddings: Optional[Tensor] = None,
) -> FLAVAPretrainingLossOutput:
outputs = FLAVAPretrainingLossOutput()
pos_mask = None
        # Check multimodal_masked_sequence to make sure this is the unimodal case.
        # In principle this case could also be backpropagated directly, since MIM is
        # independent of text, but that is left as a research question.
if (
image_masked_sequence is not None
and self.mim_weight > 0
and multimodal_masked_sequence is None
):
# Remove CLS token from image_masked_sequence
start_index = -mim_labels.size(1) if mim_labels is not None else 1
outputs.mim_output = self.mim_loss(
image_masked_sequence[:, start_index:, :], mim_labels
)
outputs.mim_output.loss *= self.mim_weight
outputs.losses.mim_loss = outputs.mim_output.loss
        # Check multimodal_masked_sequence to make sure this is the unimodal case
if (
text_masked_sequence is not None
and self.mlm_weight > 0
and multimodal_masked_sequence is None
):
start_index = -mlm_labels.size(1) if mlm_labels is not None else 1
outputs.mlm_output = self.mlm_loss(
text_masked_sequence[:, start_index:, :], mlm_labels
)
outputs.mlm_output.loss *= self.mlm_weight
outputs.losses.mlm_loss = outputs.mlm_output.loss
if multimodal_masked_sequence is not None and self.itm_loss_weight > 0:
if itm_labels is not None:
pos_pairs = itm_labels.ne(0)
pos_mask = torch.where(
pos_pairs.any(), pos_pairs, pos_pairs.new([True])
)
else:
pos_mask = torch.ones(
multimodal_masked_sequence.size(0),
device=multimodal_masked_sequence.device,
).bool()
outputs.itm_output = self.itm_loss(multimodal_masked_sequence, itm_labels)
outputs.itm_output.loss *= self.itm_loss_weight
outputs.losses.itm_loss = outputs.itm_output.loss
multimodal_masked_sequence = multimodal_masked_sequence[pos_mask]
if mlm_labels is not None:
mlm_labels = mlm_labels[pos_mask]
if mim_labels is not None:
mim_labels = mim_labels[pos_mask]
if multimodal_masked_sequence is not None and self.mmm_text_loss_weight > 0:
start_index = (
-mlm_labels.size(1)
if mlm_labels is not None
else -(text_masked_sequence.size(1) - 1)
)
sequence_for_text = multimodal_masked_sequence[:, start_index:, :]
outputs.mmm_text_output = self.mmm_loss.mlm(
sequence_for_text,
mlm_labels,
) # type: ignore
outputs.mmm_text_output.loss *= self.mmm_text_loss_weight
outputs.losses.mmm_text_loss = outputs.mmm_text_output.loss
if multimodal_masked_sequence is not None and self.mmm_image_loss_weight > 0:
# Starts from 2 because of 2 CLS, one for multimodal encoder and one
# that comes from image encoder.
total_indices = (
mim_labels.size(1)
                if mim_labels is not None
else (image_masked_sequence.size(1) - 1)
)
sequence_for_image = multimodal_masked_sequence[:, 2 : 2 + total_indices, :]
outputs.mmm_image_output = self.mmm_loss.mim(
sequence_for_image,
mim_labels,
) # type: ignore
outputs.mmm_image_output.loss *= self.mmm_image_loss_weight
outputs.losses.mmm_image_loss = outputs.mmm_image_output.loss
if (
projected_image_embeddings is not None
and projected_text_embeddings is not None
and self.contrastive_loss_weight > 0
):
outputs.global_contrastive_output = self.contrastive_loss(
projected_image_embeddings,
projected_text_embeddings,
pos_mask,
)
outputs.global_contrastive_output.loss *= self.contrastive_loss_weight
outputs.losses.global_contrastive_loss = (
outputs.global_contrastive_output.loss
)
return outputs
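# Minimal usage sketch exercising only the text (MLM) path of FLAVAPretrainingLoss;
# the hidden size, vocab size and sequence length below are illustrative assumptions.
if __name__ == "__main__":
    loss_fn = FLAVAPretrainingLoss(hidden_size=768, text_vocab_size=100)
    text_masked_sequence = torch.randn(4, 12, 768)
    # ignore_index (-1) everywhere except one masked position per sample
    mlm_labels = torch.full((4, 12), -1, dtype=torch.long)
    mlm_labels[:, 3] = torch.randint(0, 100, (4,))
    output = loss_fn(
        text_masked_sequence=text_masked_sequence, mlm_labels=mlm_labels
    )
    print(output.losses.mlm_loss)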
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/losses/flava.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/losses/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
from torch import nn, Tensor
from torch.nn import functional as F
class CommitmentLoss(nn.Module):
"""Commitment loss calculates the mean Euclidean distance between pairs of encoder output vectors
and their corresponding quantized vectors. It encourages an encoder to generate outputs closer to an embedding.
This is the beta in Eq. 3 of Oord et al. 2017 (https://arxiv.org/pdf/1711.00937.pdf)
Args:
commitment_cost (float): multiplicative weight for the commitment loss value
"""
def __init__(self, commitment_cost: float = 1.0, **kwargs: Any) -> None:
super().__init__()
self.commitment_cost = commitment_cost
def forward(self, quantized: Tensor, encoded: Tensor) -> Tensor:
# Quantized vectors must be detached because commitment loss only lets gradient flow through encoder output
loss = F.mse_loss(quantized.detach(), encoded) * self.commitment_cost
return loss
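# Minimal usage sketch: dummy encoder outputs and quantized vectors with an
# illustrative shape of (batch, channels, height, width).
if __name__ == "__main__":
    import torch

    commitment = CommitmentLoss(commitment_cost=0.25)
    encoded = torch.randn(2, 64, 8, 8, requires_grad=True)
    quantized = torch.randn(2, 64, 8, 8)
    loss = commitment(quantized, encoded)
    print(loss.item())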
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/losses/vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from typing import Any, Dict, Optional, OrderedDict, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchmultimodal.utils.distributed import gather_tensor
@dataclass
class ContrastiveLossOutput(OrderedDict):
loss: Tensor
logits_a: Tensor
logits_b: Tensor
loss_a: Tensor
loss_b: Tensor
def _gather_embeddings_and_labels(
embeddings_a: Tensor,
embeddings_b: Tensor,
backprop_in_gather: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
labels = torch.arange(embeddings_a.size(0), device=embeddings_a.device)
return embeddings_a, embeddings_b, labels
embeddings_a_all_gpus = gather_tensor(embeddings_a, backprop_in_gather)
embeddings_b_all_gpus = gather_tensor(embeddings_b, backprop_in_gather)
# embeddings_a has shape [local_batch_size, embedding_dim]
local_batch_size = embeddings_a.size(0)
labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
local_batch_size, device=embeddings_a.device
)
return (
torch.cat(embeddings_a_all_gpus),
torch.cat(embeddings_b_all_gpus),
labels,
)
def contrastive_loss_with_temperature(
embeddings_a: Tensor,
embeddings_b: Tensor,
logit_scale: nn.Parameter,
mask: Optional[Tensor] = None,
backprop_in_gather: bool = True,
cross_entropy_kwargs: Optional[Dict[str, Any]] = None,
) -> ContrastiveLossOutput:
"""Functional component for the ContrastiveLossWithTemperature. Please
check the class for more details
Args:
embeddings_a (Tensor): Tensor containing features from the first input or modality.
(In the CLIP model, these are the outputs of the image encoder.)
embeddings_b (Tensor): Tensor containing features from the second input or modality.
(In the CLIP model, these are the outputs of the text encoder.)
logit_scale (nn.Parameter): Parameter with value of log of the learned temperature
mask (Optional[Tensor], optional): If certain elements of the inputs shouldn't
be considered in the loss calculation use this option to pass a boolean
mask. Size is (BatchSize,). Defaults to None.
backprop_in_gather (bool): Whether to backpropagate the gradients from
all_gather to all workers (versus just the local worker).
cross_entropy_kwargs (Optional[Dict[str, Any]]): Any additional inputs to cross entropy loss (ex: label_smoothing)
Returns:
ContrastiveLossOutput: instance of ContrastiveLossOutput with all of the
relevant fields.
"""
# this temperature implementation follows CLIP Figure 3
temperature = torch.exp(logit_scale)
(
embeddings_a_all_gpus,
embeddings_b_all_gpus,
labels,
) = _gather_embeddings_and_labels(embeddings_a, embeddings_b, backprop_in_gather)
# logits_per_image has shape [local_batch_size, global_batch_size]
logits_per_input_a = (
torch.matmul(embeddings_a, embeddings_b_all_gpus.transpose(0, 1)) * temperature
)
logits_per_input_b = (
torch.matmul(embeddings_b, embeddings_a_all_gpus.transpose(0, 1)) * temperature
)
if mask is not None:
logits_per_input_a = logits_per_input_a[mask]
logits_per_input_b = logits_per_input_b[mask]
labels = labels[mask]
if cross_entropy_kwargs is None:
cross_entropy_kwargs = {}
loss_a = F.cross_entropy(logits_per_input_a, labels, **cross_entropy_kwargs)
loss_b = F.cross_entropy(logits_per_input_b, labels, **cross_entropy_kwargs)
loss = (loss_a + loss_b) / 2
return ContrastiveLossOutput(
loss=loss,
logits_a=logits_per_input_a,
logits_b=logits_per_input_b,
loss_a=loss_a,
loss_b=loss_b,
)
DEFAULT_LOGIT_SCALE = nn.Parameter(math.log(1 / 0.07) * torch.ones([]))
class ContrastiveLossWithTemperature(nn.Module):
"""Contrastive loss with a temperature parameter, as used in CLIP and FLAVA.
CLIP: https://arxiv.org/pdf/2103.00020.pdf
FLAVA: https://arxiv.org/pdf/2112.04482.pdf
A contrastive loss over pairs of input embeddings a and b. For each input_a
embedding, we compute a weighted cosine similarity with all input_b embeddings,
then calculate the cross entropy loss against the true (input_a, input_b) pairing.
Each input_b embedding is evaluated against all input_a embeddings similarly.
The batch's loss is the average cross entropy over all input_a and input_b embeddings
in the batch.
Temperature is a learned parameter clamped to ``[1, 100]`` and
initialized to 1 / 0.07 as in the CLIP paper.
Args:
        logit_scale (Union[float, nn.Parameter]): Log of the learnable temperature parameter value.
A nn.Parameter instantiation can also be passed directly in case parent class
is handling the initialization.
Defaults to ``ln(1/0.07)``, as in the CLIP paper.
logit_scale_min (Optional[float]): Log of the minimum temperature value.
If ``None``, then temperature will not be clamped to a minimum value.
Defaults to ``ln(1)``, as in the CLIP paper.
logit_scale_max (Optional[float]): Log of the maximum temperature value.
If ``None``, then temperature will not be clamped to a maximum value.
Defaults to ``ln(100)``, as in the CLIP paper.
Inputs: embeddings_a (Tensor): Tensor containing features from the first input or modality.
(In the CLIP model, these are the outputs of the image encoder.)
embeddings_b (Tensor): Tensor containing features from the second input or modality.
(In the CLIP model, these are the outputs of the text encoder.)
backprop_in_gather (bool): Whether to backpropagate the gradients from
all_gather to all workers (versus just the local worker).
cross_entropy_kwargs (Optional[Dict[str, Any]]): Any additional inputs to cross entropy loss (ex: label_smoothing)
mask (Optional[Tensor], optional): If certain elements of the inputs shouldn't
be considered in the loss calculation use this option to pass a boolean
mask. Size is (BatchSize,). Defaults to None.
"""
def __init__(
self,
logit_scale: Union[float, nn.Parameter] = DEFAULT_LOGIT_SCALE,
logit_scale_min: Optional[float] = math.log(1),
logit_scale_max: Optional[float] = math.log(100),
):
super().__init__()
        if logit_scale_min is None and logit_scale_max is None:
raise ValueError(
"Only one of `logit_scale_min` and `logit_scale_max` can be None."
)
self.logit_scale_min = logit_scale_min
self.logit_scale_max = logit_scale_max
# If already initialized, set to what was passed
if isinstance(logit_scale, nn.Parameter):
self.logit_scale = logit_scale
else:
self.logit_scale = nn.Parameter(logit_scale * torch.ones([]))
def forward(
self,
embeddings_a: Tensor,
embeddings_b: Tensor,
backprop_in_gather: bool = True,
cross_entropy_kwargs: Optional[Dict[str, Any]] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
self.logit_scale.data.clamp_(self.logit_scale_min, self.logit_scale_max)
return contrastive_loss_with_temperature(
embeddings_a=embeddings_a,
embeddings_b=embeddings_b,
logit_scale=self.logit_scale,
backprop_in_gather=backprop_in_gather,
cross_entropy_kwargs=cross_entropy_kwargs,
mask=mask,
).loss
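# Minimal usage sketch in a single-process (non-distributed) setting; the batch
# size and embedding dimension are illustrative assumptions.
if __name__ == "__main__":
    contrastive_loss = ContrastiveLossWithTemperature()
    image_embeddings = torch.randn(8, 512)
    text_embeddings = torch.randn(8, 512)
    loss = contrastive_loss(image_embeddings, text_embeddings)
    print(loss.item())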
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/losses/contrastive_loss_with_temperature.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class ImageTextContrastiveLoss(nn.Module):
"""
Compute the image-text contrastive loss from image-text similarity, as used in ALBEF.
Support loss distillation with pseudo-targets for non-zero alpha. Compute standard contrastive loss for zero alpha.
Inputs:
image_to_text_sim (Tensor): Image to text similarity.
text_to_image_sim (Tensor): Text to image similarity.
image_to_text_sim_m (Optional[Tensor]): Image to text similarity from momentum models.
Required if alpha is non-zero.
text_to_image_sim_m (Optional[Tensor]): Text to image similarity from momentum models.
Required if alpha is non-zero.
        sim_targets (Optional[Tensor]): Similarity pseudo-targets from momentum models. Defaults to the identity matrix.
Requires all Tensor inputs to have the same size.
alpha (Optional[float]): The interpolation value of momentum similarity and sim_targets. Default is 0.
"""
def __init__(
self,
) -> None:
super().__init__()
def forward(
self,
image_to_text_sim: Tensor,
text_to_image_sim: Tensor,
image_to_text_sim_m: Optional[Tensor] = None,
text_to_image_sim_m: Optional[Tensor] = None,
sim_targets: Optional[Tensor] = None,
alpha: Optional[float] = 0.0,
) -> Tensor:
if sim_targets is None:
sim_targets = torch.zeros(image_to_text_sim.size()).to(
image_to_text_sim.device
)
sim_targets.fill_diagonal_(1)
if alpha != 0:
assert (
image_to_text_sim_m is not None and text_to_image_sim_m is not None
), "sim_i2t_m and sim_t2i_m cannot be none for non-zero alpha"
with torch.no_grad():
image_to_text_sim_targets = (
alpha * F.softmax(image_to_text_sim_m, dim=1)
+ (1 - alpha) * sim_targets
)
text_to_image_sim_targets = (
alpha * F.softmax(text_to_image_sim_m, dim=1)
+ (1 - alpha) * sim_targets
)
else:
image_to_text_sim_targets = sim_targets
text_to_image_sim_targets = sim_targets
loss_i2t = -torch.sum(
F.log_softmax(image_to_text_sim, dim=1) * image_to_text_sim_targets, dim=1
).mean()
loss_t2i = -torch.sum(
F.log_softmax(text_to_image_sim, dim=1) * text_to_image_sim_targets, dim=1
).mean()
loss_itc = (loss_i2t + loss_t2i) / 2
return loss_itc
class CausalLanguageModelingLoss(nn.Module):
"""
Compute the autoregressive masked language modeling loss by predicting the next token, as used in VQA.
Support loss distillation for non-zero alpha. Compute standard mlm loss for zero alpha.
Args:
mask_token_id (int): The token id indicating a masked token. Default is -100.
Inputs:
labels (Tensor of shape (batch_size, seq_length)): The masked output tokens.
prediction_scores (Tensor of shape (batch_size, seq_length, vocab_size)):
The prediction scores from a prediction head.
prediction_scores_m (Optional[Tensor] of shape (batch_size, seq_length, vocab_size)):
The prediction scores from a momentum prediction head.
Required if alpha is non-zero.
alpha (float): The interpolation value between mlm_loss and loss_distill. Default is 0.
"""
def __init__(
self,
mask_token_id: int = -100,
) -> None:
super().__init__()
self.mask_token_id = mask_token_id
def forward(
self,
labels: Tensor,
prediction_scores: Tensor,
prediction_scores_m: Optional[Tensor] = None,
alpha: Optional[float] = 0.0,
) -> Tensor:
batch_size = labels.size(0)
        # shift prediction scores and labels by one for next-token prediction
prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
mlm_loss = F.cross_entropy(
prediction_scores.view(-1, prediction_scores.shape[-1]),
labels.view(-1),
reduction="none",
)
mlm_loss = mlm_loss.view(batch_size, -1).sum(1)
if alpha != 0:
assert (
prediction_scores_m is not None
), "prediction_scores_m cannot be None for non-zero alpha"
with torch.no_grad():
prediction_scores_m = prediction_scores_m[:, :-1, :].contiguous()
loss_distill = -torch.sum(
F.log_softmax(prediction_scores, dim=-1)
* F.softmax(prediction_scores_m, dim=-1),
dim=-1,
)
loss_distill = (loss_distill * (labels != self.mask_token_id)).sum(1)
mlm_loss = (1 - alpha) * mlm_loss + alpha * loss_distill
return mlm_loss
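# Minimal usage sketch with alpha=0 (no momentum distillation); the similarity
# matrices and token shapes below are illustrative assumptions.
if __name__ == "__main__":
    itc_loss = ImageTextContrastiveLoss()
    image_to_text_sim = torch.randn(4, 4)
    text_to_image_sim = torch.randn(4, 4)
    print(itc_loss(image_to_text_sim, text_to_image_sim).item())
    clm_loss = CausalLanguageModelingLoss()
    labels = torch.randint(0, 30522, (2, 6))
    prediction_scores = torch.randn(2, 6, 30522)
    # Returns one loss value per sample (sum over predicted tokens)
    print(clm_loss(labels, prediction_scores))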
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/losses/albef.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, NamedTuple, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from torchvision.ops.boxes import box_convert, generalized_box_iou
def _get_src_permutation_idx(
indices: List[Tuple[Tensor, Tensor]]
) -> Tuple[Tensor, Tensor]:
"""
Given a list of matched (src, tgt) indices, concatenate the src indices and
return along with a tensor identifying which sample they came from.
Args:
indices (List[Tuple[Tensor, Tensor]]): A list of size batch_size, containing
tuples of ``(index_i, index_j)`` where:
- ``index_i`` is the indices of the selected predictions (i.e. srcs)
- ``index_j`` is the indices of the corresponding selected targets
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
Returns:
A Tuple[Tensor, Tensor] x, where
x[0] gives the index of the sample in the batch
x[1] gives the src value from indices
Both x[0] and x[1] have size = (sum([len(index_i) for index_i in indices]))
"""
batch_idx = torch.cat(
[torch.full_like(src, i) for i, (src, _) in enumerate(indices)]
)
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
# TODO: we can calculate num_boxes in dataloader and add it to targets
# Note: num_tokens = num_classes + 1. We should make this clear in other docstrings
def soft_token_prediction_loss(
pred_logits: Tensor,
n_target_boxes: List[int],
positive_map: Tensor,
indices: List[Tuple[Tensor, Tensor]],
num_boxes: int,
no_object_weight: float = 0.1,
) -> Tensor:
"""Classification loss (NLL).
Calculate the negative log-likelihood loss between the predicted logits and the
uniform distribution over matched tokens from the ground truth, as in MDETR. The
loss for unmatched boxes is downweighted by the value no_object_weight.
Ref: https://github.com/ashkamath/mdetr/blob/main/models/mdetr.py#L464
Args:
pred_logits (Tensor): Logits predicted by the model.
Shape: (batch_size, num_queries, num_tokens)
n_target_boxes (List[int]): Number of boxes in each target
positive_map (Tensor): Map from boxes to tokens for the entire batch.
positive_map[i,j] = 1 iff box i is associated to token j.
Shape: (sum([len(target["boxes"]) for target in batch]), num_tokens)
indices (List[Tuple[Tensor, Tensor]]): A list of size batch_size, containing
tuples of ``(index_i, index_j)`` where:
- ``index_i`` is the indices of the selected predictions (in order)
- ``index_j`` is the indices of the corresponding selected targets
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
num_boxes (int): Normalization factor. Should equal the average number of
boxes per local batch.
no_object_weight (float): Relative classification weight of the no-object class.
Returns:
Negative log likelihood for the batch normalized by num_boxes
"""
logits = pred_logits.log_softmax(-1)
src_idx = _get_src_permutation_idx(indices)
tgt_idx = []
offset = 0
for i, (_, tgt) in enumerate(indices):
tgt_idx.append(tgt + offset)
offset += n_target_boxes[i]
# tgt_idx concatenates the target indices across samples in the batch,
# giving each box a unique value which will be used to permute positive_map
tgt_idx = torch.cat(tgt_idx)
# Permute the rows of positive map based on target box indices
tgt_pos = positive_map[tgt_idx]
target_sim = torch.zeros_like(logits)
# Default is the no match value
target_sim[:, :, -1] = 1
# Fill each of the corresponding rows of target_sim with the ground truth
target_sim[src_idx] = tgt_pos
loss_ce = -(logits * target_sim).sum(-1)
# Downweight the loss for unmatched boxes by no_object_weight
no_object_tensor = torch.full(
loss_ce.shape, no_object_weight, device=target_sim.device
)
no_object_tensor[src_idx] = 1
loss_ce = loss_ce * no_object_tensor
loss_ce = loss_ce.sum() / num_boxes
return loss_ce
class BoxLosses(NamedTuple):
l1_loss: torch.Tensor
giou_loss: torch.Tensor
def box_losses(
pred_boxes: Tensor,
target_boxes: List[Tensor],
indices: List[Tuple[Tensor, Tensor]],
num_boxes: int,
) -> BoxLosses:
"""Box losses: L1 loss and GIoU loss
Inputs: pred_boxes (Tensor): Bounding boxes predicted by the model.
Shape: (batch_size, num_queries, 4)
target_boxes (List[Tensor]): List of box coordinates for each sample in batch.
Length = batch_size, Tensor size = [len(target["boxes"]), 4]
indices (List[Tuple[Tensor, Tensor]]): A list of size batch_size,
containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
num_boxes (int): Normalization factor. Should equal the average number of
boxes per local batch.
Returns: BoxLosses NamedTuple with elements l1_loss and giou_loss
"""
idx = _get_src_permutation_idx(indices)
src_boxes = pred_boxes[idx]
target_boxes = torch.cat([t[i] for t, (_, i) in zip(target_boxes, indices)], dim=0)
l1_loss = F.l1_loss(src_boxes, target_boxes, reduction="sum") / num_boxes
giou_loss = 1 - torch.diag(
generalized_box_iou(
box_convert(src_boxes, in_fmt="cxcywh", out_fmt="xyxy"),
box_convert(target_boxes, in_fmt="cxcywh", out_fmt="xyxy"),
)
)
giou_loss = giou_loss.sum() / num_boxes
return BoxLosses(l1_loss=l1_loss, giou_loss=giou_loss)
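# Minimal usage sketch for box_losses: random cxcywh boxes and hand-written
# matching indices for a batch of two samples (illustrative values only).
if __name__ == "__main__":
    pred_boxes = torch.rand(2, 5, 4)  # (batch_size, num_queries, 4), cxcywh format
    target_boxes = [torch.rand(3, 4), torch.rand(2, 4)]
    indices = [
        (torch.tensor([0, 1, 2]), torch.tensor([0, 1, 2])),
        (torch.tensor([0, 3]), torch.tensor([1, 0])),
    ]
    losses = box_losses(pred_boxes, target_boxes, indices, num_boxes=5)
    print(losses.l1_loss.item(), losses.giou_loss.item())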
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/losses/mdetr.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
from torch import nn, Tensor
class AttentionFusionModule(nn.Module):
"""
Fuse embeddings through weighted sum of the corresponding linear projections.
    A linear layer followed by a softmax learns the per-channel fusion weights.
Args:
channel_to_encoder_dim: mapping of channel name to the encoding dimension
encoding_projection_dim: common dimension to project the encodings to.
defaults to min of the encoder dim if not set
"""
def __init__(
self,
channel_to_encoder_dim: Dict[str, int],
encoding_projection_dim: Optional[int] = None,
):
super().__init__()
attn_in_dim = sum(channel_to_encoder_dim.values())
self.attention = nn.Sequential(
nn.Linear(attn_in_dim, len(channel_to_encoder_dim)),
nn.Softmax(-1),
)
if encoding_projection_dim is None:
encoding_projection_dim = min(channel_to_encoder_dim.values())
encoding_projection = {}
for channel in sorted(channel_to_encoder_dim.keys()):
encoding_projection[channel] = nn.Linear(
channel_to_encoder_dim[channel], encoding_projection_dim
)
self.encoding_projection = nn.ModuleDict(encoding_projection)
def forward(self, embeddings: Dict[str, Tensor]) -> Tensor:
concatenated_in = torch.cat(
[embeddings[k] for k in sorted(embeddings.keys())], dim=-1
)
attention_weights = self.attention(concatenated_in)
projected_embeddings: List[Tensor] = []
for channel, projection in self.encoding_projection.items():
projected_embedding = projection(embeddings[channel])
projected_embeddings.append(projected_embedding)
for i in range(len(projected_embeddings)):
projected_embeddings[i] = (
attention_weights[:, i].unsqueeze(-1) * projected_embeddings[i]
)
fused = torch.sum(torch.stack(projected_embeddings), dim=0)
return fused
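# Minimal usage sketch: two modalities with different encoder dimensions; the
# fused output is projected to the smaller dimension by default.
if __name__ == "__main__":
    fusion = AttentionFusionModule({"image": 256, "text": 128})
    embeddings = {"image": torch.randn(4, 256), "text": torch.randn(4, 128)}
    fused = fusion(embeddings)
    print(fused.shape)  # expected: torch.Size([4, 128])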
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/fusions/attention_fusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
from torch import nn
class ConcatFusionModule(nn.Module):
"""Module to fuse modalities via concatenation. Sorted by keys for consistency.
Inputs:
embeddings (Dict[str, Tensor]): A dictionary mapping modalities to their
tensor representations.
"""
    def __init__(self, projection: Optional[nn.Module] = None):
super().__init__()
if projection:
self.projection = projection
else:
self.projection = nn.Identity()
def forward(self, embeddings: Dict[str, torch.Tensor]) -> torch.Tensor:
concatenated_in = torch.cat(
[embeddings[k] for k in sorted(embeddings.keys())], dim=-1
)
return self.projection(concatenated_in)
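# Minimal usage sketch: with the default identity projection the output is just
# the concatenation of the (key-sorted) modality embeddings.
if __name__ == "__main__":
    fusion = ConcatFusionModule()
    fused = fusion({"image": torch.randn(4, 256), "text": torch.randn(4, 128)})
    print(fused.shape)  # expected: torch.Size([4, 384])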
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/fusions/concat_fusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/fusions/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class DeepsetFusionModule(nn.Module):
"""
Fuse embeddings through stacking followed by pooling strategy and MLP
See https://arxiv.org/pdf/2003.01607.pdf
Args:
channel_to_encoder_dim (Dict[str, int]): mapping of channel name to the\
encoding dimension
mlp (nn.Module): MLP with in dim as projection dim (min of embed dim).\
Use MLP for mlp_classifier for default mlp.
pooling_function (Callable): Pooling function to combine the tensors,\
like torch.median\
apply_attention (bool): If self attention (2 layer net) is applied before\
stacking embeddings, defaults to False.
attention_dim (int): intermediate dim for attention layer.\
defaults to projection dim / 2
modality_normalize (bool): If normalization is applied along the modality axis,\
defaults to False
norm_factor(float): norm factor for normalization, defaults to 2.0
use_auto_mapping(bool): If true, projection layer to min embedding dim \
is applied to the embeddings. defaults to False
"""
def __init__(
self,
channel_to_encoder_dim: Dict[str, int],
mlp: nn.Module,
pooling_function: Callable,
apply_attention: bool = False,
attention_dim: Optional[int] = None,
modality_normalize: bool = False,
norm_factor: float = 2.0,
use_auto_mapping: bool = False,
):
super().__init__()
self.apply_attention = apply_attention
self.modality_normalize = modality_normalize
self.norm_factor = norm_factor
self.use_auto_mapping = use_auto_mapping
projection_dim = DeepsetFusionModule.get_projection_dim(
channel_to_encoder_dim, use_auto_mapping
)
if self.use_auto_mapping:
self.projections = nn.ModuleDict(
{
channel: nn.Linear(dim, projection_dim)
for channel, dim in channel_to_encoder_dim.items()
}
)
else:
self.projections = nn.ModuleDict(
{channel: nn.Identity() for channel in channel_to_encoder_dim}
)
if self.apply_attention:
self.attention: nn.Module
if attention_dim is None:
# default value as per older implementation
attention_dim = projection_dim // 2
self.attention = nn.Sequential(
nn.Linear(projection_dim, attention_dim),
nn.Tanh(),
nn.Linear(attention_dim, 1),
# channel axis
nn.Softmax(dim=-2),
)
else:
self.attention = nn.Identity()
self.pooling_function = pooling_function
self.mlp = mlp
def forward(self, embeddings: Dict[str, Tensor]) -> Tensor:
projections = {}
for channel, projection in self.projections.items():
projections[channel] = projection(embeddings[channel])
embedding_list = [projections[k] for k in sorted(projections.keys())]
# bsz x channels x projected_dim
stacked_embeddings = torch.stack(embedding_list, dim=1)
if self.apply_attention:
attn_weights = self.attention(stacked_embeddings)
stacked_embeddings = stacked_embeddings * attn_weights
if self.modality_normalize:
normalized_embeddings = F.normalize(
stacked_embeddings, p=self.norm_factor, dim=1
)
else:
normalized_embeddings = F.normalize(
stacked_embeddings, p=self.norm_factor, dim=2
)
pooled_features = self._pool_features(normalized_embeddings)
fused = self.mlp(pooled_features)
return fused
@classmethod
def get_projection_dim(
cls, channel_to_encoder_dim: Dict[str, int], use_auto_mapping: bool
) -> int:
if use_auto_mapping:
projection_dim = min(channel_to_encoder_dim.values())
else:
encoder_dim = set(channel_to_encoder_dim.values())
if len(encoder_dim) != 1:
                raise ValueError(
                    "Encoder dimensions should be the same for all channels "
                    "if use_auto_mapping is set to False"
                )
projection_dim = encoder_dim.pop()
return projection_dim
def _pool_features(self, embeddings: Tensor) -> Tensor:
pooled_embeddings = self.pooling_function(embeddings, dim=1)
if torch.jit.isinstance(pooled_embeddings, Tuple[Tensor, Tensor]):
return pooled_embeddings.values
if not isinstance(pooled_embeddings, Tensor):
            raise ValueError(
                "Result from pooling function should be a tensor; "
                f"{self.pooling_function} does not satisfy that"
            )
return pooled_embeddings
class DeepsetFusionWithTransformer(DeepsetFusionModule):
def __init__(
self,
channel_to_encoder_dim: Dict[str, int],
mlp: nn.Module,
pooling_function: nn.TransformerEncoder,
apply_attention: bool = False,
attention_dim: Optional[int] = None,
modality_normalize: bool = False,
norm_factor: float = 2.0,
use_auto_mapping: bool = False,
):
super().__init__(
channel_to_encoder_dim,
mlp,
pooling_function,
apply_attention,
attention_dim,
modality_normalize,
norm_factor,
use_auto_mapping,
)
def _pool_features(self, embeddings: Tensor) -> Tensor:
pooled = self.pooling_function(embeddings)
# take representation of the first token as the pooled feature
return pooled[:, 0, :]
def deepset_transformer(
channel_to_encoder_dim: Dict[str, int],
mlp: nn.Module,
apply_attention: bool = False,
attention_dim: Optional[int] = None,
modality_normalize: bool = False,
norm_factor: float = 2.0,
use_auto_mapping: bool = False,
num_transformer_att_heads: int = 8,
num_transformer_layers: int = 1,
) -> nn.Module:
"""
Helper wrapper function around DeepsetFusionWithTransformer, \
to instantiate the transformer and pass it to the fusion module
Args:
channel_to_encoder_dim (Dict[str, int]): mapping of channel name to the\
encoding dimension
mlp (nn.Module): MLP with in dim as projection dim (min of embed dim).\
Use MLP for mlp_classifier for default mlp.
apply_attention (bool): If self attention is applied before\
stacking embeddings, defaults to False
attention_dim (int): intermediate dim for attention layer. \
defaults to projection dim / 2
modality_normalize (bool): If normalization is applied along the modality axis,\
defaults to False
norm_factor(float): norm factor for normalization, defaults to 2.0
use_auto_mapping(bool): If true, projection layer to min embedding dim \
is applied to the embeddings. defaults to False
num_transformer_att_heads (int): number of attention heads. \
Used only if pooling function set to transformer
num_transformer_layers (int): number of transformer layers,\
used only if pooling function set to transformer
"""
projection_dim = DeepsetFusionWithTransformer.get_projection_dim(
channel_to_encoder_dim, use_auto_mapping
)
if projection_dim % num_transformer_att_heads != 0:
        raise ValueError(
            "projection dim should be divisible by the number of attention heads; "
            f"found {projection_dim} and {num_transformer_att_heads}"
        )
transformer = nn.TransformerEncoder(
encoder_layer=nn.TransformerEncoderLayer(
d_model=projection_dim, nhead=num_transformer_att_heads, batch_first=True
),
num_layers=num_transformer_layers,
norm=nn.LayerNorm(projection_dim),
)
fusion = DeepsetFusionWithTransformer(
channel_to_encoder_dim,
mlp,
transformer,
apply_attention,
attention_dim,
modality_normalize,
norm_factor,
use_auto_mapping,
)
return fusion
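# Minimal usage sketch: both variants assume two modalities with equal encoder
# dimensions and a small illustrative MLP head.
if __name__ == "__main__":
    channel_to_encoder_dim = {"image": 64, "text": 64}
    mlp = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 16))
    embeddings = {"image": torch.randn(4, 64), "text": torch.randn(4, 64)}
    sum_fusion = DeepsetFusionModule(
        channel_to_encoder_dim, mlp, pooling_function=torch.sum
    )
    print(sum_fusion(embeddings).shape)  # expected: torch.Size([4, 16])
    transformer_fusion = deepset_transformer(channel_to_encoder_dim, mlp)
    print(transformer_fusion(embeddings).shape)  # expected: torch.Size([4, 16])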
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/fusions/deepset_fusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Tuple, Union
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torchmultimodal.utils.common import shift_dim
class SelfAttention(nn.Module):
"""Computes attention over the entire n-dimensional input.
Args:
attn_dropout (float, optional): Probability of dropout after softmax. Default is ``0.0``.
"""
def __init__(self, attn_dropout: float = 0.0) -> None:
super().__init__()
self.attn_dropout = attn_dropout
def forward(
self,
q: Tensor,
k: Tensor,
v: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
"""
Args:
q (Tensor): Query input of shape ``(b, h, d1, ..., dn, dim_q)`` where ``h`` is the number of
attention heads, ``(d1, ..., dn)`` are the query latent dimensions and ``dim_q`` is the dimension
of the query embeddings.
k, v (Tensor): Key/value input of shape ``(b, h, d1', ..., dn', dim_kv)`` where ``h`` is the number
of attention heads, ``(d1', ..., dn')`` are the key/value latent dimensions and ``dim_kv`` is
the dimension of the key/value embeddings.
attention_mask (Tensor, optional): Tensor of shape ``(b, h, q_dn, k_dn)`` where ``q_dn`` is the
dimension of the flattened query input along its latent dimensions and ``k_dn`` that of the
flattened key input. Contains 1s for positions to attend to and 0s for masked positions.
head_mask (Tensor, optional): Tensor of shape ``(b, h, q_dn, k_dn)``.
Contains 1s for positions to attend to and 0s for masked positions.
Returns:
A tuple of output tensor and attention probabilities.
"""
_, _, *shape, _ = q.shape
# flatten to b, h, (d1, ..., dn), dim_q/dim_kv
q = q.flatten(start_dim=2, end_dim=-2)
k = k.flatten(start_dim=2, end_dim=-2)
v = v.flatten(start_dim=2, end_dim=-2)
out, attn_probs = scaled_dot_product_attention(
q,
k,
v,
attention_mask=attention_mask,
head_mask=head_mask,
attn_dropout=self.attn_dropout if self.training else 0.0,
)
return out.unflatten(2, shape), attn_probs
class AxialAttention(nn.Module):
"""Computes attention over a single axis of the input. Other dims are flattened into the batch dimension.
Args:
axial_dim (int): Dimension to compute attention on, indexed by input dimensions
(i.e., ``0`` for first input dimension, ``1`` for second).
attn_dropout (float): Probability of dropout after softmax. Default is ``0.0``.
"""
def __init__(self, axial_dim: int, attn_dropout: float = 0.0) -> None:
super().__init__()
self.axial_dim = axial_dim + 2 # account for batch, head
self.attn_dropout = attn_dropout
def forward(
self,
q: Tensor,
k: Tensor,
v: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
"""
Args:
q (Tensor): Query input of shape ``(b, h, d1, ..., dn, dim_q)`` where ``h`` is the number of
attention heads, ``(d1, ..., dn)`` are the query latent dimensions and ``dim_q`` is the dimension
of the query embeddings.
k, v (Tensor): Key/value input of shape ``(b, h, d1', ..., dn', dim_kv)`` where ``h`` is the number
of attention heads, ``(d1', ..., dn')`` are the key/value latent dimensions and ``dim_kv`` is
the dimension of the key/value embeddings.
attention_mask (Tensor, optional): Tensor of shape ``(b, h, d1, ..., q_dn, k_dn)`` where ``q_dn`` is
the dimension of the axis to compute attention on of the query and ``k_dn`` that of the key.
Contains 1s for positions to attend to and 0s for masked positions.
head_mask (Tensor, optional): Tensor of shape ``(b, h, d1, ..., q_dn, k_dn)``.
Contains 1s for positions to attend to and 0s for masked positions.
Returns:
A tuple of output tensor and attention probabilities.
"""
# Ensure axial dim is within right dimensions, should be between head dim and embedding dim
if self.axial_dim >= len(q.shape) - 1:
raise ValueError("axial dim does not match input shape")
# flatten all dims into batch dimension except chosen axial dim and channel dim
# b, h, d1, ..., dn, dim_q/dim_kv -> (b, h, d1, ..., dn-1), axial_dim, dim_q/dim_kv
q = shift_dim(q, self.axial_dim, -2).flatten(end_dim=-3)
k = shift_dim(k, self.axial_dim, -2).flatten(end_dim=-3)
v = shift_dim(v, self.axial_dim, -2)
old_shape = list(v.shape)
v = v.flatten(end_dim=-3)
out, attn_probs = scaled_dot_product_attention(
q,
k,
v,
attention_mask=attention_mask,
head_mask=head_mask,
attn_dropout=self.attn_dropout if self.training else 0.0,
)
out = out.view(*old_shape)
out = shift_dim(out, -2, self.axial_dim)
return out, attn_probs
class MultiHeadAttention(nn.Module):
"""Computes multihead attention with flexible attention mechanism and caching for fast decoding.
Multihead attention linearly projects and divides queries, keys, and values into
multiple 'heads'. This enables the computation of attention multiple times in
parallel, creating more varied representations and allows the model to jointly
attend to information from different representation subspaces at different positions,
as described in `"Attention Is All You Need (Vaswani et al. 2017)"<https://arxiv.org/pdf/1706.03762.pdf>`_.
Args:
dim_q (int): Dimensionality of query input. Also the embedding dimension of the model.
dim_kv (int): Dimensionality of key/value input. Projects to the embedding dimension of the model, ``dim_q``.
n_head (int): Number of attention heads.
attn_module (nn.Module): Module of attention mechanism to use. Default is ``SelfAttention``.
See :class:`~torchmultimodal.modules.layers.attention.SelfAttention` for API details.
add_bias (bool): Whether to add bias to the q, k, v, linear layers or not. Default is ``True``.
Attributes:
cache (Dict[str, Tensor]): Dictionary that stores past key/value vectors.
Raises:
ValueError: When ``dim_q`` or ``dim_kv`` is not divisible by ``n_head``.
"""
def __init__(
self,
dim_q: int,
dim_kv: int,
n_head: int,
attn_module: nn.Module = SelfAttention(),
add_bias: bool = True,
) -> None:
super().__init__()
if dim_q % n_head != 0 or dim_kv % n_head != 0:
raise ValueError(
"The hidden size of q, k, v must be a multiple of the number of attention heads."
)
self.dim_q = dim_q
self.dim_kv = dim_kv
self.n_head = n_head
self.query = nn.Linear(dim_q, dim_q, bias=add_bias) # q
self.key = nn.Linear(dim_kv, dim_q, bias=add_bias) # k
self.value = nn.Linear(dim_kv, dim_q, bias=add_bias) # v
self.output = nn.Linear(dim_q, dim_q, bias=True) # c
self.attn = attn_module
self.cache: Optional[Dict[str, Tensor]] = None
def forward(
self,
q: Tensor,
kv: Optional[Tensor] = None,
return_attn_weights: bool = False,
use_cache: bool = False,
causal: bool = False,
**attn_kwargs: Any,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""
Args:
q (Tensor): Query of shape ``(b, d1, ..., dn, dim_q)`` or ``(b, seq_len, dim_q)``
(for autoregressive decoding it's typical to pass in flattened tensors).
kv (Tensor, optional): Key (and value) of shape ``(b, d1', ..., dn', dim_kv)`` or
``(b, seq_len', dim_kv)``. If this argument is specified, cross-attention will be applied.
Default is ``None``.
use_cache (bool): If ``True``, caches past ``k`` and ``v`` tensors for faster decoding.
If ``False``, recomputes ``k`` and ``v`` for each decoding step. Default is ``False``.
causal (bool): Whether to use causal attention or not. Default is ``False``.
Returns:
* If ``return_attn_weights`` is ``True``: A tuple of output tensor and attention probabilities.
* If ``return_attn_weights`` is ``False``: A single output tensor.
Raises:
            TypeError: If ``causal`` is ``True`` and ``attn_module`` is an instance of ``AxialAttention``,
                since causal axial attention is not supported.
"""
if isinstance(self.attn, AxialAttention) and causal:
raise TypeError("Causal axial attention is not supported.")
# If kv is specified use those inputs for cross-attention, otherwise use q
k = v = q if kv is None else kv
# compute q
q = split_multihead(self.query(q), self.n_head)
# For causal k, v are provided step-wise so we should always compute them
# For non-causal skip computing k, v if they have been cached
if causal or not self.cache:
k = split_multihead(self.key(k), self.n_head)
v = split_multihead(self.value(v), self.n_head)
# fast decoding by caching past key, value tensors
if use_cache:
if not self.cache:
# initialize the cache with the present k, v
self.cache = dict(k=k.clone(), v=v.clone())
else:
if causal:
# append present k, v to past k, v
# for autoregressive decoding inputs are flattened as 1D sequences
# so are the cached tensors: (b, n_heads, seq_len, c)
k_, v_ = self.cache["k"], self.cache["v"]
self.cache["k"] = torch.cat([k_, k], dim=2)
self.cache["v"] = torch.cat([v_, v], dim=2)
# override the present k, v with the cache
k, v = self.cache["k"], self.cache["v"]
attn_out = self.attn(q, k, v, **attn_kwargs)
attn_probs = None
# Unpack if attn module also returns attn probs
if isinstance(attn_out, tuple):
attn_out, attn_probs = attn_out
a = merge_multihead(attn_out)
a = self.output(a)
if return_attn_weights:
return a, attn_probs
else:
return a
class AxialAttentionBlock(nn.Module):
"""Computes multihead axial attention across all dims of the input.
Axial attention is an alternative to standard full attention, where instead
of computing attention across the entire flattened input, you compute it for
each dimension. To capture the global context that full attention does, stacking
multiple axial attention layers will allow information to propagate among the
multiple dimensions of the input. This enables attention calculations on high
dimensional inputs (images, videos) where full attention would be computationally
    expensive and unfeasible. For more details, see `Axial Attention in
    Multidimensional Transformers (Ho et al. 2019) <https://arxiv.org/pdf/1912.12180.pdf>`_
    and `CCNet: Criss-Cross Attention for Semantic Segmentation (Huang et al. 2019)
    <https://arxiv.org/pdf/1811.11721.pdf>`_.
Follows implementation by VideoGPT:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
n_dims (int): Dimensionality of input data, not including batch or embedding dims.
qkv_dim (int): Dimensionality of query/key/value embedding vectors.
n_head (int): Number of heads in multihead attention. Must divide into ``qkv_dim``
evenly.
"""
def __init__(self, n_dims: int, qkv_dim: int, n_head: int) -> None:
super().__init__()
self.qkv_dim = qkv_dim
self.mha_attns = nn.ModuleList(
[
MultiHeadAttention(
dim_q=qkv_dim,
dim_kv=qkv_dim,
n_head=n_head,
attn_module=AxialAttention(d),
add_bias=False,
)
for d in range(n_dims)
]
)
def forward(self, x: Tensor) -> Tensor:
n_channel = x.shape[1]
if n_channel != self.qkv_dim:
raise ValueError(
f"Input channel dimension is {n_channel}, expected {self.qkv_dim}"
)
h = shift_dim(x, 1, -1) # (b, c, d1, ..., dn) -> (b, d1, ..., dn, c)
attn_out = torch.zeros_like(h)
for mha_attn in self.mha_attns:
attn_out += mha_attn(h)
h = attn_out
h = shift_dim(h, -1, 1) # (b, d1, ..., dn, c) -> (b, c, d1, ..., dn)
return h
def scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
attn_dropout: float = 0.0,
) -> Tuple[Tensor, Tensor]:
"""Similar to PyTorch Core's _scaled_dot_product_attention but generalized
to handle n-dimensional input tokens (images, video) and support multihead.
Computes attention as described in Attention Is All You Need (Vaswani et al. 2017)
Args:
q (Tensor): Query of shape ``(b, h, d1, ..., dn, dim_qk)`` or ``(b, h, seq_len, dim_qk)`` where
            ``h`` is number of attention heads, ``d1, ..., dn`` are latent dimensions and ``dim_qk`` is
the embedding dim of the query tensor.
k (Tensor): Key of shape ``(b, h, d1', ...., dn', dim_qk)`` or ``(b, h, seq_len', dim_qk)`` where
            ``h`` is the number of attention heads, ``d1', ..., dn'`` are latent dimensions and ``dim_qk``
is the key embedding dim aligned with query embedding dim,
see :class:`~torchmultimodal.modules.layers.attention.MultiHeadAttention`.
v (Tensor): Value of shape ``(b, h, d1', ..., dn', dim_v)`` or ``(b, h, seq_len', dim_v)`` where
``h`` is the number of attention heads, ``d1', ..., dn'`` are latent dimensions and ``dim_v``
is the embedding dim of the value tensor.
attention_mask (Tensor, optional): Tensor of shape ``(b, h, d1, ..., q_dn, k_dn)``.
Contains 1s for positions to attend to and 0s for masked positions. Applied before softmax.
head_mask (Tensor, optional): Tensor of shape ``(b, h, d1, ..., q_dn, k_dn)``.
Contains 1s for positions to attend to and 0s for masked positions.
Applied after dropout, before matrix multiplication with values.
attn_dropout (float): Probability of dropout after softmax. Default is ``0.0``.
Returns:
A tuple of output tensor and attention probabilities.
"""
# Take the dot product between "query" and "key" and scale to get the raw attention scores.
attn = torch.matmul(q, k.transpose(-1, -2))
attn = attn / torch.sqrt(torch.tensor(q.shape[-1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor with the computed attention weights
# at the positions we want to attend and -inf for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
if attention_mask is not None:
attn = attn.masked_fill(attention_mask == 0, float("-inf"))
# Normalize the attention scores to probabilities
attn_float = F.softmax(attn, dim=-1)
attn = attn_float.type_as(attn) # b, h, d1, ..., q_dn, k_dn
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attn = F.dropout(attn, p=attn_dropout)
# Mask heads if we want to
if head_mask is not None:
attn = attn * head_mask
# For each query sum over the key/value dim with attention weights
a = torch.matmul(attn, v) # b, h, d1, ..., q_dn, c
return a, attn
def split_multihead(x: Tensor, n_head: int) -> Tensor:
"""Splits channel dimension of input tensor of size (b, d1, ..., dn, c)
into multiple heads, (b, n_head, d1, ..., dn, c // n_head)"""
x = x.unflatten(-1, (n_head, -1))
# Rearrange to put head dim first, (b, n_head, d1, ..., dn, c // n_head)
x = shift_dim(x, -2, 1)
return x
def merge_multihead(x: Tensor) -> Tensor:
"""Moves head dim back to original location and concatenates heads
(b, n_head, d1, ..., dn, c // n_head) -> (b, d1, ..., dn, c)"""
return shift_dim(x, 1, -2).flatten(start_dim=-2)
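# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal smoke test of AxialAttentionBlock on a 2D input, assuming the classes defined
# above in this module. The shapes below are arbitrary and chosen only for illustration.
if __name__ == "__main__":
    import torch

    block = AxialAttentionBlock(n_dims=2, qkv_dim=16, n_head=4)
    x = torch.randn(1, 16, 8, 8)  # (batch, channels, d1, d2); channels must equal qkv_dim
    out = block(x)  # attention is computed along each axis separately and the results are summed
    assert out.shape == (1, 16, 8, 8)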
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/attention.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from torch import nn, Tensor
class BERTTextEmbeddings(nn.Module):
"""Construct word, position, and token type embeddings following BERT, similar to HuggingFace BertEmbeddings
Attributes:
hidden_size (int): size of embedding space. Default is 768.
vocab_size (int): size of vocabulary. Default is 30522.
pad_token_id (int): id used for padding token. Default is 0.
max_position_embeddings (int): the highest position id number, or max sequence length. Default is 512.
type_vocab_size (int): the highest token type id number. Default is 2.
layer_norm_eps (float): the eps value in layer norms. Default is 1e-12.
dropout (float): dropout probability after all embeddings and layernorm
offset_pos_ids (bool): if True, shift position ids by one for the padding token. Used in RoBERTa.
Default is False.
Args:
input_ids (Tensor, optional): Tensor of input vocab token ids of shape [batch, seq_len].
token_type_ids (Tensor, optional): Tensor of input token type ids of shape [batch, seq_len]. In BERT,
used to indicate whether a word is in sentence A or B for next sentence prediction
position_ids (Tensor, optional): Tensor of input position ids of shape [batch, seq_len]
        inputs_embeds (Tensor, optional): Tensor of input embeddings of shape [batch, seq_len, hidden_size],
if embeddings are calculated elsewhere
"""
def __init__(
self,
hidden_size: int = 768,
vocab_size: int = 30522,
pad_token_id: int = 0,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
layer_norm_eps: float = 1e-12,
dropout: float = 0.0,
offset_pos_ids: bool = False,
) -> None:
super().__init__()
self.word_embeddings = nn.Embedding(vocab_size, hidden_size, pad_token_id)
self.position_embeddings = nn.Embedding(max_position_embeddings, hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.pad_token_id = pad_token_id
self.offset_pos_ids = offset_pos_ids
def create_position_ids_from_input_ids(self, input_ids: Tensor) -> Tensor:
"""
Replace non-padding symbols with their position numbers.
Position numbers begin at pad_token_id+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
        Inputs: input_ids (Tensor): Tensor from which to create position IDs.
            The padding index is taken from ``self.pad_token_id`` and determines
            the starting point of position IDs.
"""
mask = input_ids.ne(self.pad_token_id).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + self.pad_token_id
def forward(
self,
input_ids: Optional[Tensor] = None,
token_type_ids: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
) -> Tensor:
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
else:
raise ValueError("input_ids or inputs_embeds must not be None")
seq_length = input_shape[1]
if position_ids is None:
if self.offset_pos_ids:
position_ids = self.create_position_ids_from_input_ids(input_ids)
else:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
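# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of BERTTextEmbeddings on a batch of token ids, using the default
# BERT-base sizes above; the batch size and sequence length below are arbitrary examples.
if __name__ == "__main__":
    import torch

    embeddings = BERTTextEmbeddings(hidden_size=768, vocab_size=30522)
    input_ids = torch.randint(0, 30522, (2, 12))  # (batch, seq_len)
    out = embeddings(input_ids=input_ids)  # word + position + token type, then layernorm and dropout
    assert out.shape == (2, 12, 768)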
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/text_embedding.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Mapping, NamedTuple, Tuple, Union
import torch
from torch import nn, Size, Tensor
from torch.nn import functional as F
from torchmultimodal.utils.common import shift_dim
class CodebookOutput(NamedTuple):
"""Outputs from :class:`~torchmultimodal.modules.layers.codebook.Codebook`.
Attributes:
encoded_flat (Tensor): The flattened encoder output of shape ``(b x d1 x ... x dn, c)``.
quantized_flat (Tensor): The nearest embeddings for the encoded of shape ``(b x d1 x ... x dn, emb_dim)``.
codebook_indices (Tensor): Indices of the nearest embeddings of shape ``(b, d1, d2, ..., dn)``.
quantized (Tensor): The nearest embeddings reshaped back to ``(b, emb_dim, d1, ..., dn)``.
"""
encoded_flat: Tensor
quantized_flat: Tensor
codebook_indices: Tensor
quantized: Tensor
class Codebook(nn.Module):
"""Bottleneck layer of VQVAE model
Codebook provides an embedding layer that takes in the output of an encoder
and performs a nearest-neighbor lookup in the embedding space.
Vector quantization was introduced in Oord et al. 2017 (https://arxiv.org/pdf/1711.00937.pdf)
to generate high-fidelity images, videos, and audio data.
The embedding weights are trained with exponential moving average updates as described
in original paper.
Code was largely inspired by a PyTorch implementation of the author's original code, found here:
https://colab.research.google.com/github/zalandoresearch/pytorch-vq-vae/blob/master/vq-vae.ipynb
and by the implementation in MUGEN (Hayes et al. 2022), found here:
https://github.com/mugen-org/MUGEN_baseline/blob/main/lib/models/video_vqvae/vqvae.py
Args:
num_embeddings (int): Number of vectors in the embedding space.
embedding_dim (int): Dimensionality of the embedding vectors.
decay (float, optional): Factor used in exponential moving average update of the embeddings.
Defaults to ``0.99``.
codebook_usage_threshold (float, optional): Threshold for the average number of times an embedding vector
is chosen below which it will be re-initialized. Defaults to ``1.0``.
epsilon (float, optional): Noise used in Laplace smoothing of codebook usage. Defaults to ``1e-7``.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
decay: float = 0.99,
codebook_usage_threshold: float = 1.0,
epsilon: float = 1e-7,
) -> None:
super().__init__()
# Embedding weights and parameters for EMA update will be registered to buffer, as they
# will not be updated by the optimizer but are still model parameters.
# code_usage and code_avg correspond with N and m, respectively, from Oord et al.
randn_init_embedding = torch.randn(num_embeddings, embedding_dim)
self.register_buffer("embedding", randn_init_embedding.clone())
self.register_buffer("code_usage", torch.zeros(num_embeddings))
self.register_buffer("code_avg", randn_init_embedding.clone())
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
self._decay = decay
# Used in Laplace smoothing of code usage
self._epsilon = epsilon
        # Threshold for randomly resetting unused embedding vectors
self.codebook_usage_threshold = codebook_usage_threshold
# Flag to track if we need to initialize embedding with encoder output
self._is_embedding_init = False
def _load_from_state_dict(
self,
state_dict: Mapping[str, Any],
prefix: str,
local_metadata: Mapping,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
# Override nn.Module's _load_from_state_dict to ensure embedding init is turned off
# when state dict is loaded.
#
# This can also be handled with _register_load_state_dict_pre_hook but since this is
# an internal function, it may change. Overriding _load_from_state_dict seems more
# stable and cleaner.
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
self._is_embedding_init = True
def _tile(self, x: Tensor, n: int) -> Tensor:
        # Repeat vectors in x if x has fewer than n vectors
num_vectors, num_channels = x.shape
if num_vectors < n:
num_repeats = (n + num_vectors - 1) // num_vectors
# Add a small amount of noise to repeated vectors
std = 0.01 / torch.sqrt(torch.tensor(num_channels))
x = x.repeat(num_repeats, 1)
x = x + torch.randn_like(x) * std
return x
def _get_random_vectors(self, x: Tensor, n: int) -> Tensor:
# Gets n random row vectors from 2D tensor x
x_tiled = self._tile(x, n)
idx = torch.randperm(x_tiled.shape[0])
x_rand = x_tiled[idx][:n]
return x_rand
def _preprocess(self, encoded: Tensor) -> Tuple[Tensor, Size]:
# Rearrange from batch x channel x n dims to batch x n dims x channel
encoded_permuted = shift_dim(encoded, 1, -1)
permuted_shape = encoded_permuted.shape
# Flatten input
encoded_flat = encoded_permuted.view(-1, permuted_shape[-1])
# channel dimension should be embedding dim so that each element in encoder
# output volume gets associated with single embedding vector
if encoded_flat.shape[-1] != self.embedding_dim:
raise ValueError(
f"Expected {encoded_flat.shape[-1]} to be embedding size of {self.embedding_dim}"
)
return encoded_flat, permuted_shape
def _postprocess(
self, quantized_flat: Tensor, permuted_shape: Union[Size, Tuple]
) -> Tensor:
# Rearrange back to batch x channel x n dims
quantized_permuted = quantized_flat.view(permuted_shape)
quantized = shift_dim(quantized_permuted, -1, 1)
return quantized
def _init_embedding(self, encoded_flat: Tensor) -> None:
# Embedding should be initialized with random output vectors from the encoder
# on the first forward pass for faster convergence, as in VideoGPT (Yan et al. 2021)
#
        # This requires the preprocessed encoder output to be flattened
self._is_embedding_init = True
encoded_flat_rand = self._get_random_vectors(encoded_flat, self.num_embeddings)
# Initialize embedding and intermediate values for EMA updates
self.embedding = encoded_flat_rand
self.code_avg = encoded_flat_rand
self.code_usage = torch.ones(self.num_embeddings)
def _ema_update_embedding(
self, encoded_flat: Tensor, codebook_indices: Tensor
) -> None:
# Closed form solution of codebook loss, ||e - E(x)||^2, is simply the average
# of the encoder output. However, we can't compute this in minibatches, so we
# must use exponential moving average.
# Convert indices to one hot encoding
codebook_onehot = nn.functional.one_hot(
codebook_indices, num_classes=self.num_embeddings
).type(torch.float)
# Count how often each embedding vector was looked up
codebook_selection_count = torch.sum(codebook_onehot, 0)
# Update usage value for each embedding vector
self.code_usage.mul_(self._decay).add_(
codebook_selection_count, alpha=(1 - self._decay)
)
# Laplace smoothing of codebook usage - to prevent zero counts
n = torch.sum(self.code_usage)
self.code_usage.add_(self._epsilon).divide_(
n + self.num_embeddings * self._epsilon
).mul_(n)
# Get all encoded vectors attracted to each embedding vector
encoded_per_codebook = torch.matmul(codebook_onehot.t(), encoded_flat)
# Update each embedding vector with new encoded vectors that are attracted to it,
# divided by its usage to yield the mean of encoded vectors that choose it
self.code_avg.mul_(self._decay).add_(
encoded_per_codebook, alpha=(1 - self._decay)
)
self.embedding = self.code_avg / self.code_usage.unsqueeze(1)
# Reset any embedding vectors that fall below threshold usage with random encoded vectors
encoded_flat_rand = self._get_random_vectors(encoded_flat, self.num_embeddings)
self.embedding = torch.where(
self.code_usage.unsqueeze(1) >= self.codebook_usage_threshold,
self.embedding,
encoded_flat_rand,
)
def _quantize(self, encoded_flat: Tensor) -> Tuple[Tensor, Tensor]:
# Calculate distances from each encoder, E(x), output vector to each embedding vector, e, ||E(x) - e||^2
distances = torch.cdist(encoded_flat, self.embedding, p=2.0) ** 2
# Encoding - select closest embedding vectors
codebook_indices_flat = torch.argmin(distances, dim=1)
# Quantize
quantized_flat = F.embedding(codebook_indices_flat, self.embedding)
# Use exponential moving average to update the embedding instead of a codebook loss,
# as suggested by Oord et al. 2017 and Razavi et al. 2019.
if self.training:
self._ema_update_embedding(encoded_flat, codebook_indices_flat)
# Straight through estimator
quantized_flat = encoded_flat + (quantized_flat - encoded_flat).detach()
return quantized_flat, codebook_indices_flat
def forward(self, z: Tensor) -> CodebookOutput:
"""
Args:
z (Tensor): Tensor containing a batch of encoder outputs of shape ``(b, c, d1, ..., dn)``.
Returns:
An instance of :class:`~torchmultimodal.modules.layers.codebook.CodebookOutput`.
"""
        # Rearrange and flatten encoder outputs so each element maps to one embedding vector
encoded_flat, permuted_shape = self._preprocess(z)
# First check if embedding is initialized correctly
if not self._is_embedding_init and self.training:
self._init_embedding(encoded_flat)
# Quantization via nearest neighbor lookup
quantized_flat, codebook_indices_flat = self._quantize(
encoded_flat
) # (b x d1 x ... x dn, emb_dim)
# Reshape back to original dims
# Note: This part could also happen before ema_update_embedding by first reshaping the indices
# and then looking up the codebook for quantized. But that will require us to pass shape info
        # into `self._quantize`. We decided to keep the reshape and the quantize ops separate for clarity.
quantized = self._postprocess(
quantized_flat, permuted_shape
) # (b, emb_dim, d1, ...., dn)
codebook_indices = codebook_indices_flat.view(
z.shape[0], *z.shape[2:]
) # (b, d1, ..., dn)
return CodebookOutput(encoded_flat, quantized_flat, codebook_indices, quantized)
def extra_repr(self) -> str:
return "num_embeddings={}, embedding_dim={}".format(
self.num_embeddings, self.embedding_dim
)
def lookup(self, indices: Tensor) -> Tensor:
        # Returns the embedding vectors for the given indices, of shape ``(*indices.shape, emb_dim)``
return F.embedding(indices, self.embedding)
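# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of the Codebook layer quantizing a fake encoder output. The channel
# dimension of the input must equal ``embedding_dim``; all sizes below are arbitrary.
if __name__ == "__main__":
    import torch

    codebook = Codebook(num_embeddings=128, embedding_dim=4)
    z = torch.randn(2, 4, 8, 8)  # (batch, embedding_dim, d1, d2)
    out = codebook(z)
    assert out.quantized.shape == (2, 4, 8, 8)
    assert out.codebook_indices.shape == (2, 8, 8)
    assert out.quantized_flat.shape == (2 * 8 * 8, 4)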
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/codebook.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Optional, Union
import torch
from torch import nn
class MLP(nn.Module):
"""A multi-layer perceptron module.
This module is a sequence of linear layers plus activation functions.
The user can optionally add normalization and/or dropout to each of the layers.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
        hidden_dims (Optional[Union[int, List[int]]]): Output dimension for each hidden layer.
dropout (float): Probability for dropout layer.
activation (Callable[..., nn.Module]): Which activation
function to use. Supports module type or partial.
normalization (Optional[Callable[..., nn.Module]]): Which
normalization layer to use (None for no normalization).
Supports module type or partial.
Inputs:
x (Tensor): Tensor containing a batch of input sequences.
"""
def __init__(
self,
in_dim: int,
out_dim: int,
hidden_dims: Optional[Union[int, List[int]]] = None,
dropout: float = 0.5,
activation: Callable[..., nn.Module] = nn.ReLU,
normalization: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
layers = nn.ModuleList()
if hidden_dims is None:
hidden_dims = []
if isinstance(hidden_dims, int):
hidden_dims = [hidden_dims]
for hidden_dim in hidden_dims:
layers.append(nn.Linear(in_dim, hidden_dim))
if normalization:
layers.append(normalization(hidden_dim))
layers.append(activation())
if dropout > 0:
layers.append(nn.Dropout(dropout))
in_dim = hidden_dim
layers.append(nn.Linear(in_dim, out_dim))
self.model = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
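# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of the MLP module with two hidden layers; all dimensions are arbitrary.
if __name__ == "__main__":
    import torch
    from torch import nn

    mlp = MLP(in_dim=16, out_dim=4, hidden_dims=[32, 32], dropout=0.0, activation=nn.ReLU)
    x = torch.randn(8, 16)
    assert mlp(x).shape == (8, 4)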
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/mlp.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
from torch import nn, Tensor
class Fp32LayerNorm(nn.LayerNorm):
    """
    LayerNorm that supports mixed-precision / fp16 training by performing normalization
    in fp32 and casting the output back to the input dtype.
    """
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def forward(self, x: Tensor) -> Tensor:
output = nn.functional.layer_norm(
x.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(x)
class Fp32GroupNorm(nn.GroupNorm):
"""
GroupNorm that supports mixed-precision / fp16 training by performing normalization
in fp32 and converting back.
Code ref:
https://github.com/facebookresearch/fairseq/blob/0338cdc3094ca7d29ff4d36d64791f7b4e4b5e6e/fairseq/modules/fp32_group_norm.py#L13
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def forward(self, x: Tensor) -> Tensor:
output = nn.functional.group_norm(
x.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(x)
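# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch showing that Fp32LayerNorm accepts half-precision inputs and returns
# outputs in the same dtype, while the normalization itself is computed in fp32.
if __name__ == "__main__":
    import torch

    layer_norm = Fp32LayerNorm(8)
    x = torch.randn(2, 4, 8, dtype=torch.float16)
    out = layer_norm(x)
    assert out.dtype == torch.float16 and out.shape == x.shape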
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/normalizations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn, Tensor
class SiLU(nn.Module):
r"""Sigmoid Linear Unit
.. math:: \text{SiLU}(x) = x * \sigma(1.702 * x)
where :math:`\sigma(x)` is the cumulative distribution function for Logistic Distribution.
Approximation of the exact GeLU for greater forward speed. Note that this is different from
    ``torch.nn.SiLU`` by the coefficient ``1.702`` from the paper:
    `Gaussian error linear units <https://arxiv.org/pdf/1606.08415.pdf>`_.
"""
def forward(self, x: Tensor) -> Tensor:
return torch.sigmoid(1.702 * x) * x
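# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch comparing this sigmoid-based GeLU approximation against the values
# it is defined by, i.e. x * sigmoid(1.702 * x).
if __name__ == "__main__":
    import torch

    act = SiLU()
    x = torch.linspace(-2.0, 2.0, steps=5)
    expected = x * torch.sigmoid(1.702 * x)
    assert torch.allclose(act(x), expected)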
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/activation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Code for some of the transformer components in this file is adapted from the
# corresponding components in the Hugging Face Transformers library.
from typing import Callable, List, NamedTuple, Optional, Tuple, Union
from torch import nn, Tensor
from torchmultimodal.modules.layers.attention import MultiHeadAttention, SelfAttention
from torchmultimodal.modules.layers.mlp import MLP
from torchmultimodal.modules.layers.normalizations import Fp32LayerNorm
class TransformerOutput(NamedTuple):
last_hidden_state: Optional[Tensor] = None
pooler_output: Optional[Tensor] = None
hidden_states: Optional[List[Tensor]] = None
attentions: Optional[List[Tensor]] = None
image_labels: Optional[Tensor] = None
class TransformerCrossAttentionLayer(nn.Module):
"""Transformer layer with self-attention on inputs and cross-attention on an encoder's outputs.
Can be used in a transformer decoder or an encoder with cross-attention. Similar to
``nn.TransformerDecoderLayer``, but generalized for use in an encoder with cross-attention as well.
Uses a custom ``MultiHeadAttention`` that supports n-dimensional inputs including sequences,
images, video.
Attributes:
d_model (int): size of hidden dimension of input
n_head (int): number of attention heads
dim_feedforward (int): size of hidden dimension of feedforward network
dropout (float): dropout probability for all dropouts. Defaults to 0.
activation (Callable): activation function in feedforward network. Defaults to ``nn.ReLU``.
layer_norm_eps (float): the eps value in layer norms. Default is 1e-12.
norm_first (bool): if True, layer norm is done prior to each of self-attention, cross-attention,
and feedforward. Otherwise, layer norm is done after.
Args:
hidden_states (Tensor): input tensor of shape [b, d1, ..., dn, c] to calculate self-attention on.
encoder_hidden_states (Tensor): input tensor of shape [b, d1, ..., dn, c] to calculate
cross-attention on.
attention_mask (Tensor, optional): mask to be applied to self-attention inputs, ``hidden_states``.
See ``MultiHeadAttention`` for shape requirements.
cross_attention_mask (Tensor, optional): mask to be applied to cross-attention inputs,
``encoder_hidden_states``. See ``MultiHeadAttention`` for shape requirements.
"""
def __init__(
self,
d_model: int,
n_head: int,
dim_feedforward: int,
dropout: float = 0.0,
activation: Callable[..., nn.Module] = nn.ReLU,
layer_norm_eps: float = 1e-12,
norm_first: bool = False,
) -> None:
super().__init__()
# attention block
self.attention = MultiHeadAttention(
dim_q=d_model,
dim_kv=d_model,
n_head=n_head,
attn_module=SelfAttention(dropout),
)
self.attention_dropout = nn.Dropout(dropout)
# cross attention block
self.cross_attention = MultiHeadAttention(
dim_q=d_model,
dim_kv=d_model,
n_head=n_head,
attn_module=SelfAttention(dropout),
)
self.cross_attention_dropout = nn.Dropout(dropout)
# feedforward block
self.feedforward = MLP(
d_model, d_model, dim_feedforward, dropout=dropout, activation=activation
)
self.feedforward_dropout = nn.Dropout(dropout)
# layernorms
self.attention_layernorm = Fp32LayerNorm(d_model, eps=layer_norm_eps)
self.cross_attention_layernorm = Fp32LayerNorm(d_model, eps=layer_norm_eps)
self.feedforward_layernorm = Fp32LayerNorm(d_model, eps=layer_norm_eps)
self.norm_first = norm_first
def _self_attention_block(
self, hidden_states: Tensor, attention_mask: Optional[Tensor] = None
) -> Tensor:
output = self.attention(
hidden_states, attention_mask=attention_mask, return_attn_weights=False
)
output = self.attention_dropout(output)
return output
def _cross_attention_block(
self,
hidden_states: Tensor,
encoder_hidden_states: Tensor,
cross_attention_mask: Optional[Tensor] = None,
) -> Tensor:
output = self.cross_attention(
hidden_states,
encoder_hidden_states,
attention_mask=cross_attention_mask,
return_attn_weights=False,
)
output = self.cross_attention_dropout(output)
return output
def _feedforward_block(self, hidden_states: Tensor) -> Tensor:
h = self.feedforward(hidden_states)
h = self.feedforward_dropout(h)
return h
def _forward_prenorm(
self,
hidden_states: Tensor,
encoder_hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
cross_attention_mask: Optional[Tensor] = None,
) -> Tensor:
x = hidden_states
kv = encoder_hidden_states
inputs = self.attention_layernorm(x)
attn_output = self._self_attention_block(inputs, attention_mask=attention_mask)
attn_residual = attn_output + x
attn_norm_output = self.cross_attention_layernorm(attn_residual)
cross_attention_output = self._cross_attention_block(
attn_norm_output, kv, cross_attention_mask
)
cross_attention_residual = cross_attention_output + attn_norm_output
cross_attention_norm_output = self.feedforward_layernorm(
cross_attention_residual
)
ff_residual = cross_attention_norm_output + self._feedforward_block(
cross_attention_norm_output
)
return ff_residual
def _forward_postnorm(
self,
hidden_states: Tensor,
encoder_hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
cross_attention_mask: Optional[Tensor] = None,
) -> Tensor:
x = hidden_states
kv = encoder_hidden_states
attn_output = self._self_attention_block(x, attention_mask=attention_mask)
attn_residual = attn_output + x
attn_norm_output = self.attention_layernorm(attn_residual)
cross_attention_output = self._cross_attention_block(
attn_norm_output, kv, cross_attention_mask
)
cross_attention_residual = cross_attention_output + attn_norm_output
cross_attention_norm_output = self.cross_attention_layernorm(
cross_attention_residual
)
ff_residual = cross_attention_norm_output + self._feedforward_block(
cross_attention_norm_output
)
outputs = self.feedforward_layernorm(ff_residual)
return outputs
def forward(
self,
hidden_states: Tensor,
encoder_hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
cross_attention_mask: Optional[Tensor] = None,
) -> Tensor:
if self.norm_first:
return self._forward_prenorm(
hidden_states,
encoder_hidden_states,
attention_mask,
cross_attention_mask,
)
else:
return self._forward_postnorm(
hidden_states,
encoder_hidden_states,
attention_mask,
cross_attention_mask,
)
class TransformerEncoderLayer(nn.Module):
"""Transformer encoder layer is made up of multihead self-attention and feedforward blocks,
based on the architecture in "Attention Is All You Need" (Vaswani et al. 2017). Similar to
``nn.TransformerEncoderLayer``, but uses a custom ``MultiHeadAttention`` that supports
n-dimensional inputs (including sequences, images, video) and head-masking.
Attributes:
d_model (int): size of hidden dimension of input
n_head (int): number of attention heads
dim_feedforward (int): size of hidden dimension of feedforward network
dropout (float): dropout probability for all dropouts. Defaults to 0.
activation (Callable): activation function in feedforward network. Defaults to ``nn.ReLU``.
layer_norm_eps (float): the eps value in layer norms. Default is 1e-12.
        norm_first (bool): if True, layer norm is done prior to each of self-attention
            and feedforward. Otherwise, layer norm is done after.
Args:
hidden_states (Tensor): input tensor of shape [b, d1, ..., dn, c] to calculate self-attention on.
attention_mask (Tensor, optional): mask to be applied to self-attention inputs, ``hidden_states``. See
``MultiHeadAttention`` for shape requirements.
head_mask (Tensor, optional): mask to be applied to self-attention inputs after softmax and dropout,
before matrix multiplication with values. See ``MultiHeadAttention`` for shape requirements.
return_attn_weights (bool, optional): return attention probabilities in addition to attention output.
Defaults to False.
"""
def __init__(
self,
d_model: int,
n_head: int,
dim_feedforward: int,
dropout: float = 0.0,
activation: Callable[..., nn.Module] = nn.ReLU,
layer_norm_eps: float = 1e-12,
norm_first: bool = False,
) -> None:
super().__init__()
# attention block
self.attention = MultiHeadAttention(
dim_q=d_model,
dim_kv=d_model,
n_head=n_head,
attn_module=SelfAttention(dropout),
)
self.attention_dropout = nn.Dropout(dropout)
# feedforward block
self.feedforward = MLP(
d_model, d_model, dim_feedforward, dropout=dropout, activation=activation
)
self.feedforward_dropout = nn.Dropout(dropout)
# layernorms
self.attention_layernorm = Fp32LayerNorm(d_model, eps=layer_norm_eps)
self.feedforward_layernorm = Fp32LayerNorm(d_model, eps=layer_norm_eps)
self.norm_first = norm_first
def _attention_block(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
output, attn_weights = self.attention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
return_attn_weights=True,
)
output = self.attention_dropout(output)
return output, attn_weights
def _feedforward_block(self, hidden_states: Tensor) -> Tensor:
h = self.feedforward(hidden_states)
h = self.feedforward_dropout(h)
return h
def _forward_prenorm(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
return_attn_weights: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
x = hidden_states
inputs = self.attention_layernorm(x)
attn_output, attn_weights = self._attention_block(
inputs,
attention_mask=attention_mask,
head_mask=head_mask,
)
attn_residual = attn_output + x
ff_residual = attn_residual + self._feedforward_block(
self.feedforward_layernorm(attn_residual)
)
if return_attn_weights:
return ff_residual, attn_weights
else:
return ff_residual
def _forward_postnorm(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
return_attn_weights: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
x = hidden_states
attn_output, attn_weights = self._attention_block(
x,
attention_mask=attention_mask,
head_mask=head_mask,
)
attn_residual = attn_output + x
attn_residual = self.attention_layernorm(attn_residual)
ff_residual = attn_residual + self._feedforward_block(attn_residual)
outputs = self.feedforward_layernorm(ff_residual)
if return_attn_weights:
return outputs, attn_weights
else:
return outputs
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
return_attn_weights: bool = False,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
if self.norm_first:
return self._forward_prenorm(
hidden_states,
attention_mask,
head_mask,
return_attn_weights,
)
else:
return self._forward_postnorm(
hidden_states,
attention_mask,
head_mask,
return_attn_weights,
)
class TransformerEncoder(nn.Module):
    """A stack of ``n_layer`` :class:`TransformerEncoderLayer` modules, with an optional
    final layer norm applied when ``final_layer_norm_eps`` is provided. The remaining
    constructor arguments are forwarded to each layer; see :class:`TransformerEncoderLayer`
    for their meaning.
    """
def __init__(
self,
n_layer: int,
d_model: int,
n_head: int,
dim_feedforward: int,
dropout: float = 0.0,
activation: Callable[..., nn.Module] = nn.ReLU,
layer_norm_eps: float = 1e-12,
norm_first: bool = False,
final_layer_norm_eps: Optional[float] = None,
):
super().__init__()
self.layer = nn.ModuleList(
[
TransformerEncoderLayer(
d_model,
n_head,
dim_feedforward,
dropout,
activation,
layer_norm_eps,
norm_first,
)
for _ in range(n_layer)
]
)
self.final_layer_norm = None
if final_layer_norm_eps:
self.final_layer_norm = Fp32LayerNorm(d_model, eps=final_layer_norm_eps)
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
return_attn_weights: bool = False,
return_hidden_states: bool = False,
) -> TransformerOutput:
all_hidden_states = [] if return_hidden_states else None
all_self_attentions = [] if return_attn_weights else None
for layer_module in self.layer:
if return_hidden_states:
all_hidden_states.append(hidden_states)
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
return_attn_weights=return_attn_weights,
)
if return_attn_weights:
hidden_states = layer_outputs[0]
all_self_attentions.append(layer_outputs[1])
else:
hidden_states = layer_outputs
if return_hidden_states:
all_hidden_states.append(hidden_states)
if self.final_layer_norm is not None:
hidden_states = self.final_layer_norm(hidden_states)
return TransformerOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
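# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of a small TransformerEncoder stack on a batch of sequences; all
# hyperparameters below are arbitrary examples.
if __name__ == "__main__":
    import torch

    encoder = TransformerEncoder(n_layer=2, d_model=32, n_head=4, dim_feedforward=64)
    x = torch.randn(2, 10, 32)  # (batch, seq_len, d_model)
    out = encoder(x, return_hidden_states=True)
    assert out.last_hidden_state.shape == (2, 10, 32)
    assert len(out.hidden_states) == 3  # the input plus the output of each of the 2 layers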
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import Tuple
import torch
from torch import nn, Tensor
class BroadcastedPositionEmbedding(nn.Module):
r"""Spatiotemporal broadcasted positional embeddings.
Based on broadcasted position embedding algorithm in codebase:
https://github.com/wilson1yan/VideoGPT/blob/c21cc7e2579f820cb2b90097406d72cf69a46474/videogpt/attention.py#L458
Each embedding vector of the ``i``-th dim is repeated by ``N`` times, where
:math:`N = \prod_{j>i}\text{dim}[j]`.
Args:
latent_shape (Tuple[int, ...]): Shape of encoded data before batching and embedding.
embedding_dim (int): The size of each embedding vector.
Raises:
        ValueError: if ``embedding_dim`` is not an integer multiple of ``len(latent_shape)``.
"""
def __init__(
self,
latent_shape: Tuple[int, ...],
embedding_dim: int,
) -> None:
"""
Args:
latent_shape (Tuple[int, ...]): Shape of encoded data before batching and embedding.
embedding_dim (int): The size of each embedding vector.
Raises:
            ValueError: if ``embedding_dim`` is not an integer multiple of ``len(latent_shape)``
"""
super().__init__()
if embedding_dim % len(latent_shape) != 0:
raise ValueError(
f"Embedding dim {embedding_dim} modulo len(latent_shape) {len(latent_shape)} is not zero"
)
self.latent_shape = latent_shape
self.n_dim = n_dim = len(self.latent_shape)
self.embedding_dim = embedding_dim
self.embedding = nn.ParameterDict(
{
f"d_{i}": nn.Parameter(
torch.randn(self.latent_shape[i], embedding_dim // n_dim) * 0.01
)
for i in range(n_dim)
}
)
@property
def indices(self) -> Tensor:
"""Returns broadcasted indices of the data
For example::
            >>> pos_emb = BroadcastedPositionEmbedding(latent_shape=(2, 3), embedding_dim=6)
>>> pos_emb.indices
tensor([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
"""
return torch.cartesian_prod(*[torch.arange(s) for s in self.latent_shape])
def _broadcast(self, i: int) -> Tensor:
"""Broadcasts the ``i``-th embedding matrix ``(self.latent_shape[i], self.embedding_dim // n_dim)`` along the other
dims of ``self.latent_shape``. The embedding dim is not touched.
For example::
            >>> pos_emb = BroadcastedPositionEmbedding(latent_shape=(2, 4), embedding_dim=6)
>>> print(pos_emb.embedding["d_0"].shape)
torch.Size([2, 3])
>>> pos_emb.embedding["d_0"] = nn.Parameter(torch.tensor([[0., 0., 0.], [0., 0., 1.]]))
>>> out = pos_emb._broadcast(i=0)
>>> print(out)
tensor([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]]])
>>> print(out.shape)
            torch.Size([2, 4, 3])
The input is broadcasted along the second dim ``4`` since it's the ``0``-th embedding constructed w.r.t the
first dim ``2``.
"""
emb = self.embedding[f"d_{i}"]
# (1, ..., 1, self.latent_shape[i], 1, ..., embedding_dim)
emb = emb.view(
*itertools.repeat(1, i),
self.latent_shape[i],
*itertools.repeat(1, (self.n_dim - i - 1)),
-1,
)
# (*self.latent_shape, embedding_dim)
emb = emb.expand(*self.latent_shape, -1)
return emb
def forward(self, position_ids: Tensor) -> Tensor:
"""
Args:
            position_ids (Tensor): batches of 1D integer tensors indicating locations of the broadcasted
position embeddings to be returned.
Returns:
A tensor with the position embeddings selected by position ids.
Raises:
IndexError: If any position id(s) provided is outside of the indices range.
"""
invalid_ids = position_ids[
torch.logical_or(position_ids >= len(self.indices), position_ids < -1)
]
if len(invalid_ids):
raise IndexError(f"Invalid position ids: {invalid_ids}")
embeddings = []
for i in range(self.n_dim):
emb = self._broadcast(i)
embeddings.append(emb)
# concatenated embeddings: (*(shape), embedding_dim)
embeddings = torch.cat(embeddings, dim=-1)
# expand the permuted tensor to form a list of size `n_dim`
# where each elm is a tensor of shape (pos_ids, batch)
indices = [*self.indices[position_ids].permute(2, 1, 0)]
embeddings = embeddings[indices].transpose(0, 1) # (batch, pos_ids, emb_dim)
return embeddings
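# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch that looks up broadcasted position embeddings for every position of a
# (2, 3) latent grid; the embedding dim of 6 is split evenly across the two latent dims.
if __name__ == "__main__":
    import torch

    pos_emb = BroadcastedPositionEmbedding(latent_shape=(2, 3), embedding_dim=6)
    position_ids = torch.arange(2 * 3).unsqueeze(0)  # one batch containing all 6 positions
    out = pos_emb(position_ids)
    assert out.shape == (1, 6, 6)  # (batch, num_position_ids, embedding_dim)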
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/position_embedding.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from itertools import repeat
from typing import Any, Optional, Tuple, Union
from torch import nn, Size, Tensor
from torch.nn import functional as F
class SamePadConv3d(nn.Module):
"""Performs a same padded convolution on a 3D input.
This maintains input shape with unit stride, and divides input dims by non-unit stride.
Code reference:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
in_channels (int): Number of channels in input, same as ``nn.Conv3d``.
out_channels (int): Number of channels for output, same as ``nn.Conv3d``.
kernel_size (int or Tuple[int, int, int]): Size of convolutional filter, same as ``nn.Conv3d``.
stride (int or Tuple[int, int, int], optional): Stride for convolution, same as ``nn.Conv3d``.
bias (bool, optional): If ``True`` use a bias for convolutional layer or not,
same as ``nn.Conv3d``. Defaults to ``True``.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int, int]],
stride: Union[int, Tuple[int, int, int]] = 1,
bias: bool = True,
**kwargs: Any,
) -> None:
super().__init__()
        self.pad_input: Optional[Tuple] = None
self.kernel_size = kernel_size
self.stride = stride
if "padding" in kwargs:
warnings.warn(
"Padding was specified but will not be used in favor of same padding, \
use Conv3d directly for custom padding"
)
self.conv = nn.Conv3d(
in_channels,
out_channels,
self.kernel_size,
stride=self.stride,
bias=bias,
**kwargs,
)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input of shape ``(b, c, d1, d2, d3)``.
"""
# Calculate padding needed based on input shape only once to reduce run time
if self.pad_input is None:
self.pad_input = calculate_same_padding(
self.kernel_size, self.stride, x.shape[2:]
)
return self.conv(F.pad(x, self.pad_input))
class SamePadConvTranspose3d(nn.Module):
"""Performs a same padded transposed convolution on a 3D input.
    This ensures output shape is input shape multiplied by stride.
Code reference:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
in_channels (int): Number of channels in input, same as Conv3d
out_channels (int): Number of channels for output, same as Conv3d
kernel_size (int or Tuple[int, int, int]): Size of convolutional filter, same as Conv3d
stride (int or Tuple[int, int, int], optional): Stride for convolution, same as Conv3d
bias (bool, optional): If ``True`` use a bias for convolutional layer or not,
same as ``nn.Conv3d``. Defaults to ``True``.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int, int]],
stride: Union[int, Tuple[int, int, int]] = 1,
bias: bool = True,
**kwargs: Any,
) -> None:
super().__init__()
        self.pad_input: Optional[Tuple] = None
self.kernel_size = kernel_size
self.stride = stride
if "padding" in kwargs:
warnings.warn(
"Padding was specified but will not be used in favor of same padding, \
use ConvTranspose3d directly for custom padding"
)
self.convt = nn.ConvTranspose3d(
in_channels, out_channels, kernel_size, stride=stride, bias=bias, **kwargs
)
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x (Tensor): Input of shape ``(b, c, d1, d2, d3)``.
"""
# Calculate padding needed based on input shape only once to reduce run time
if self.pad_input is None:
self.pad_input = calculate_same_padding(
self.kernel_size, self.stride, x.shape[2:]
)
self.convt.padding, self.convt.output_padding = calculate_transpose_padding(
self.kernel_size, self.stride, x.shape[2:], self.pad_input[::-1]
)
return self.convt(F.pad(x, self.pad_input))
def calculate_same_padding(
kernel_size: Union[int, Tuple[int, ...]],
stride: Union[int, Tuple[int, ...]],
input_shape: Union[Size, Tuple[int, ...]],
) -> Tuple:
"""Calculates padding amount on each dimension based on given kernel size and stride.
Pads to match the 'SAME' padding in Keras, i.e., with a stride of 1 output is guaranteed
to have the same shape as input, with stride 2 the dimensions of output are halved. If
stride does not divide into input evenly, then output = ceil(input / stride), following
the TensorFlow implementation explained here:
https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2
Code reference:
https://github.com/wilson1yan/VideoGPT/blob/master/videogpt/vqvae.py
Args:
kernel_size (int or Tuple[int, ...]): Size of convolutional kernel.
stride (int or Tuple[int, ...]): Stride amount of kernel.
input_shape (Size or Tuple[int, ...]): Shape of input, without batch or channel dimension.
Returns:
        A flat tuple of padding amounts (before and after) for each dimension, ordered for
        use with ``F.pad``, starting from the last dimension.
"""
n_dims = len(input_shape)
if isinstance(kernel_size, int):
kernel_size = tuple(repeat(kernel_size, n_dims))
if isinstance(stride, int):
stride = tuple(repeat(stride, n_dims))
if not (len(kernel_size) == len(stride) == len(input_shape)):
raise ValueError("dims for kernel, stride, and input must match")
total_pad = []
for k, s, d in zip(kernel_size, stride, input_shape):
if d % s == 0:
pad = max(k - s, 0)
else:
pad = max(k - (d % s), 0)
total_pad.append(pad)
pad_input = []
for p in total_pad[::-1]: # reverse since F.pad starts from last dim
pad_input.append(p // 2 + p % 2)
pad_input.append(p // 2)
pad_input = tuple(pad_input)
return pad_input
def calculate_transpose_padding(
kernel_size: Union[int, Tuple[int, ...]],
stride: Union[int, Tuple[int, ...]],
input_shape: Union[Size, Tuple[int, ...]],
input_pad: Union[int, Tuple[int, ...]] = 0,
) -> Tuple[Tuple, Tuple]:
"""Calculates padding for transposed convolution based on input dims, kernel size, and stride.
Pads to match the 'SAME' padding in Keras, i.e., with a stride of 1 output is guaranteed
to have the same shape as input, with stride 2 the dimensions of output are doubled.
The 'padding' argument in ConvTranspose effectively trims the output, and the 'output_padding'
argument effectively expands the output. These two knobs are adjusted to meet desired output dim.
Args:
kernel_size (int or Tuple[int, ...]): Size of convolutional kernel.
stride (int or Tuple[int, ...]): Stride amount of kernel.
input_shape (Size or Tuple[int, ...]): Shape of input, without batch or channel dimension.
input_pad (int or Tuple[int, ...]): Amount of padding added to input, must be twice length of
kernel/stride/input_shape.
Returns:
A tuple of padding and output_padding to be used in ConvTranspose layers
"""
n_dims = len(input_shape)
if isinstance(kernel_size, int):
kernel_size = tuple(repeat(kernel_size, n_dims))
if isinstance(stride, int):
stride = tuple(repeat(stride, n_dims))
if isinstance(input_pad, int):
input_pad = tuple(repeat(input_pad, n_dims * 2))
if not (len(kernel_size) == len(stride) == len(input_shape)):
raise ValueError("dims for kernel, stride, and input must match")
if len(input_pad) % 2 != 0 or len(input_pad) // 2 != len(input_shape):
raise ValueError("input_pad length must be twice the number of dims")
transpose_pad = []
output_pad = []
# Calculate current projected output dim and adjust padding and output_padding to match
# input_dim * stride for a ConvTranspose layer
for i, (d, k, s) in enumerate(zip(input_shape, kernel_size, stride)):
# Calculate the output dim after transpose convolution:
# out_dim = kernel + (in_dim + pad - 1) * stride
# This needs to be adjusted with padding to meet desired dim, in_dim * stride
output_shape_actual = k + (d + input_pad[2 * i] + input_pad[2 * i + 1] - 1) * s
output_shape_expected = d * s
# This controls padding argument in ConvTranspose,
# where output dim is effectively trimmed by 2 * transpose_pad
transpose_pad.append(
max((output_shape_actual - output_shape_expected + 1) // 2, 0)
)
# This controls output_padding argument in ConvTranspose,
# where output dim is expanded by 1 * output_pad
output_pad.append(
output_shape_expected - (output_shape_actual - transpose_pad[-1] * 2)
)
transpose_pad = tuple(transpose_pad)
output_pad = tuple(output_pad)
return transpose_pad, output_pad
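# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of SamePadConv3d: with stride 2 and 'same' padding, every spatial
# dimension of the output is the corresponding input dimension divided by the stride.
if __name__ == "__main__":
    import torch

    conv = SamePadConv3d(in_channels=3, out_channels=8, kernel_size=3, stride=2)
    x = torch.randn(1, 3, 8, 16, 16)  # (batch, channels, d1, d2, d3)
    assert conv(x).shape == (1, 8, 4, 8, 8)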
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/conv.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch.nn.functional as F
from torch import nn, Tensor
class MultiHeadSelfAttention(nn.Module):
"""
Multihead self attention.
    Similar to the self attention variant of MHA in attention.py but uses ``scaled_dot_product_attention`` from PyTorch
    (which can dispatch to FlashAttention or a memory-efficient kernel under certain conditions).
TODO: merge this into attention.py once other models are ready to use it.
Args:
embed_dim (int): embedding dimension of the input
num_heads (int): number of attn heads
dropout (float): dropout rate
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0):
super().__init__()
self.input_proj = nn.Linear(embed_dim, 3 * embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
self.num_heads = num_heads
self.dropout = dropout
def forward(
self,
query: Tensor,
attn_mask: Optional[Tensor] = None,
is_causal: bool = False,
) -> Tensor:
"""
Args:
query (Tensor): input query of shape bsz x seq_len x embed_dim
attn_mask (optional Tensor): attention mask of shape bsz x seq_len x seq_len. Two types of masks are supported.
A boolean mask where a value of True indicates that the element should take part in attention.
A float mask of the same type as query that is added to the attention score.
is_causal (bool): If true, does causal attention masking. attn_mask should be set to None if this is set to True
Returns:
attention output Tensor of shape bsz x seq_len x embed_dim
"""
bsz = query.size(0)
embed_dim = query.size(-1)
projected_query = self.input_proj(query)
query, key, value = projected_query.chunk(3, dim=-1)
head_dim = embed_dim // self.num_heads
# bsz x seq len x embed_dim => bsz x num_heads x seq len x head_dim
query = query.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
key = key.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
value = value.view(bsz, -1, self.num_heads, head_dim).transpose(1, 2)
attn = F.scaled_dot_product_attention(
            query, key, value, attn_mask, self.dropout if self.training else 0.0, is_causal
)
attn = attn.transpose(1, 2).reshape(bsz, -1, embed_dim)
attn_out = self.output_proj(attn)
return attn_out
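# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of causal multihead self attention over a batch of sequences, relying
# on ``F.scaled_dot_product_attention`` (PyTorch 2.0+); the sizes below are arbitrary.
if __name__ == "__main__":
    import torch

    mhsa = MultiHeadSelfAttention(embed_dim=32, num_heads=4)
    x = torch.randn(2, 10, 32)  # (batch, seq_len, embed_dim)
    out = mhsa(x, is_causal=True)  # attn_mask must stay None when is_causal=True
    assert out.shape == (2, 10, 32)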
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/layers/multi_head_attention.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn, Tensor
POOLING_TYPES = ["sum", "mean", "max"]
class EmbeddingEncoder(nn.Module):
"""Combine embeddings for tensor representing list of indices based on pooling type
Args:
embedding (nn.Embedding): embedding module
        pooling_type (str): pooling function used to combine the embeddings (e.g. sum). Choose
            from ``POOLING_TYPES``
pooling_dim (int) : dimension along which the pooling function is applied
        use_hash (bool): whether hashing based on the embedding vocab size is applied to the input
            before the embedding layer
Inputs:
x (Tensor): Tensor bsz x max seq length representing (padded) list of indices
for embedding
"""
def __init__(
self,
embedding: nn.Embedding,
pooling_type: str,
pooling_dim: int = 1,
use_hash: bool = False,
):
super().__init__()
self.embedding = embedding
if pooling_type not in POOLING_TYPES:
raise ValueError(
f"pooling type should be in {POOLING_TYPES}, found {pooling_type}"
)
self.pooling_type = pooling_type
self.pooling_dim = pooling_dim
self.use_hash = use_hash
def forward(self, x: Tensor) -> Tensor:
if self.use_hash:
x = x % (self.embedding.num_embeddings - 1) + 1
out = self.embedding(x)
if self.pooling_type == "sum":
out = torch.sum(out, dim=self.pooling_dim)
elif self.pooling_type == "mean":
out = torch.mean(out, dim=self.pooling_dim)
else:
out = torch.max(out, dim=self.pooling_dim).values
return out
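# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# A minimal sketch of EmbeddingEncoder mean-pooling the embeddings of a (padded) list of
# indices; the vocab size, embedding dim, and sequence length below are arbitrary.
if __name__ == "__main__":
    import torch
    from torch import nn

    embedding = nn.Embedding(num_embeddings=100, embedding_dim=16)
    encoder = EmbeddingEncoder(embedding, pooling_type="mean")
    indices = torch.randint(0, 100, (4, 7))  # (batch, max_seq_len)
    assert encoder(indices).shape == (4, 16)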
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/encoders/embedding_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Modified from 2d Swin Transformers in torchvision:
# https://github.com/pytorch/vision/blob/main/torchvision/models/swin_transformer.py
from typing import Callable, List, Optional
from torch import nn
from torchvision.models.video.swin_transformer import (
PatchMerging,
SwinTransformer3d as TVSwinTransformer3d,
)
class SwinTransformer3d(TVSwinTransformer3d):
"""
Implements 3D Swin Transformer from the `"Video Swin Transformer" <https://arxiv.org/abs/2106.13230>`_ paper.
    The model implementation is reused from torchvision:
https://github.com/pytorch/vision/blob/main/torchvision/models/video/swin_transformer.py#L363
Args:
patch_size (List[int]): Patch size.
embed_dim (int): Patch embedding dimension.
        depths (List[int]): Depth of each Swin Transformer layer.
        num_heads (List[int]): Number of attention heads in different layers.
window_size (List[int]): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.0.
dropout (float): Dropout rate. Default: 0.0.
attention_dropout (float): Attention dropout rate. Default: 0.0.
stochastic_depth_prob (float): Stochastic depth rate. Default: 0.0.
num_classes (int, optional): Number of classes for the classification head;
if None, the model has no classification head. Default: 400.
block (nn.Module, optional): SwinTransformer Block. Default: None.
norm_layer (nn.Module, optional): Normalization layer. Default: None.
patch_embed (nn.Module, optional): Patch Embedding layer. Default: None.
"""
def __init__(
self,
patch_size: List[int],
embed_dim: int,
depths: List[int],
num_heads: List[int],
window_size: List[int],
mlp_ratio: float = 4.0,
dropout: float = 0.0,
attention_dropout: float = 0.0,
stochastic_depth_prob: float = 0.0,
num_classes: Optional[int] = 400,
norm_layer: Optional[Callable[..., nn.Module]] = None,
block: Optional[Callable[..., nn.Module]] = None,
downsample_layer: Callable[..., nn.Module] = PatchMerging,
patch_embed: Optional[Callable[..., nn.Module]] = None,
):
# Create non-optional _num_classes to construct torchvision SwinTransformer3d
_num_classes = 400
if num_classes is not None:
_num_classes = num_classes
super().__init__(
patch_size=patch_size,
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
dropout=dropout,
attention_dropout=attention_dropout,
stochastic_depth_prob=stochastic_depth_prob,
num_classes=_num_classes,
norm_layer=norm_layer,
block=block,
downsample_layer=downsample_layer,
patch_embed=patch_embed,
)
if num_classes is None:
# Convert the head into identity
self.head = nn.Identity()
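# Illustrative construction; the hyperparameters below are assumptions that roughly
# mirror a "tiny" video Swin configuration and are not prescribed by this module.
example_video_swin = SwinTransformer3d(
    patch_size=[2, 4, 4],
    embed_dim=96,
    depths=[2, 2, 6, 2],
    num_heads=[3, 6, 12, 24],
    window_size=[8, 7, 7],
    num_classes=None,  # replaces the classification head with nn.Identity
)
# For a clip batch of shape (batch, channels, time, height, width), e.g.
# features = example_video_swin(torch.randn(1, 3, 16, 224, 224)),
# the output should be a pooled feature of size embed_dim * 2 ** (len(depths) - 1).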
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/encoders/swin_transformer_3d_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Optional, Union
import torch
from torch import nn, Tensor
from torchmultimodal.modules.fusions.deepset_fusion import (
DeepsetFusionModule,
DeepsetFusionWithTransformer,
)
class MILEncoder(nn.Module):
"""
Multi instance learning encoder that partitions the input into a set of inputs
and uses a shared encoder followed by deepset
fusion to get a pooled representation of the entire input. Example use is to build a
single representation from embeddings of all images in a post.
Args:
partition_sizes (List[int]): list of size for each partition of the input
shared_encoder (nn.Module): Shared encoder for each partition of the input.
shared_encoder_dim (int) : Output dimension of the encoders
The following fields are the same as the params for deepset fusion:
mlp (nn.Module): MLP with input dim equal to the projection dim (min of embed dims).\
Use MLP from mlp_classifier for the default mlp implementation.
pooling_function (Callable): Pooling function to combine the tensors,\
like torch.median
apply_attention (bool): Whether self attention is applied before\
stacking embeddings. Defaults to False
modality_normalize (bool): Whether normalization is applied along the modality axis.\
Defaults to False
norm_factor (float): norm factor for normalization. Defaults to 2.0
use_auto_mapping (bool): If True, a projection layer to the min embedding dim\
is applied to the embeddings. Defaults to False
"""
def __init__(
self,
partition_sizes: List[int],
shared_encoder: nn.Module,
shared_encoder_dim: int,
mlp: nn.Module,
pooling_function: Callable,
apply_attention: bool = False,
attention_dim: Optional[int] = None,
modality_normalize: bool = False,
norm_factor: float = 2.0,
use_auto_mapping: bool = False,
):
super().__init__()
self.partition_sizes = partition_sizes
self.shared_encoder = shared_encoder
channel_to_encoder_dim = {}
for i in range(len(partition_sizes)):
channel_to_encoder_dim[self.get_channel_name(i)] = shared_encoder_dim
deepset_fusion_cls = (
DeepsetFusionWithTransformer
if isinstance(pooling_function, nn.TransformerEncoder)
else DeepsetFusionModule
)
self.deepset_fusion: Union[
DeepsetFusionWithTransformer, DeepsetFusionModule
] = deepset_fusion_cls(
channel_to_encoder_dim=channel_to_encoder_dim,
mlp=mlp,
pooling_function=pooling_function, # type: ignore
apply_attention=apply_attention,
attention_dim=attention_dim,
modality_normalize=modality_normalize,
norm_factor=norm_factor,
use_auto_mapping=use_auto_mapping,
)
def get_channel_name(self, id: int) -> str:
# create dummy channel name to pass to fusion
return f"mil_{id}"
def forward(self, x: Tensor) -> Tensor:
idx = 0
input_size = x.size(dim=1)
if input_size != sum(self.partition_sizes):
raise ValueError(
f"partition sizes should sum to the input size {input_size}"
)
partitioned_input = torch.split(x, self.partition_sizes, dim=1)
encoded_input = {}
for idx, input in enumerate(partitioned_input):
key = self.get_channel_name(idx)
encoded_input[key] = self.shared_encoder(input)
return self.deepset_fusion(encoded_input)
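# Standalone sketch (hypothetical sizes, not a full MILEncoder instantiation) of the
# partition-and-encode pattern in forward above: the input is split along dim 1 into
# the configured partitions, and every partition is passed through the same shared
# encoder before deepset fusion combines the per-partition representations.
example_partition_sizes = [3, 2]
example_shared_encoder = nn.Linear(8, 16)  # assumed shared encoder over the last dim
example_x = torch.randn(4, sum(example_partition_sizes), 8)
example_chunks = torch.split(example_x, example_partition_sizes, dim=1)
example_encoded = {
    f"mil_{i}": example_shared_encoder(chunk) for i, chunk in enumerate(example_chunks)
}  # shapes: mil_0 -> (4, 3, 16), mil_1 -> (4, 2, 16)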
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/encoders/mil_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/encoders/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Tuple, Union
import torch
from torch import nn, Tensor
class WeightedEmbeddingEncoder(nn.Module):
"""Combine weighted embeddings for tensor representing list of indices based on
pooling type.
Args:
embedding (nn.Embedding): embedding module
pooling_function (Callable[[Tensor, int], Union[Tensor, Tuple]]): pooling function to combine the weighted embeddings,\
e.g. torch.sum. The function should return a tensor, or a namedtuple whose values field contains the tensor (as torch.max does).
pooling_dim (int) : dimension along which the pooling function is applied
Inputs:
weights (Tensor): A float tensor of shape [batch_size x num_categories] containing the weights of a categorical feature.\
The weights represent multiplier factors for the corresponding category embedding vectors.
"""
def __init__(
self,
embedding: nn.Embedding,
pooling_function: Callable[[Tensor, int], Union[Tensor, Tuple]],
pooling_dim: int = 1,
) -> None:
super().__init__()
self.embedding = embedding
self.pooling_function = pooling_function
self.pooling_dim = pooling_dim
def forward(self, weights: Tensor) -> Tensor:
index = torch.arange(0, weights.size(1), dtype=torch.int)
index = index.to(weights.device)
weighted_embeddings = self.embedding(index) * weights.unsqueeze(-1)
pooled_embeddings = self.pooling_function(weighted_embeddings, self.pooling_dim)
if isinstance(pooled_embeddings, Tensor):
output: Tensor = pooled_embeddings
else:
assert hasattr(
pooled_embeddings, "values"
), "pooled embeddings should be Tensor or tuple with values field as Tensor"
output = pooled_embeddings.values # type: ignore
return output
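# Example usage (hypothetical category count and embedding dim): weight each category
# embedding by the corresponding entry of the weights tensor, then sum over categories.
example_embedding = nn.Embedding(num_embeddings=5, embedding_dim=8)
example_encoder = WeightedEmbeddingEncoder(example_embedding, pooling_function=torch.sum)
example_weights = torch.tensor([[0.1, 0.0, 2.0, 0.0, 0.5]])  # batch_size x num_categories
example_output = example_encoder(example_weights)  # expected shape: (1, 8)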
| EXA-1-master | exa/libraries/multimodal-main/torchmultimodal/modules/encoders/weighted_embedding_encoder.py |