python_code | repo_name | file_path |
---|---|---|
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.albef_models import AlbefBase, compute_sim_matrix
from lavis.models.albef_models.albef_outputs import (
AlbefIntermediateOutput,
AlbefOutput,
AlbefSimilarity,
)
from lavis.models.base_model import MomentumDistilationMixin, SharedQueueMixin
from lavis.models.med import XBertEncoder
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
@registry.register_model("albef_retrieval")
class AlbefRetrieval(AlbefBase, MomentumDistilationMixin, SharedQueueMixin):
"""
ALBEF retrieval model.
Supported model types:
- coco: fine-tuned ALBEF base model on COCO dataset (Karpathy split).
- flickr: fine-tuned ALBEF base model on Flickr30k dataset.
Usage:
>>> from lavis.models import load_model
>>> model = load_model("albef_retrieval", "coco")
>>> model = load_model("albef_retrieval", "flickr")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"coco": "configs/models/albef_retrieval_coco.yaml",
"flickr": "configs/models/albef_retrieval_flickr.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
queue_size,
embed_dim=256,
temp=0.07,
use_distill=True,
momentum=0.995,
alpha=0.4,
max_txt_len=30,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.itm_head = nn.Linear(text_width, 2)
# create the momentum encoder
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.vision_proj_m = deepcopy(self.vision_proj)
self.text_proj_m = deepcopy(self.text_proj)
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.vision_proj, self.vision_proj_m],
[self.text_proj, self.text_proj_m],
]
self.copy_params()
# create the feature queues; idx_queue is initialized to -100 so that empty slots never match a real image_id
self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("idx_queue", torch.full((1, queue_size), -100))
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
self.queue_size = queue_size
self.momentum = momentum
self.temp = nn.Parameter(temp * torch.ones([]))
self.alpha = alpha
self.max_txt_len = max_txt_len
self.use_distill = use_distill
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / (2 * num_iters_per_epoch))
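# Illustration: with num_iters_per_epoch = 100 this factor grows linearly from 0.0 at
# (epoch=0, iters=0) to 1.0 at the start of epoch 2 and stays there, i.e. the
# distillation weight alpha is ramped up over the first two epochs.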
def forward(self, samples):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). The input images.
- text_input (list): A list of length batch_size, each element is a string of text/caption.
- image_id (torch.Tensor): A tensor of shape (batch_size, ). The image ids, used to identify captions that belong to the same image within a batch.
- epoch (int): The current epoch.
- iters (int): The current iteration.
- num_iters_per_epoch (int): The number of iterations per epoch.
Returns:
AlbefOutput: An AlbefOutput object. See ``lavis.models.albef_models.albef_outputs.AlbefOutput`` for more details.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("albef_retrieval", "coco")
>>> images = torch.randn(4, 3, 384, 384)
>>> text_input = ["caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3"]
>>> image_id = torch.tensor([1, 1, 2, 3])
>>> samples = {"image": images, "text_input": text_input, "image_id": image_id, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100}
>>> output = model(samples)
>>> output.keys()
odict_keys(['sims', 'intermediate_output', 'loss', 'loss_itc', 'loss_itm'])
"""
image = samples["image"]
caption = samples["text_input"]
idx = samples["image_id"]
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
with torch.no_grad():
self.temp.clamp_(0.001, 0.5)
image_embeds = self.visual_encoder.forward_features(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text = self.tokenizer(
caption,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
text_output = self.text_encoder.forward_text(text)
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(self.text_proj(text_embeds[:, 0, :]), dim=-1)
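# ground-truth contrastive targets: an entry (in the batch or in the queue) is a positive
# if it shares the same image_id; the target distribution is uniform over all positives.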
idx = idx.view(-1, 1)
idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()], dim=1)
pos_idx = torch.eq(idx, idx_all).float()
sim_targets = pos_idx / pos_idx.sum(1, keepdim=True)
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(image)
image_feat_m = F.normalize(
self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1
)
image_feat_all = torch.cat(
[image_feat_m.t(), self.image_queue.clone().detach()], dim=1
)
text_output_m = self.text_encoder_m.forward_text(text)
text_embeds_m = text_output_m.last_hidden_state
text_feat_m = F.normalize(self.text_proj_m(text_embeds_m[:, 0, :]), dim=-1)
text_feat_all = torch.cat(
[text_feat_m.t(), self.text_queue.clone().detach()], dim=1
)
if self.use_distill:
sim_i2t_m = image_feat_m @ text_feat_all / self.temp
sim_t2i_m = text_feat_m @ image_feat_all / self.temp
sim_i2t_targets = (
alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
)
sim_t2i_targets = (
alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets
)
sim_i2t = image_feat @ text_feat_all / self.temp
sim_t2i = text_feat @ image_feat_all / self.temp
if self.use_distill:
loss_i2t = -torch.sum(
F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1
).mean()
loss_t2i = -torch.sum(
F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1
).mean()
else:
loss_i2t = -torch.sum(
F.log_softmax(sim_i2t, dim=1) * sim_targets, dim=1
).mean()
loss_t2i = -torch.sum(
F.log_softmax(sim_t2i, dim=1) * sim_targets, dim=1
).mean()
loss_itc = (loss_i2t + loss_t2i) / 2
self._dequeue_and_enqueue(image_feat_m, text_feat_m, idx)
encoder_output_pos = self.text_encoder(
encoder_embeds=text_embeds,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
mode="fusion",
)
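# hard-negative mining: for each text, sample an in-batch negative image (and vice versa)
# with probability proportional to the softmax of the in-batch similarity; pairs that share
# an image_id are masked out so true positives cannot be drawn as negatives.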
with torch.no_grad():
bs = image.size(0)
weights_i2t = F.softmax(sim_i2t[:, :bs] + 1e-4, dim=1)
weights_t2i = F.softmax(sim_t2i[:, :bs] + 1e-4, dim=1)
mask = torch.eq(idx, idx.T)
weights_i2t.masked_fill_(mask, 0)
weights_t2i.masked_fill_(mask, 0)
# select a negative image for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text for each image
text_embeds_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_embeds_neg.append(text_embeds[neg_idx])
text_atts_neg.append(text.attention_mask[neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0)
text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)
image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
image_atts_all = torch.cat([image_atts, image_atts], dim=0)
encoder_output_neg = self.text_encoder(
encoder_embeds=text_embeds_all,
attention_mask=text_atts_all,
encoder_hidden_states=image_embeds_all,
encoder_attention_mask=image_atts_all,
return_dict=True,
mode="fusion",
)
vl_embeddings = torch.cat(
[
encoder_output_pos.last_hidden_state[:, 0, :],
encoder_output_neg.last_hidden_state[:, 0, :],
],
dim=0,
)
itm_logits = self.itm_head(vl_embeddings)
itm_labels = torch.cat(
[torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
dim=0,
).to(self.device)
loss_itm = F.cross_entropy(itm_logits, itm_labels)
return AlbefOutput(
loss=loss_itc + loss_itm,
loss_itc=loss_itc,
loss_itm=loss_itm,
sims=AlbefSimilarity(
sim_i2t=sim_i2t,
sim_t2i=sim_t2i,
sim_i2t_m=sim_i2t_m,
sim_t2i_m=sim_t2i_m,
sim_i2t_targets=sim_i2t_targets,
sim_t2i_targets=sim_t2i_targets,
),
intermediate_output=AlbefIntermediateOutput(
image_embeds=image_embeds,
image_embeds_m=image_embeds_m,
text_embeds=text_embeds,
text_embeds_m=text_embeds_m,
encoder_output=encoder_output_pos,
encoder_output_neg=encoder_output_neg,
itm_logits=itm_logits,
itm_labels=itm_labels,
),
)
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg, from_pretrained=False)
text_encoder = XBertEncoder.from_config(cfg)
embed_dim = cfg.get("embed_dim", 256)
momentum = cfg.get("momentum", 0.995)
alpha = cfg.get("alpha", 0.4)
temp = cfg.get("temp", 0.07)
max_txt_len = cfg.get("max_txt_len", 30)
queue_size = cfg.get("queue_size", 0)
use_distill = cfg.get("use_distill", True)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
queue_size=queue_size,
embed_dim=embed_dim,
temp=temp,
momentum=momentum,
alpha=alpha,
max_txt_len=max_txt_len,
use_distill=use_distill,
)
model.load_checkpoint_from_config(cfg)
return model
def compute_sim_matrix(self, data_loader, task_cfg):
"""
Compute i2t and t2i similarity matrices for the given data loader.
"""
k_test = task_cfg.k_test
return compute_sim_matrix(model=self, data_loader=data_loader, k_test=k_test)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_retrieval.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.modeling_outputs import (
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
ModelOutput,
)
@dataclass
class AlbefSimilarity(ModelOutput):
sim_i2t: torch.FloatTensor = None
sim_t2i: torch.FloatTensor = None
sim_i2t_m: Optional[torch.FloatTensor] = None
sim_t2i_m: Optional[torch.FloatTensor] = None
sim_i2t_targets: Optional[torch.FloatTensor] = None
sim_t2i_targets: Optional[torch.FloatTensor] = None
@dataclass
class AlbefIntermediateOutput(ModelOutput):
# uni-modal features
image_embeds: torch.FloatTensor = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds_m: Optional[torch.FloatTensor] = None
text_embeds_m: Optional[torch.FloatTensor] = None
# intermediate outputs of multimodal encoder
encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
encoder_output_m: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
itm_logits: Optional[torch.FloatTensor] = None
itm_labels: Optional[torch.LongTensor] = None
# intermediate outputs of multimodal decoder
decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None
decoder_labels: Optional[torch.LongTensor] = None
@dataclass
class AlbefOutput(ModelOutput):
# some finetuned models (e.g. AlbefVQA) do not compute similarity, thus optional.
sims: Optional[AlbefSimilarity] = None
intermediate_output: AlbefIntermediateOutput = None
loss: Optional[torch.FloatTensor] = None
loss_itc: Optional[torch.FloatTensor] = None
loss_itm: Optional[torch.FloatTensor] = None
loss_mlm: Optional[torch.FloatTensor] = None
@dataclass
class AlbefOutputWithLogits(AlbefOutput):
logits: torch.FloatTensor = None
logits_m: torch.FloatTensor = None
@dataclass
class AlbefOutputFeatures(ModelOutput):
"""
Data class of features from AlbefFeatureExtractor.
Args:
image_embeds: `torch.FloatTensor` of shape `(batch_size, num_patches+1, embed_dim)`, `optional`
image_features: `torch.FloatTensor` of shape `(batch_size, num_patches+1, feature_dim)`, `optional`
text_embeds: `torch.FloatTensor` of shape `(batch_size, sequence_length+1, embed_dim)`, `optional`
text_features: `torch.FloatTensor` of shape `(batch_size, sequence_length+1, feature_dim)`, `optional`
The first embedding or feature is for the [CLS] token.
Features are obtained by projecting the corresponding embedding into a normalized low-dimensional space.
"""
image_embeds: Optional[torch.FloatTensor] = None
image_embeds_proj: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
text_embeds_proj: Optional[torch.FloatTensor] = None
multimodal_embeds: Optional[torch.FloatTensor] = None
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_outputs.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import os
import time
import lavis.common.dist_utils as dist_utils
import torch
import torch.distributed as dist
import torch.nn.functional as F
from lavis.common.dist_utils import download_cached_file
from lavis.common.logger import MetricLogger
from lavis.common.utils import is_url
from lavis.models.base_model import BaseModel
from lavis.models.vit import interpolate_pos_embed
from transformers import BertTokenizer
class AlbefBase(BaseModel):
@classmethod
def init_tokenizer(cls):
return BertTokenizer.from_pretrained("bert-base-uncased")
def load_from_pretrained(self, url_or_filename, rename_text_keys=True):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint:
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
state_dict["visual_encoder.pos_embed"] = interpolate_pos_embed(
state_dict["visual_encoder.pos_embed"], self.visual_encoder
)
if (
"visual_encoder_m.pos_embed" in self.state_dict().keys()
and "visual_encoder_m.pos_embed" in state_dict
):
state_dict["visual_encoder_m.pos_embed"] = interpolate_pos_embed(
state_dict["visual_encoder_m.pos_embed"], self.visual_encoder_m
)
if rename_text_keys:
for key in list(state_dict.keys()):
if "bert" in key:
new_key = key.replace("bert.", "")
state_dict[new_key] = state_dict[key]
del state_dict[key]
for key in self.state_dict().keys():
if key in state_dict.keys():
if state_dict[key].shape != self.state_dict()[key].shape:
del state_dict[key]
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
def compute_sim_matrix(model, data_loader, **kwargs):
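# Two-stage retrieval evaluation: (1) rank all image-text pairs by the dot product of
# their projected unimodal features; (2) re-score only the top-k_test candidates per
# query with the ITM head. Candidate re-scoring is sharded across ranks and the partial
# score matrices are summed with all_reduce.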
k_test = kwargs.pop("k_test")
metric_logger = MetricLogger(delimiter=" ")
header = "Evaluation:"
logging.info("Computing features for evaluation...")
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_ids = []
text_embeds = []
text_atts = []
for i in range(0, num_text, text_bs):
text = texts[i : min(num_text, i + text_bs)]
text_input = model.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=35,
return_tensors="pt",
).to(model.device)
text_output = model.text_encoder.forward_text(text_input)
text_embed = F.normalize(
model.text_proj(text_output.last_hidden_state[:, 0, :])
)
text_embeds.append(text_embed)
text_ids.append(text_input.input_ids)
text_atts.append(text_input.attention_mask)
text_embeds = torch.cat(text_embeds, dim=0)
text_ids = torch.cat(text_ids, dim=0)
text_atts = torch.cat(text_atts, dim=0)
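# BLIP-style tokenizers expose an `enc_token_id`; when present it replaces the first
# ([CLS]) token so the fusion encoder sees its dedicated encoder token during matching.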
if hasattr(model.tokenizer, "enc_token_id"):
text_ids[:, 0] = model.tokenizer.enc_token_id
image_feats = []
image_embeds = []
for samples in data_loader:
image = samples["image"]
image = image.to(model.device)
image_feat = model.visual_encoder.forward_features(image)
image_embed = model.vision_proj(image_feat[:, 0, :])
image_embed = F.normalize(image_embed, dim=-1)
image_feats.append(image_feat.cpu())
image_embeds.append(image_embed)
image_feats = torch.cat(image_feats, dim=0)
image_embeds = torch.cat(image_embeds, dim=0)
sims_matrix = image_embeds @ text_embeds.t()
score_matrix_i2t = torch.full(
(len(data_loader.dataset.image), len(texts)), -100.0
).to(model.device)
num_tasks = dist_utils.get_world_size()
rank = dist_utils.get_rank()
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
# topk_sim, topk_idx = sims.topk(k=config["k_test"], dim=0)
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
encoder_output = image_feats[start + i].repeat(k_test, 1, 1).to(model.device)
encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(
model.device
)
output = model.text_encoder(
text_ids[topk_idx],
attention_mask=text_atts[topk_idx],
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_att,
return_dict=True,
)
score = model.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
score_matrix_i2t[start + i, topk_idx] = score + topk_sim
sims_matrix = sims_matrix.t()
score_matrix_t2i = torch.full(
(len(texts), len(data_loader.dataset.image)), -100.0
).to(model.device)
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
encoder_output = image_feats[topk_idx.cpu()].to(model.device)
encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(
model.device
)
output = model.text_encoder(
text_ids[start + i].repeat(k_test, 1),
attention_mask=text_atts[start + i].repeat(k_test, 1),
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_att,
return_dict=True,
)
score = model.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
score_matrix_t2i[start + i, topk_idx] = score + topk_sim
if dist_utils.is_dist_avail_and_initialized():
dist.barrier()
torch.distributed.all_reduce(
score_matrix_i2t, op=torch.distributed.ReduceOp.SUM
)
torch.distributed.all_reduce(
score_matrix_t2i, op=torch.distributed.ReduceOp.SUM
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Evaluation time {}".format(total_time_str))
return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import warnings
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.common.utils import get_abs_path
from lavis.models.albef_models import AlbefBase
from lavis.models.albef_models.albef_outputs import AlbefOutputFeatures
from lavis.models.med import BertForMaskedLM
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
from transformers import BertConfig
@registry.register_model("albef_feature_extractor")
class AlbefFeatureExtractor(AlbefBase):
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/albef_feature_extractor.yaml",
}
def __init__(self, image_encoder, text_encoder, embed_dim=256, max_txt_len=30):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.embed_dim = embed_dim
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.max_txt_len = max_txt_len
self.temp = nn.Parameter(0.07 * torch.ones([]))
@torch.no_grad()
def extract_features(self, samples, mode="multimodal"):
"""
Extract features for multimodal or unimodal samples.
Args:
samples (dict): A dictionary of samples, containing the following keys:
- image (torch.Tensor): A tensor of shape (B, C, H, W) containing the image.
Raw images should be preprocessed before being passed to feature extractor.
- text_input (list): A list of strings containing the text, length B.
mode (str): The mode of feature extraction. Can be either "multimodal", "text" or "image".
If "multimodal", return image features and multimodal features;
if "text", return text features;
if "image", return image features.
Default: "multimodal".
Returns:
An AlbefOutputFeatures object, see lavis/models/albef_models/albef_outputs.py for details.
Examples:
```python
>>> from PIL import Image
>>> from lavis.models import load_model_and_preprocess
>>> raw_image = Image.open("docs/data/merlion.png").convert("RGB")
>>> caption = "a large fountain spewing water into the air"
>>> model, vis_processors, txt_processors = load_model_and_preprocess("albef_feature_extractor", is_eval=True)
>>> image = vis_processors["eval"](raw_image).unsqueeze(0)
>>> text_input = txt_processors["eval"](caption)
>>> sample = {"image": image, "text_input": [text_input]}
>>> features_multimodal = model.extract_features(sample)
>>> features_multimodal.keys()
odict_keys(['image_embeds', 'multimodal_embeds'])
>>> features_multimodal.image_embeds.shape
torch.Size([1, 197, 768])
>>> features_multimodal.multimodal_embeds.shape
torch.Size([1, 12, 768])
>>> features_text = model.extract_features(sample, mode="text")
>>> features_text.keys()
odict_keys(['text_embeds', 'text_features'])
>>> features_text.text_embeds.shape
torch.Size([1, 12, 768])
>>> features_text.text_features.shape
torch.Size([1, 12, 256])
>>> features_image = model.extract_features(sample, mode="image")
>>> features_image.keys()
odict_keys(['image_embeds', 'image_features'])
>>> features_image.image_embeds.shape
torch.Size([1, 197, 768])
>>> features_image.image_features.shape
torch.Size([1, 197, 256])
```
"""
image = samples["image"]
caption = samples["text_input"]
if isinstance(mode, str):
mode = [mode]
for m in mode:
assert m in [
"multimodal",
"image",
"text",
], "mode must be one of [multimodal, image, text], but got {}".format(m)
# initialize outputs
image_embeds, text_embeds, multimodal_embeds = None, None, None
image_features, text_features = None, None
if "image" in mode or "multimodal" in mode:
assert (
image is not None
), "image must be provided if mode is 'image' or 'multimodal'"
image_embeds = self.visual_encoder.forward_features(image)
image_features = F.normalize(self.vision_proj(image_embeds), dim=-1)
if "text" in mode or "multimodal" in mode:
assert (
caption is not None
), "text must be provided if mode is 'text' or 'multimodal'"
text = self.tokenizer(
caption,
padding=True,
return_tensors="pt",
).to(self.device)
text_output = self.text_encoder.bert(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode="text",
)
text_embeds = text_output.last_hidden_state
text_features = F.normalize(self.text_proj(text_embeds), dim=-1)
if "multimodal" in mode:
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
# forward the positive image-text pair
output = self.text_encoder.bert(
encoder_embeds=text_embeds,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
mode="fusion",
)
multimodal_embeds = output.last_hidden_state
return AlbefOutputFeatures(
image_embeds=image_embeds,
image_embeds_proj=image_features,
text_embeds=text_embeds,
text_embeds_proj=text_features,
multimodal_embeds=multimodal_embeds,
)
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg, from_pretrained=True)
config_text_encoder = BertConfig.from_json_file(
get_abs_path(cfg["med_config_path"])
)
config_text_encoder.fusion_layer = 6
text_encoder = BertForMaskedLM.from_pretrained(
"bert-base-uncased", config=config_text_encoder
)
embed_dim = cfg.get("embed_dim", 256)
max_txt_len = cfg.get("max_txt_len", 30)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
embed_dim=embed_dim,
max_txt_len=max_txt_len,
)
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
if pretrain_path is not None:
msg = model.load_from_pretrained(
url_or_filename=pretrain_path, rename_text_keys=False
)
else:
warnings.warn("No pretrained weights are loaded.")
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_feature_extractor.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.common.utils import get_abs_path, is_url
from lavis.models.albef_models import AlbefBase
from lavis.models.albef_models.albef_outputs import AlbefIntermediateOutput, AlbefOutput
from lavis.models.base_model import MomentumDistilationMixin, tile
from lavis.models.med import BertConfig, BertLMHeadModel, XBertEncoder
from lavis.models.vit import VisionTransformerEncoder, interpolate_pos_embed
from lavis.common.dist_utils import download_cached_file
@registry.register_model("albef_vqa")
class AlbefVQA(AlbefBase, MomentumDistilationMixin):
"""
ALBEF VQA models.
Supported model types:
- base: vqa model initialized with pre-trained ALBEF base model on 115M image-text pairs after CapFilt; not fine-tuned.
- vqav2: fine-tuned ALBEF base model on VQA v2.0 dataset.
Usage:
>>> from lavis.models import load_model
>>> model = load_model("albef_vqa", "vqav2")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"vqav2": "configs/models/albef_vqav2.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
text_decoder,
use_distill=True,
momentum=0.995,
alpha=0.4,
max_txt_len=35,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.max_txt_len = max_txt_len
self.use_distill = use_distill
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
self.text_decoder = text_decoder
if self.use_distill:
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.text_decoder_m = deepcopy(self.text_decoder)
self.momentum = momentum
self.alpha = alpha
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.text_decoder, self.text_decoder_m],
]
self.copy_params()
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / num_iters_per_epoch)
def forward(self, samples):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- text_input (list): A list of strings, each string is a question
- answer (list): A list of strings, each string is an answer
- weight (torch.Tensor): A tensor used to weigh each answer in the loss computation.
The shape of the tensor is (sum(n_answers),)
- n_answers (torch.Tensor): A tensor shape (batch_size,) containing the number of answers
for each question in the batch.
Returns:
An AlbefOutput object containing loss and intermediate outputs;
see lavis/models/albef_models/albef_outputs.py for more details.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("albef_vqa")
>>> samples = {
... "image": torch.rand(2, 3, 384, 384),
... "text_input": ["What is this?", "What is that?"],
... "answer": ["cat", "cat", "dog"],
... "weight": torch.tensor([1.0, 1.0, 1.0]),
... "n_answers": torch.tensor([2, 1]),
... "epoch": 0, "iters": 0, "num_iters_per_epoch": 1000,
... }
>>> output = model(samples)
>>> output.keys()
odict_keys(['intermediate_output', 'loss'])
"""
(
encoder_output,
encoder_output_m,
image_embeds,
image_embeds_m,
) = self.forward_encoder(samples)
loss, decoder_output, decoder_targets = self.forward_decoder(
samples, encoder_out=(encoder_output, encoder_output_m)
)
return AlbefOutput(
loss=loss,
intermediate_output=AlbefIntermediateOutput(
image_embeds=image_embeds,
image_embeds_m=image_embeds_m,
encoder_output=encoder_output,
encoder_output_m=encoder_output_m,
decoder_output=decoder_output,
decoder_labels=decoder_targets,
),
)
def forward_encoder(self, samples):
questions = samples["text_input"]
questions = self.tokenizer(
questions,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
samples.update({"tokenized_text": questions})
image_embeds = self.visual_encoder.forward_features(samples["image"])
encoder_output = self.text_encoder.forward_automask(
tokenized_text=samples["tokenized_text"], visual_embeds=image_embeds
)
if self.use_distill:
self._momentum_update()
with torch.no_grad():
image_embeds_m = self.visual_encoder_m(samples["image"])
encoder_output_m = self.text_encoder_m.forward_automask(
tokenized_text=samples["tokenized_text"],
visual_embeds=image_embeds_m,
)
else:
encoder_output_m = None
image_embeds_m = None
return encoder_output, encoder_output_m, image_embeds, image_embeds_m
def forward_decoder(self, samples, encoder_out, **kwargs):
answers = self.tokenizer(
samples["answer"], padding="longest", return_tensors="pt"
).to(self.device)
answer_targets = answers.input_ids.masked_fill(
answers.input_ids == self.tokenizer.pad_token_id, -100
)
question_states = []
question_atts = []
question = samples["tokenized_text"]
question_output, question_output_m = encoder_out
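# each question may have several ground-truth answers: replicate its encoder states and
# attention mask once per answer so all (question, answer) pairs form one flat batch.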
for b, n in enumerate(samples["n_answers"]):
question_states += [question_output.last_hidden_state[b]] * n
question_atts += [question.attention_mask[b]] * n
question_states = torch.stack(question_states, dim=0)
question_atts = torch.stack(question_atts, dim=0)
if self.use_distill:
with torch.no_grad():
question_states_m = []
for b, n in enumerate(samples["n_answers"]):
question_states_m += [question_output_m.last_hidden_state[b]] * n
question_states_m = torch.stack(question_states_m, 0)
logits_m = self.text_decoder_m(
answers.input_ids,
attention_mask=answers.attention_mask,
encoder_hidden_states=question_states_m,
encoder_attention_mask=question_atts,
return_logits=True,
)
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
answer_output = self.text_decoder(
answers.input_ids,
attention_mask=answers.attention_mask,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
labels=answer_targets,
soft_labels=F.softmax(logits_m, dim=-1),
alpha=alpha,
return_dict=True,
reduction="none",
)
loss = samples["weight"] * answer_output.loss
bsz = samples["image"].size(0)
loss = loss.sum() / bsz
return loss, answer_output, answer_targets
def predict_answers(self, samples, answer_list, num_ans_candidates=128, **kwargs):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- text_input (str or [str]): String or a list of strings, each string is a question.
The number of questions must equal the batch size. A single string is converted to a list of length 1 first.
num_ans_candidates (int): Number of answer candidates, used to filter out answers with low probability.
answer_list (list): A list of strings, each string is an answer.
Returns:
List: A list of strings, each string is an answer.
Examples:
>>> from PIL import Image
>>> from lavis.models import load_model_and_preprocess
>>> model, vis_processors, txt_processors = load_model_and_preprocess("albef_vqa", "vqav2")
>>> raw_image = Image.open("docs/data/merlion.png").convert("RGB")
>>> question = "Which city is this photo taken?"
>>> image = vis_processors["eval"](raw_image).unsqueeze(0)
>>> question = txt_processors["eval"](question)
>>> samples = {"image": image, "text_input": [question]}
>>> answer_list = ["Singapore", "London", "Palo Alto", "Tokyo"]
>>> answers = model.predict_answers(samples, answer_list=answer_list)
>>> answers
['Singapore']
"""
if isinstance(samples["text_input"], str):
samples["text_input"] = [samples["text_input"]]
assert len(samples["text_input"]) == samples["image"].size(
0
), "The number of questions must be equal to the batch size."
num_ans_candidates = min(num_ans_candidates, len(answer_list))
return self.rank_answers(
samples, answer_list=answer_list, num_ans_candidates=num_ans_candidates
)
def rank_answers(self, samples, answer_list, num_ans_candidates):
"""
Generate the first answer token with the decoder and keep the ${num_ans_candidates} answers
from the answer list whose first token is most probable. Then decode each remaining candidate,
using it as the label for the LM loss, and return for each question the answer with the lowest loss.
"""
answer_candidates = self.tokenizer(
answer_list, padding="longest", return_tensors="pt"
).to(self.device)
# answer_candidates.input_ids[:, 0] = self.tokenizer.bos_token_id
answer_ids = answer_candidates.input_ids
answer_atts = answer_candidates.attention_mask
question_output, _, _, _ = self.forward_encoder(samples)
question_states = question_output.last_hidden_state
tokenized_question = samples["tokenized_text"]
question_atts = tokenized_question.attention_mask
num_ques = question_states.size(0)
start_ids = answer_ids[0, 0].repeat(num_ques, 1) # bos token
start_output = self.text_decoder(
start_ids,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
return_dict=True,
reduction="none",
)
logits = start_output.logits[:, 0, :] # first token's logit
# topk_probs: top-k probability
# topk_ids: [num_question, k]
answer_first_token = answer_ids[:, 1]
prob_first_token = F.softmax(logits, dim=1).index_select(
dim=1, index=answer_first_token
)
topk_probs, topk_ids = prob_first_token.topk(num_ans_candidates, dim=1)
# answer input: [num_question*k, answer_len]
input_ids = []
input_atts = []
for b, topk_id in enumerate(topk_ids):
input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
input_ids = torch.cat(input_ids, dim=0)
input_atts = torch.cat(input_atts, dim=0)
targets_ids = input_ids.masked_fill(
input_ids == self.tokenizer.pad_token_id, -100
)
# repeat encoder's output for top-k answers
question_states = tile(question_states, 0, num_ans_candidates)
question_atts = tile(question_atts, 0, num_ans_candidates)
output = self.text_decoder(
input_ids,
attention_mask=input_atts,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
labels=targets_ids,
return_dict=True,
reduction="none",
)
log_probs_sum = -output.loss
log_probs_sum = log_probs_sum.view(num_ques, num_ans_candidates)
max_topk_ids = log_probs_sum.argmax(dim=1)
max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids]
answers = [answer_list[max_id] for max_id in max_ids]
return answers
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
text_encoder = XBertEncoder.from_config(cfg)
config_decoder = BertConfig.from_json_file(get_abs_path(cfg["med_config_path"]))
config_decoder.fusion_layer = 0
config_decoder.num_hidden_layers = 6
text_decoder = BertLMHeadModel.from_pretrained(
"bert-base-uncased", config=config_decoder
)
alpha = cfg.get("alpha", 0.4)
momentum = cfg.get("momentum", 0.995)
use_distill = cfg.get("use_distill", True)
max_txt_len = cfg.get("max_txt_len", 25)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
text_decoder=text_decoder,
use_distill=use_distill,
momentum=momentum,
alpha=alpha,
max_txt_len=max_txt_len,
)
# load pre-trained weights
model.load_checkpoint_from_config(cfg)
return model
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint:
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
# reshape positional embedding to accommodate the image resolution change
pos_embed_reshaped = interpolate_pos_embed(
state_dict["visual_encoder.pos_embed"], self.visual_encoder
)
state_dict["visual_encoder.pos_embed"] = pos_embed_reshaped
m_pos_embed_reshaped = interpolate_pos_embed(
state_dict["visual_encoder_m.pos_embed"], self.visual_encoder_m
)
state_dict["visual_encoder_m.pos_embed"] = m_pos_embed_reshaped
for key in list(state_dict.keys()):
if "bert" in key:
encoder_key = key.replace("bert.", "")
state_dict[encoder_key] = state_dict[key]
# initialize text decoder as multimodal encoder (last 6 layers of model.text_encoder)
if "text_encoder" in key:
if "layer" in key:
encoder_keys = key.split(".")
layer_num = int(encoder_keys[4])
if layer_num < 6:
del state_dict[key]
continue
else:
decoder_layer_num = layer_num - 6
encoder_keys[4] = str(decoder_layer_num)
encoder_key = ".".join(encoder_keys)
else:
encoder_key = key
decoder_key = encoder_key.replace("text_encoder", "text_decoder")
state_dict[decoder_key] = state_dict[key]
del state_dict[key]
for key in self.state_dict().keys():
if key in state_dict.keys():
if state_dict[key].shape != self.state_dict()[key].shape:
del state_dict[key]
msg = self.load_state_dict(state_dict, strict=False)
logging.info("load checkpoint from %s" % url_or_filename)
logging.info(f"missing keys: {msg.missing_keys}")
return msg
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_vqa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.common.utils import get_abs_path
from lavis.models.albef_models import AlbefBase
from lavis.models.albef_models.albef_outputs import AlbefIntermediateOutput, AlbefOutput
from lavis.models.base_model import MomentumDistilationMixin
from lavis.models.med import BertModel
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
from transformers import BertConfig
@registry.register_model("albef_nlvr")
class AlbefNLVR(AlbefBase, MomentumDistilationMixin):
PRETRAINED_MODEL_CONFIG_DICT = {
"nlvr": "configs/models/albef_nlvr.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
num_classes,
momentum=0.995,
alpha=0.4,
use_distill=True,
max_txt_len=40,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.max_txt_len = max_txt_len
self.use_distill = use_distill
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
hidden_size = text_encoder.config.hidden_size
self.cls_head = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, num_classes),
)
self.share_cross_attention(self.text_encoder.encoder)
if self.use_distill:
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.cls_head_m = deepcopy(self.cls_head)
self.share_cross_attention(self.text_encoder_m.encoder)
self.momentum = momentum
self.alpha = alpha
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.cls_head, self.cls_head_m],
]
self.copy_params()
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / (2 * num_iters_per_epoch))
def forward(self, samples, is_train=True):
"""
Forward function for training and evaluation.
Args:
samples (dict): a dict of input samples, which contains the following keys:
- image0 (torch.Tensor): input image 0, shape (batch_size, 3, H, W), default H=384, W=384.
- image1 (torch.Tensor): input image 1, shape (batch_size, 3, H, W), default H=384, W=384.
- text_input (list): list of strings, each string is a natural language sentence.
- label (torch.LongTensor): ground truth label with shape (batch_size,).
is_train (bool): whether the model is in training mode.
If True, the model will return the loss;
If False, the model will return the prediction.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("albef_nlvr")
>>> samples = {
... "image0": torch.randn(2, 3, 384, 384),
... "image1": torch.randn(2, 3, 384, 384),
... "text_input": ["there is a ferret in tall grass", "there are lips in one of the images"],
... "label": torch.tensor([0, 1]),
... }
>>> output = model(samples)
>>> output.keys()
odict_keys(['intermediate_output', 'loss'])
"""
text = samples["text_input"]
text = self.tokenizer(
text,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
targets = samples["label"]
image0 = samples["image0"]
image1 = samples["image1"]
images = torch.cat([image0, image1], dim=0)
image_embeds = self.visual_encoder.forward_features(images)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
image0_embeds, image1_embeds = torch.split(image_embeds, targets.size(0))
encoder_output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=[image0_embeds, image1_embeds],
encoder_attention_mask=[
image_atts[: image0_embeds.size(0)],
image_atts[image0_embeds.size(0) :],
],
return_dict=True,
)
prediction = self.cls_head(encoder_output.last_hidden_state[:, 0, :])
if is_train:
if self.use_distill:
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(images)
image0_embeds_m, image1_embeds_m = torch.split(
image_embeds_m, targets.size(0)
)
encoder_output_m = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=[image0_embeds_m, image1_embeds_m],
encoder_attention_mask=[
image_atts[: image0_embeds_m.size(0)],
image_atts[image0_embeds_m.size(0) :],
],
return_dict=True,
)
prediction_m = self.cls_head_m(
encoder_output_m.last_hidden_state[:, 0, :]
)
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
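# distillation loss: (1 - alpha) * cross-entropy to the hard labels, minus alpha * the soft
# cross-entropy between the student's log-probabilities and the momentum teacher's probabilities.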
loss = (1 - alpha) * F.cross_entropy(
prediction, targets
) - alpha * torch.sum(
F.log_softmax(prediction, dim=1) * F.softmax(prediction_m, dim=1),
dim=1,
).mean()
else:
loss = F.cross_entropy(prediction, targets)
encoder_output_m = None
image0_embeds_m, image1_embeds_m = None, None
# return {"loss": loss}
return AlbefOutput(
loss=loss,
intermediate_output=AlbefIntermediateOutput(
image_embeds=torch.stack([image0_embeds, image1_embeds], dim=0),
image_embeds_m=torch.stack(
[image0_embeds_m, image1_embeds_m], dim=0
),
encoder_output=encoder_output,
encoder_output_m=encoder_output_m,
),
)
else:
return {"predictions": prediction, "targets": targets}
def share_cross_attention(self, model):
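# Tie the key/value projections of each pair of consecutive cross-attention layers
# (6 & 7, 8 & 9, ..., 16 & 17), so the two image streams are processed with shared
# cross-attention parameters.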
for i in range(6):
layer_num = 6 + i * 2
modules_0 = model.layer[layer_num].crossattention.self._modules
modules_1 = model.layer[layer_num + 1].crossattention.self._modules
for name in modules_0.keys():
if "key" in name or "value" in name:
module_0 = modules_0[name]
module_1 = modules_1[name]
if hasattr(module_0, "weight"):
module_0.weight = module_1.weight
if hasattr(module_0, "bias"):
module_0.bias = module_1.bias
def predict(self, samples):
output = self.forward(samples, is_train=False)
return output
def load_from_pretrained(self, url_or_filename, use_distill=True):
_, msg = super().load_from_pretrained(url_or_filename)
if use_distill and any(["_m" in k for k in msg.missing_keys]):
# this is required when initializing the model from TA pre-trained weights
self.copy_params()
return msg
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
# text encoder + multimodal encoder
bert_config = BertConfig.from_json_file(get_abs_path(cfg["med_config_path"]))
bert_config.num_hidden_layers = 18
text_encoder = BertModel.from_pretrained(
"bert-base-uncased", config=bert_config, add_pooling_layer=False
)
alpha = cfg.get("alpha", 0.4)
momentum = cfg.get("momentum", 0.995)
use_distill = cfg.get("use_distill", True)
num_classes = cfg.get("num_classes", -1)
max_txt_len = cfg.get("max_txt_len", 40)
assert num_classes > 1, "Invalid number of classes provided, found {}".format(
num_classes
)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
use_distill=use_distill,
alpha=alpha,
num_classes=num_classes,
momentum=momentum,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_nlvr.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import warnings
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.albef_models import AlbefBase
from lavis.models.albef_models.albef_outputs import (
AlbefIntermediateOutput,
AlbefOutputWithLogits,
)
from lavis.models.base_model import MomentumDistilationMixin
from lavis.models.med import XBertEncoder
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
@registry.register_model("albef_classification")
class AlbefClassification(AlbefBase, MomentumDistilationMixin):
PRETRAINED_MODEL_CONFIG_DICT = {
"ve": "configs/models/albef_classification_ve.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
num_classes,
momentum=0.995,
alpha=0.4,
use_distill=True,
max_txt_len=40,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.max_txt_len = max_txt_len
self.use_distill = use_distill
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
hidden_size = text_encoder.config.hidden_size
if num_classes > 0:
self.cls_head = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, num_classes),
)
else:
warnings.warn(
f"Found num_classes=0, initializing {type(self)} without classifier."
)
if self.use_distill:
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.cls_head_m = deepcopy(self.cls_head)
self.momentum = momentum
self.alpha = alpha
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.cls_head, self.cls_head_m],
]
self.copy_params()
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / num_iters_per_epoch)
def forward(self, samples, is_train=True):
sentences = samples["text_input"]
sentences = self.tokenizer(
sentences,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
samples.update({"tokenized_text": sentences})
targets = samples["label"]
image_embeds = self.visual_encoder.forward_features(samples["image"])
encoder_output = self.text_encoder.forward_automask(
samples["tokenized_text"], image_embeds
)
prediction = self.cls_head(encoder_output.last_hidden_state[:, 0, :])
if is_train:
if self.use_distill:
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(samples["image"])
encoder_output_m = self.text_encoder_m.forward_automask(
samples["tokenized_text"], image_embeds_m
)
prediction_m = self.cls_head_m(
encoder_output_m.last_hidden_state[:, 0, :]
)
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
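# distillation objective (same form as in AlbefNLVR): cross-entropy to the hard labels,
# blended with the momentum model's soft predictions via the ramped-up alpha.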
loss = (1 - alpha) * F.cross_entropy(
prediction, targets
) - alpha * torch.sum(
F.log_softmax(prediction, dim=1) * F.softmax(prediction_m, dim=1),
dim=1,
).mean()
else:
loss = F.cross_entropy(prediction, targets)
image_embeds_m, encoder_output_m, prediction_m = None, None, None
# return {"loss": loss}
return AlbefOutputWithLogits(
loss=loss,
intermediate_output=AlbefIntermediateOutput(
image_embeds=image_embeds,
image_embeds_m=image_embeds_m,
encoder_output=encoder_output,
encoder_output_m=encoder_output_m,
),
logits=prediction,
logits_m=prediction_m,
)
else:
return {"predictions": prediction, "targets": targets}
def predict(self, samples):
output = self.forward(samples, is_train=False)
return output
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
# text encoder + multimodal encoder
text_encoder = XBertEncoder.from_config(cfg)
alpha = cfg.get("alpha", 0.4)
momentum = cfg.get("momentum", 0.995)
use_distill = cfg.get("use_distill", True)
num_classes = cfg.get("num_classes", -1)
max_txt_len = cfg.get("max_txt_len", 40)
assert num_classes > 1, "Invalid number of classes provided, found {}".format(
num_classes
)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
use_distill=use_distill,
alpha=alpha,
num_classes=num_classes,
momentum=momentum,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_classification.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from copy import deepcopy
import numpy as np
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.common.utils import get_abs_path
from lavis.models.albef_models import AlbefBase
from lavis.models.albef_models.albef_outputs import (
AlbefIntermediateOutput,
AlbefOutput,
AlbefSimilarity,
)
from lavis.models.base_model import MomentumDistilationMixin, SharedQueueMixin
from lavis.models.med import BertForMaskedLM
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
from transformers import BertConfig
@registry.register_model("albef_pretrain")
class AlbefPretrain(AlbefBase, MomentumDistilationMixin, SharedQueueMixin):
"""
ALBEF pretrain model.
Supported model types:
- base: ALBEF base model used for pretraining.
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/albef_pretrain_base.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
queue_size,
embed_dim=256,
mlm_mask_prob=0.15,
temp=0.07,
momentum=0.995,
alpha=0.4,
max_txt_len=30,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.embed_dim = embed_dim
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.itm_head = nn.Linear(text_width, 2)
# create the momentum encoder
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.vision_proj_m = deepcopy(self.vision_proj)
self.text_proj_m = deepcopy(self.text_proj)
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.vision_proj, self.vision_proj_m],
[self.text_proj, self.text_proj_m],
]
self.copy_params()
# create the queue
self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
self.queue_size = queue_size
self.momentum = momentum
self.temp = nn.Parameter(temp * torch.ones([]))
self.alpha = alpha
self.max_txt_len = max_txt_len
self.mlm_probability = mlm_mask_prob
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / (2 * num_iters_per_epoch))
def forward(self, samples):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). The input images. Default: H=224, W=224.
- text_input (list): A list of length batch_size, each element is a string of text/caption.
- epoch (int): The current epoch.
- iters (int): The current iteration.
- num_iters_per_epoch (int): The number of iterations per epoch.
Returns:
AlbefOutput: An AlbefOutput object containing loss and intermediate output. See ``lavis.models.albef_models.albef_outputs.AlbefOutput`` for more details.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("albef_pretrain")
>>> images = torch.randn(4, 3, 224, 224)
>>> text_input = ["caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3"]
>>> samples = {"image": images, "text_input": text_input, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100}
>>> output = model(samples)
>>> output.keys()
odict_keys(['sims', 'intermediate_output', 'loss', 'loss_itc', 'loss_itm', 'loss_mlm'])
"""
image = samples["image"]
caption = samples["text_input"]
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
with torch.no_grad():
self.temp.clamp_(0.001, 0.5)
image_embeds = self.visual_encoder.forward_features(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
text = self.tokenizer(
caption,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_output = self.text_encoder.bert(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode="text",
)
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(self.text_proj(text_embeds[:, 0, :]), dim=-1)
# get momentum features
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(image)
image_feat_m = F.normalize(
self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1
)
image_feat_all = torch.cat(
[image_feat_m.t(), self.image_queue.clone().detach()], dim=1
)
text_output_m = self.text_encoder_m.bert(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode="text",
)
text_embeds_m = text_output_m.last_hidden_state
text_feat_m = F.normalize(self.text_proj_m(text_embeds_m[:, 0, :]), dim=-1)
text_feat_all = torch.cat(
[text_feat_m.t(), self.text_queue.clone().detach()], dim=1
)
sim_i2t_m = image_feat_m @ text_feat_all / self.temp
sim_t2i_m = text_feat_m @ image_feat_all / self.temp
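# during pretraining each image/text has exactly one positive (its paired caption/image in the
# batch), so the hard target similarity matrix is the identity; it is then smoothed with the
# momentum model's predictions, weighted by the ramped-up alpha.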
sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
sim_targets.fill_diagonal_(1)
sim_i2t_targets = (
alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
)
sim_t2i_targets = (
alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets
)
sim_i2t = image_feat @ text_feat_all / self.temp
sim_t2i = text_feat @ image_feat_all / self.temp
loss_i2t = -torch.sum(
F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1
).mean()
loss_t2i = -torch.sum(
F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1
).mean()
loss_itc = (loss_i2t + loss_t2i) / 2
self._dequeue_and_enqueue(image_feat_m, text_feat_m)
# forward the positive image-text pair
encoder_output_pos = self.text_encoder.bert(
encoder_embeds=text_embeds,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
mode="fusion",
)
with torch.no_grad():
bs = image.size(0)
weights_i2t = sim_i2t[:, :bs].clone()
weights_t2i = sim_t2i[:, :bs].clone()
weights_i2t.fill_diagonal_(-np.Inf)
weights_t2i.fill_diagonal_(-np.Inf)
weights_i2t = F.softmax(weights_i2t, dim=1)
weights_t2i = F.softmax(weights_t2i, dim=1)
# select a negative image for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text for each image
text_embeds_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_embeds_neg.append(text_embeds[neg_idx])
text_atts_neg.append(text.attention_mask[neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0)
text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)
image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
image_atts_all = torch.cat([image_atts, image_atts], dim=0)
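# Negative batch layout for ITM: the first bs rows pair each positive text with a
# sampled hard-negative image, the next bs rows pair each sampled hard-negative text
# with its positive image. Together with encoder_output_pos this yields bs positive
# and 2 * bs negative pairs, matching itm_labels constructed below.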
encoder_output_neg = self.text_encoder.bert(
encoder_embeds=text_embeds_all,
attention_mask=text_atts_all,
encoder_hidden_states=image_embeds_all,
encoder_attention_mask=image_atts_all,
return_dict=True,
mode="fusion",
)
vl_embeddings = torch.cat(
[
encoder_output_pos.last_hidden_state[:, 0, :],
encoder_output_neg.last_hidden_state[:, 0, :],
],
dim=0,
)
itm_logits = self.itm_head(vl_embeddings)
itm_labels = torch.cat(
[torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
dim=0,
).to(self.device)
loss_itm = F.cross_entropy(itm_logits, itm_labels)
# MLM
input_ids = text.input_ids.clone()
labels = input_ids.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
input_ids, labels = self.mask(
input_ids,
self.text_encoder.config.vocab_size,
self.device,
targets=labels,
probability_matrix=probability_matrix,
)
with torch.no_grad():
logits_m = self.text_encoder_m(
input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds_m,
encoder_attention_mask=image_atts,
return_dict=True,
return_logits=True,
)
mlm_output = self.text_encoder(
input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
labels=labels,
soft_labels=F.softmax(logits_m, dim=-1),
alpha=alpha,
)
loss_mlm = mlm_output.loss
return AlbefOutput(
loss=loss_itc + loss_itm + loss_mlm,
loss_itc=loss_itc,
loss_itm=loss_itm,
loss_mlm=loss_mlm,
sims=AlbefSimilarity(
sim_i2t=sim_i2t,
sim_t2i=sim_t2i,
sim_i2t_m=sim_i2t_m,
sim_t2i_m=sim_t2i_m,
sim_i2t_targets=sim_i2t_targets,
sim_t2i_targets=sim_t2i_targets,
),
intermediate_output=AlbefIntermediateOutput(
image_embeds=image_embeds,
image_embeds_m=image_embeds_m,
text_embeds=text_embeds,
text_embeds_m=text_embeds_m,
encoder_output=encoder_output_pos,
encoder_output_neg=encoder_output_neg,
itm_logits=itm_logits,
itm_labels=itm_labels,
),
)
def mask(
self,
input_ids,
vocab_size,
device,
targets=None,
masked_indices=None,
probability_matrix=None,
):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
if masked_indices is None:
masked_indices = torch.bernoulli(probability_matrix).bool()
masked_indices[input_ids == self.tokenizer.pad_token_id] = False
masked_indices[input_ids == self.tokenizer.cls_token_id] = False
if targets is not None:
targets[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
)
input_ids[indices_replaced] = self.tokenizer.mask_token_id
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(
device
)
input_ids[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
if targets is not None:
return input_ids, targets
else:
return input_ids
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg, from_pretrained=True)
config_text_encoder = BertConfig.from_json_file(
get_abs_path(cfg["med_config_path"])
)
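# Assumption based on the ALBEF/med BERT design: fusion_layer = 6 splits the 12-layer
# BERT so the lower six layers run as a text-only encoder and the upper six act as the
# multimodal fusion encoder with cross-attention to image features.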
config_text_encoder.fusion_layer = 6
text_encoder = BertForMaskedLM.from_pretrained(
"bert-base-uncased", config=config_text_encoder
)
embed_dim = cfg.get("embed_dim", 256)
momentum = cfg.get("momentum", 0.995)
alpha = cfg.get("alpha", 0.4)
mlm_mask_prob = cfg.get("mlm_mask_prob", 0.15)
temp = cfg.get("temp", 0.07)
max_txt_len = cfg.get("max_txt_len", 30)
queue_size = cfg.get("queue_size", 65536)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
queue_size=queue_size,
embed_dim=embed_dim,
mlm_mask_prob=mlm_mask_prob,
temp=temp,
momentum=momentum,
alpha=alpha,
max_txt_len=max_txt_len,
)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/albef_models/albef_pretrain.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.nn as nn
from lavis.common.registry import registry
from lavis.models.base_model import BaseModel
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import GPT2LMHeadModel
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
@registry.register_model("gpt_dialogue")
class GPTDialogue(BaseModel, GPT2LMHeadModel):
PRETRAINED_MODEL_CONFIG_DICT = {"base": "configs/models/gpt_dialogue_base.yaml"}
def __init__(self, config, len_video_ft=4224):
super().__init__(config)
self.video_ff = nn.Linear(len_video_ft, config.n_embd)
self.video_ff_out = nn.Linear(config.n_embd, len_video_ft)
# Model parallel
self.model_parallel = False
self.device_map = None
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
samples,
past_key_values=None,
position_ids=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
input_embs = self.transformer.wte(samples["input_ids"])
video_embs = self.video_ff(samples["video_fts"])
input_embs = torch.cat([video_embs, input_embs], dim=1)
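# Video features are projected into the GPT-2 embedding space by video_ff and
# prepended to the token embeddings, so the transformer attends over the concatenated
# [video features; dialogue tokens] sequence.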
transformer_outputs = self.transformer(
attention_mask=samples["attn_mask"],
token_type_ids=samples["token_type_ids"],
inputs_embeds=input_embs,
position_ids=position_ids,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if samples["labels"] is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = samples["labels"][..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
)
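# Besides the token-level LM loss above, the prefix positions holding video features
# get an MSE regression loss: video_ff_out maps hidden states back to the
# video-feature space and each position predicts the next (shifted) frame feature;
# the two losses are summed.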
if samples["video_fts"] is not None:
len_video_fts = samples["video_fts"].shape[1]
video_logits = self.video_ff_out(hidden_states[:, :len_video_fts, :])
# Shift so that tokens < n predict n
shift_logits = video_logits[..., :-1, :].contiguous()
shift_labels = samples["video_fts"][..., 1:, :].contiguous()
# Flatten the tokens
loss_fct = MSELoss(reduction="mean")
video_loss = loss_fct(shift_logits, shift_labels)
if loss is not None:
loss = loss + video_loss
else:
loss = video_loss
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@classmethod
def from_config(cls, cfg):
model = cls.__bases__[1].from_pretrained("gpt2")
model.resize_token_embeddings(cfg["len_tokenizer"])
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/gpt_models/gpt_dialogue.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/img2prompt_models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import random
import spacy
import torch
import torch.nn.functional as F
from transformers import T5ForConditionalGeneration, T5Tokenizer
from lavis.common.dist_utils import download_cached_file
from lavis.common.registry import registry
from lavis.models.base_model import BaseModel
from lavis.models.blip_models.blip_image_text_matching import compute_gradcam
open_pos = ["NOUN", "VERB", "ADJ", "ADV", "NUM"]
@registry.register_model("img2prompt_vqa")
class Img2PromptVQA(BaseModel):
"""
Img2Prompt_VQA model consists of three submodels for zero-shot VQA:
1. Image-questioning matching model
2. Image captioning model
3. Large Language model
Supported model types:
- base: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-base)
- large: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-large)
- 3b: BLIPITM, BLIPCaption, PNPUnifiedQAv2FiD (t5-3b)
Usage:
>>> from lavis.models import load_model
>>> model = load_model("img2prompt_vqa", "base", is_eval=True)
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/img2prompt-vqa/img2prompt_vqa_base.yaml",
}
def __init__(
self,
image_question_matching_model,
image_captioning_model,
question_generation_model,
question_generation_tokenizer,
offload_model=False,
):
super().__init__()
self.image_question_matching_model = image_question_matching_model
self.image_captioning_model = image_captioning_model
self.question_generation_model = question_generation_model
self.question_generation_tokenizer = question_generation_tokenizer
self.offload_model = offload_model
self.nlp = spacy.load("en_core_web_sm")
def forward_itm(self, samples, block_num=7):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
block_num (int): The index of cross-attention block for gradcam computation.
Returns:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
"""
image = samples["image"]
question = [text.strip("?") for text in samples["text_input"]]
tokenized_text = self.image_question_matching_model.tokenizer(
question, padding="longest", truncation=True, return_tensors="pt"
).to(self.image_question_matching_model.device)
with torch.set_grad_enabled(True):
gradcams, _ = compute_gradcam(
model=self.image_question_matching_model,
visual_input=image,
text_input=question,
tokenized_text=tokenized_text,
block_num=block_num,
)
gradcams = [gradcam_[1] for gradcam_ in gradcams]
samples["gradcams"] = torch.stack(gradcams).reshape(
samples["image"].size(0), -1
)
return samples
def itm_rank(self, image_embeds, image_atts, encoder_input_ids, match_head="itm"):
# breakpoint()
encoder_input_ids = encoder_input_ids.clone()
encoder_input_ids = encoder_input_ids[:, self.prompt_length - 1 :]
text_attention_mask = (encoder_input_ids != self.tokenizer.pad_token_id).long()
if match_head == "itm":
# encoder_input_ids = encoder_input_ids.clone()
encoder_input_ids[:, 0] = self.tokenizer.enc_token_id
output = self.text_encoder(
encoder_input_ids,
attention_mask=text_attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
itm_output = self.itm_head(output.last_hidden_state[:, 0, :])
return itm_output # , mask, token_length
elif match_head == "itc":
encoder_input_ids[:, 0] = self.tokenizer.cls_token_id
text_output = self.text_encoder(
encoder_input_ids,
attention_mask=text_attention_mask,
return_dict=True,
mode="text",
)
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_feat = F.normalize(
self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1
)
sim = image_feat @ text_feat.t()
return sim
def forward_cap(
self,
samples,
cap_max_length=20,
cap_min_length=0,
top_p=1,
top_k=50,
repetition_penalty=1.0,
num_captions=100,
num_patches=20,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
cap_max_length (int): The maximum length of the caption to be generated.
cap_min_length (int): The minimum length of the caption to be generated.
top_p (float): The cumulative probability for nucleus sampling.
top_k (float): The number of the highest probability tokens for top-k sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_captions (int): Number of captions generated for each image.
num_patches (int): Number of patches sampled for each image.
Returns:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size
- gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
- captions (nested list): A nested list of strings of total length batch_size * num_captions
"""
encoder_out = self.image_captioning_model.forward_encoder(samples)
captions = [[] for _ in range(encoder_out.size(0))]
min_num_captions = 0
while min_num_captions < num_captions:
encoder_out_samples = []
for i in range(num_captions):
patch_id = (
torch.multinomial(
samples["gradcams"].to(self.image_captioning_model.device),
num_patches,
).reshape(encoder_out.size(0), -1)
+ 1
)
patch_id = (
patch_id.sort(dim=1)
.values.unsqueeze(-1)
.expand(-1, -1, encoder_out.size(2))
)
encoder_out_sample = torch.gather(encoder_out, 1, patch_id)
encoder_out_samples.append(encoder_out_sample)
stacked = torch.stack(encoder_out_samples, dim=1)
image_embeds = torch.flatten(
stacked, start_dim=0, end_dim=1
) # (bsz*num_seq, num_patch, dim)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.image_captioning_model.device
)
model_kwargs = {
"encoder_hidden_states": image_embeds,
"encoder_attention_mask": image_atts,
}
prompt = [self.image_captioning_model.prompt] * image_embeds.size(0)
prompt = self.image_captioning_model.tokenizer(
prompt, return_tensors="pt"
).to(self.image_captioning_model.device)
prompt.input_ids[:, 0] = self.image_captioning_model.tokenizer.bos_token_id
prompt.input_ids = prompt.input_ids[:, :-1]
decoder_out = self.image_captioning_model.text_decoder.generate(
input_ids=prompt.input_ids,
max_length=cap_max_length,
min_length=cap_min_length,
do_sample=True,
top_p=top_p,
top_k=top_k,
num_return_sequences=1,
eos_token_id=self.image_captioning_model.tokenizer.sep_token_id,
pad_token_id=self.image_captioning_model.tokenizer.pad_token_id,
repetition_penalty=repetition_penalty,
**model_kwargs
)
itm_outputs = self.image_question_matching_model.itm_rank(
image_embeds, image_atts, encoder_input_ids=decoder_out
) # caption filter
outputs = self.image_captioning_model.tokenizer.batch_decode(
decoder_out, skip_special_tokens=True
)
for counter, output in enumerate(outputs):
ind = counter // num_captions
if len(captions[ind]) < num_captions:
caption = output[len(self.image_captioning_model.prompt) :]
overlap_caption = [1 for caps in captions[ind] if caption in caps]
# print(itm_outputs)
if (
len(overlap_caption) == 0 and itm_outputs[counter] >= 0.5
): # image filter
captions[ind].append(caption)
min_num_captions = min([len(i) for i in captions])
samples["captions"] = captions
return samples
def answer_extraction(self, caption, num_question_generation=30):
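# Extracts candidate answers from the generated captions: open-class tokens (nouns,
# verbs, adjectives, adverbs, numerals), named entities, and short noun chunks.
# ans_to_cap_dict maps each lower-cased answer to the indices of captions mentioning
# it; the collected answers are then sorted by frequency, deduplicated, and turned
# into "answer: ... context: ..." inputs for question generation.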
cap_use = ""
# print(caption)
caption = caption
ans_to_cap_dict = {}
answers = []
for cap_idx, cap in enumerate(caption):
# print(cap)
cap_use += cap
cap = cap.strip().strip(".")
# print(cap)
cap = self.nlp(cap)
for token in cap: # Noun /Verb/Adj//NUM
if token.pos_ in open_pos:
if token.text.lower() not in ans_to_cap_dict:
ans_to_cap_dict[token.text.lower()] = [cap_idx]
else:
if cap_idx not in ans_to_cap_dict[token.text.lower()]:
ans_to_cap_dict[token.text.lower()].append(cap_idx)
answers.append(token.text)
for ent in cap.ents:
if ent.text not in answers:
if ent.text.lower() not in ans_to_cap_dict:
ans_to_cap_dict[ent.text.lower()] = [cap_idx]
else:
if cap_idx not in ans_to_cap_dict[ent.text.lower()]:
ans_to_cap_dict[ent.text.lower()].append(cap_idx)
answers.append(ent.text)
for chunk in cap.noun_chunks:
if len(chunk.text.split()) < 4:
if chunk.text.lower() not in ans_to_cap_dict:
ans_to_cap_dict[chunk.text.lower()] = [cap_idx]
else:
if cap_idx not in ans_to_cap_dict[chunk.text.lower()]:
ans_to_cap_dict[chunk.text.lower()].append(cap_idx)
# print(chunk.text)
answers.append(chunk.text)
answers = sorted(answers, key=answers.count, reverse=True)
real_answers = []
for i in answers:
i = i + "."
if i not in real_answers:
real_answers.append(i)
contexts_for_question_generation = []
answers = []
for ans in real_answers[
:num_question_generation
]: # Generate questions for the top-frequency answers (at most num_question_generation).
contexts_for_question_generation.append(
"answer: %s context: %s." % (ans, cap_use)
)
answers.append(ans)
contexts_for_question_generation.append(
"answer: %s context: %s." % ("yes.", cap_use)
)
answers.append("yes.")
return contexts_for_question_generation, answers, ans_to_cap_dict
def forward_qa_generation(self, samples):
caption = samples["captions"][0]
(
contexts_for_question_generation,
answers,
ans_to_cap_dict,
) = self.answer_extraction(caption)
inputs = self.question_generation_tokenizer(
contexts_for_question_generation,
padding="longest",
truncation=True,
max_length=2048,
return_tensors="pt",
).to(self.device)
question_size = inputs.input_ids.shape[0]
cur_b = 0
true_input_size = 10
outputs_list = []
while cur_b < question_size:
outputs = self.question_generation_model.generate(
input_ids=inputs.input_ids[cur_b : cur_b + true_input_size],
attention_mask=inputs.attention_mask[cur_b : cur_b + true_input_size],
num_beams=3,
max_length=30,
)
questions = self.question_generation_tokenizer.batch_decode(
outputs, skip_special_tokens=True
)
outputs_list += questions
cur_b += true_input_size
questions = outputs_list
samples["questions"] = questions
samples["answers"] = answers
samples["ans_to_cap_dict"] = ans_to_cap_dict
# results.append({"question_id": ques_id, "question":questions,"answer":answers})
return samples
def create_context_prompt(self, samples, num_caps_per_img=30):
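# Builds the "Contexts:" part of the prompt: iterating over the synthetic answers from
# the end of the list backwards, it appends at most one caption that mentions each
# answer (skipping captions already added), for up to num_caps_per_img answers.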
ans_dict_queid = samples["ans_to_cap_dict"]
# print(ans_dict_queid)
caption = samples["captions"][0]
answers = samples["answers"]
Context_Prompt = ""
mycontexts_id = []
for idx in range(num_caps_per_img):
cap_id_list = ans_dict_queid.get(
answers[(len(answers) - 1 - idx) % len(answers)][:-1].lower(), [0]
)
for cap_id in cap_id_list:
if cap_id not in mycontexts_id:
Context_Prompt += caption[cap_id]
mycontexts_id.append(cap_id)
break # We just take one cap for each answer
samples["Context_Prompt"] = Context_Prompt
return Context_Prompt
def create_task_prompt(
self, samples, question_type="neural", num_question_per_img=30
):
syn_question_queid = samples["questions"]
syn_ans_queid = samples["answers"]
Task_Prompt = ""
for idx in range(num_question_per_img):
# if config['random_question']:
# qa_idx = random.randint(0, len(syn_question_queid) - 1)
# else:
qa_idx = idx
if (
question_type != "rule" and num_question_per_img > 0 and idx < 1
): ## yes and no questions for vqav2
# Task_Prompt += "Question:"
# Task_Prompt += syn_question_queid_next[-1]
# Task_Prompt += '\n'
# Task_Prompt += "Answer:no\n"
Task_Prompt += "Question:"
Task_Prompt += syn_question_queid[-1]
Task_Prompt += "\n"
Task_Prompt += "Answer:"
Task_Prompt += "yes\n"
Task_Prompt += "Question:Is this a toilet?\n"
Task_Prompt += "Answer:no\n"
if "question_type" == "rule": # Rule-Based Question Generation
Noun_Questions = [
"What item is this in this picture?",
"What item is that in this picture?",
]
Verb_Questions = [
"What action is being done in this picture?",
"Why is this item doing in this picture?",
"Which action is being taken in this picture?",
"What action is item doing in this picture?",
"What action is item performing in this picture?",
]
Adj_Questions = [
"How to describe one item in this picture?",
"What is item's ADJ TYPE in this picture?",
"What is the ADJ TYPE in this picture?",
]
Task_Prompt += "Question:"
doc = self.nlp(syn_ans_queid[(qa_idx) % len(syn_ans_queid)][:-1].lower())
if doc[-1].pos_ == "NOUN":
Task_Prompt += Noun_Questions[
random.randint(0, len(Noun_Questions) - 1)
]
elif doc[-1].pos_ == "VERB":
Task_Prompt += Verb_Questions[
random.randint(0, len(Verb_Questions) - 1)
]
elif doc[-1].pos_ == "ADJ":
Task_Prompt += Adj_Questions[
random.randint(0, len(Adj_Questions) - 1)
]
Task_Prompt += "\n"
Task_Prompt += "Answer:"
Task_Prompt += syn_ans_queid[(qa_idx) % len(syn_ans_queid)][:-1].lower()
Task_Prompt += "\n"
samples["Task_Prompt"] = Task_Prompt
# print(Task_Prompt)
return Task_Prompt
def prompts_construction(
self,
samples,
question_type="neural",
num_caps_per_img=30,
num_question_per_img=30,
):
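# Final prompt layout, as assembled below: an instruction sentence, then "Contexts:"
# followed by the selected captions, then the synthetic task exemplars, and finally
# the real question terminated by "Answer:" so the LLM completes the answer.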
Prompt = "Please reason the answer of the questions according to the given contexts.\n"
Context_Prompt = self.create_context_prompt(samples, num_caps_per_img)
Task_Prompt = self.create_task_prompt(
samples, question_type, num_question_per_img
)
Img2Prompt = (
Prompt
+ "Contexts:"
+ Context_Prompt
+ "\n"
+ Task_Prompt
+ "Question:"
+ samples["text_input"][0]
+ "\nAnswer:"
)
return Img2Prompt
def prepare_LLM_input(
self,
samples,
num_beams=1,
inference_method="generate",
max_len=20,
min_len=0,
internal_bsz_fid=1,
num_captions=50,
num_captions_fid=1,
cap_max_length=20,
cap_min_length=10,
top_k=50,
top_p=1,
repetition_penalty=1,
num_patches=20,
block_num=7,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- text_input (str or [str]): String or a list of strings, each string is a question.
The number of questions must equal the batch size. If a single string is given, it is first converted to a list of length 1.
num_beams (int): Number of beams for beam search. 1 means no beam search.
inference_method (str): Inference method. Must be "generate". The model will generate answers.
max_len (int): Maximum length of generated answers.
min_len (int): Minimum length of generated answers.
internal_bsz_fid (int): Internal batch size when using FiD decoding.
num_captions (int): Number of captions generated for each image.
num_captions_fid (int): Number of captions concatenated with a question during FiD decoding.
cap_max_length (int): The maximum length of the caption to be generated.
cap_min_length (int): The minimum length of the caption to be generated.
top_k (float): The number of the highest probability tokens for top-k sampling.
top_p (float): The cumulative probability for nucleus sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_patches (int): Number of patches sampled for each image.
block_num (int): The index of cross-attention block for gradcam computation.
Returns:
List: A list of strings, each string is an answer.
gradcams (torch.Tensor): A tensor of shape (batch_size, H*W)
captions (nested list): A nested list of strings of total length batch_size * num_captions
"""
assert inference_method in [
"generate",
], "Inference method must be 'generate', got {}.".format(inference_method)
if isinstance(samples["text_input"], str):
samples["text_input"] = [samples["text_input"]]
assert len(samples["text_input"]) == samples["image"].size(
0
), "The number of questions must be equal to the batch size."
samples = self.forward_itm(samples, block_num=block_num)
samples = self.forward_cap(
samples,
cap_max_length=cap_max_length,
cap_min_length=cap_min_length,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
num_captions=num_captions,
num_patches=num_patches,
)
if self.offload_model:
samples["image"] = samples["image"].to("cpu")
self.image_question_matching_model.to("cpu")
self.image_captioning_model.to("cpu")
torch.cuda.empty_cache()
pred_answers = self.forward_qa(
samples,
num_beams=num_beams,
max_len=max_len,
min_len=min_len,
internal_bsz_fid=internal_bsz_fid,
num_captions=num_captions,
num_captions_fid=num_captions_fid,
)
if self.offload_model:
self.image_question_matching_model.to(self.question_answering_model.device)
self.image_captioning_model.to(self.question_answering_model.device)
return pred_answers, samples["captions"], samples["gradcams"]
@classmethod
def from_config(cls, model_config):
itm_config = model_config.image_question_matching_model
cap_config = model_config.image_captioning_model
itm_cls = registry.get_model_class(itm_config.arch)
cap_cls = registry.get_model_class(cap_config.arch)
image_question_matching_model = itm_cls.from_config(itm_config)
image_captioning_model = cap_cls.from_config(cap_config)
question_generation_tokenizer = T5Tokenizer.from_pretrained(
"google/t5-large-lm-adapt"
)
question_generation_model = T5ForConditionalGeneration.from_pretrained(
"google/t5-large-lm-adapt"
)
cached_file = download_cached_file(
"https://storage.googleapis.com/sfr-vision-language-research/LAVIS/projects/img2prompt/T5_large_QG.pth",
check_hash=False,
progress=True,
)
checkpoint = torch.load(cached_file, map_location="cpu")
state_dict = checkpoint["model"]
question_generation_model.load_state_dict(state_dict)
model = cls(
image_question_matching_model=image_question_matching_model,
image_captioning_model=image_captioning_model,
question_generation_model=question_generation_model,
question_generation_tokenizer=question_generation_tokenizer,
offload_model=False,
)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/img2prompt_models/img2prompt_vqa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.base_model import MomentumDistilationMixin
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import (
BlipIntermediateOutput,
BlipOutputWithLogits,
)
from lavis.models.med import XBertEncoder
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
@registry.register_model("blip_classification")
class BlipClassification(BlipBase, MomentumDistilationMixin):
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/blip_classification_base.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
num_classes,
momentum=0.995,
alpha=0.4,
max_txt_len=40,
use_distill=True,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.use_distill = use_distill
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
hidden_size = text_encoder.config.hidden_size
self.cls_head = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, num_classes),
)
if self.use_distill:
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.cls_head_m = deepcopy(self.cls_head)
self.momentum = momentum
self.alpha = alpha
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.cls_head, self.cls_head_m],
]
self.copy_params()
self.max_txt_len = max_txt_len
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / num_iters_per_epoch)
def forward(self, samples, is_train=True):
sentences = samples["text_input"]
sentences = self.tokenizer(
sentences,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
samples.update({"tokenized_text": sentences})
targets = samples["label"]
image_embeds = self.visual_encoder.forward_features(samples["image"])
encoder_output = self.text_encoder.forward_automask(
samples["tokenized_text"], image_embeds
)
prediction = self.cls_head(encoder_output.last_hidden_state[:, 0, :])
if is_train:
if self.use_distill:
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(samples["image"])
encoder_output_m = self.text_encoder_m.forward_automask(
samples["tokenized_text"], image_embeds_m
)
prediction_m = self.cls_head_m(
encoder_output_m.last_hidden_state[:, 0, :]
)
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
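# Distillation objective: (1 - alpha) * cross-entropy against the hard labels plus
# alpha * a soft cross-entropy against the momentum model's predictions, with alpha
# ramped up over the first epoch so early training relies on the hard labels.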
loss = (1 - alpha) * F.cross_entropy(
prediction, targets
) - alpha * torch.sum(
F.log_softmax(prediction, dim=1) * F.softmax(prediction_m, dim=1),
dim=1,
).mean()
else:
loss = F.cross_entropy(prediction, targets)
# return {"loss": loss}
return BlipOutputWithLogits(
loss=loss,
intermediate_output=BlipIntermediateOutput(
image_embeds=image_embeds,
image_embeds_m=image_embeds_m,
encoder_output=encoder_output,
encoder_output_m=encoder_output_m,
),
logits=prediction,
logits_m=prediction_m,
)
else:
return {"predictions": prediction, "targets": targets}
def predict(self, samples):
output = self.forward(samples, is_train=False)
return output
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
# text encoder + multimodal encoder
text_encoder = XBertEncoder.from_config(cfg)
use_distill = cfg.get("use_distill", True)
momentum = cfg.get("momentum", 0.995)
num_classes = cfg.get("num_classes", -1)
alpha = cfg.get("alpha", 0.4)
max_txt_len = cfg.get("max_txt_len", 40)
assert num_classes > 1, "Invalid number of classes provided, found {}".format(
num_classes
)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
use_distill=use_distill,
alpha=alpha,
num_classes=num_classes,
momentum=momentum,
max_txt_len=max_txt_len,
)
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
if pretrain_path is not None:
msg = model.load_from_pretrained(url_or_filename=pretrain_path)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_classification.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.base_model import tile
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import (
BlipOutput,
BlipIntermediateOutput,
)
from lavis.models.med import XBertEncoder, XBertLMHeadDecoder
from lavis.models.vit import VisionTransformerEncoder
@registry.register_model("blip_vqa")
class BlipVQA(BlipBase):
"""
BLIP VQA models.
Supported model types:
- base: vqa model initialized with pre-trained BLIP base model on 115M image-text pairs after CapFilt; not fine-tuned.
- vqav2: fine-tuned BLIP base model on VQA v2.0 dataset.
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip_vqa", "vqav2")
>>> model = load_model("blip_vqa", "okvqa")
>>> model = load_model("blip_vqa", "aokvqa")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"vqav2": "configs/models/blip_vqav2.yaml",
"okvqa": "configs/models/blip_vqa_okvqa.yaml",
"aokvqa": "configs/models/blip_vqa_aokvqa.yaml",
}
def __init__(self, image_encoder, text_encoder, text_decoder, max_txt_len=35):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
self.text_decoder = text_decoder
self.max_txt_len = max_txt_len
def forward(self, samples):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- text_input (list): A list of strings, each string is a question
- answer (list): A list of strings, each string is an answer
- weight (torch.Tensor): A tensor used to weigh each answer in the loss computation.
The shape of the tensor is (sum(n_answers),)
- n_answers (torch.Tensor): A tensor shape (batch_size,) containing the number of answers
for each question in the batch.
Returns:
A BlipOutput object containing loss and intermediate outputs,
see :class:`lavis.models.blip_models.blip_outputs.BlipOutput` for more details.
Examples:
```python
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("blip_vqa")
>>> samples = {
... "image": torch.rand(2, 3, 480, 480),
... "text_input": ["What is this?", "What is that?"],
... "answer": ["cat", "cat", "dog"],
... "weight": torch.tensor([1.0, 1.0, 1.0]),
... "n_answers": torch.tensor([2, 1]),
... }
>>> output = model(samples)
>>> output.keys()
odict_keys(['intermediate_output', 'loss'])
>>> output.intermediate_output.keys()
odict_keys(['image_embeds', 'encoder_output', 'decoder_output', 'decoder_labels'])
```
"""
encoder_output, image_embeds = self.forward_encoder(samples)
loss, decoder_output, decoder_targets = self.forward_decoder(
samples=samples, encoder_out=encoder_output
)
return BlipOutput(
loss=loss,
intermediate_output=BlipIntermediateOutput(
image_embeds=image_embeds,
encoder_output=encoder_output,
decoder_output=decoder_output,
decoder_labels=decoder_targets,
),
)
def forward_encoder(self, samples):
questions = samples["text_input"]
questions = self.tokenizer(
questions,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
questions.input_ids[:, 0] = self.tokenizer.enc_token_id
samples.update({"tokenized_text": questions})
image_embeds = self.visual_encoder.forward_features(samples["image"])
encoder_output = self.text_encoder.forward_automask(
tokenized_text=samples["tokenized_text"], visual_embeds=image_embeds
)
return encoder_output, image_embeds
def forward_decoder(self, samples, encoder_out, **kwargs):
answers = self.tokenizer(
samples["answer"], padding="longest", return_tensors="pt"
).to(self.device)
answers.input_ids[:, 0] = self.tokenizer.bos_token_id
answer_targets = answers.input_ids.masked_fill(
answers.input_ids == self.tokenizer.pad_token_id, -100
)
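# Each question can have several ground-truth answers: the encoder states and
# attention mask of question b are repeated n_answers[b] times so every
# (question, answer) pair becomes one decoder row; the per-answer losses are weighted
# by samples["weight"], summed, and divided by the image batch size.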
question_states = []
question_atts = []
question = samples["tokenized_text"]
question_output = encoder_out
for b, n in enumerate(samples["n_answers"]):
question_states += [question_output.last_hidden_state[b]] * n
question_atts += [question.attention_mask[b]] * n
question_states = torch.stack(question_states, dim=0)
question_atts = torch.stack(question_atts, dim=0)
answer_output = self.text_decoder(
answers.input_ids,
attention_mask=answers.attention_mask,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
labels=answer_targets,
return_dict=True,
reduction="none",
)
loss = samples["weight"] * answer_output.loss
bsz = samples["image"].size(0)
loss = loss.sum() / bsz
return loss, answer_output, answer_targets
def predict_answers(
self,
samples,
num_beams=3,
inference_method="rank",
max_len=10,
min_len=1,
num_ans_candidates=128,
answer_list=None,
**kwargs
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). Default H=480, W=480.
- text_input (str or [str]): String or a list of strings, each string is a question.
The number of questions must equal the batch size. If a single string is given, it is first converted to a list of length 1.
num_beams (int): Number of beams for beam search. 1 means no beam search.
inference_method (str): Inference method. One of "rank", "generate".
- If "rank", the model will return answers with the highest probability from the answer list.
- If "generate", the model will generate answers.
max_len (int): Maximum length of generated answers.
min_len (int): Minimum length of generated answers.
num_ans_candidates (int): Number of answer candidates, used to filter out answers with low probability.
answer_list (list): A list of strings, each string is an answer.
Returns:
List: A list of strings, each string is an answer.
Examples:
```python
>>> from PIL import Image
>>> from lavis.models import load_model_and_preprocess
>>> model, vis_processors, txt_processors = load_model_and_preprocess("blip_vqa", "vqav2")
>>> raw_image = Image.open("docs/data/merlion.png").convert("RGB")
>>> question = "Which city is this photo taken?"
>>> image = vis_processors["eval"](raw_image).unsqueeze(0)
>>> question = txt_processors["eval"](question)
>>> samples = {"image": image, "text_input": [question]}
>>> answers = model.predict_answers(samples)
>>> answers
['singapore']
>>> answer_list = ["Singapore", "London", "Palo Alto", "Tokyo"]
>>> answers = model.predict_answers(samples, answer_list=answer_list)
>>> answers
['Singapore']
```
"""
assert inference_method in [
"rank",
"generate",
], "Inference method must be one of 'rank' or 'generate', got {}.".format(
inference_method
)
if isinstance(samples["text_input"], str):
samples["text_input"] = [samples["text_input"]]
assert len(samples["text_input"]) == samples["image"].size(
0
), "The number of questions must be equal to the batch size."
if inference_method == "generate":
return self._generate_answers(
samples, num_beams=num_beams, max_length=max_len, min_length=min_len
)
elif inference_method == "rank":
assert answer_list is not None, "answer_list must be provided for ranking"
num_ans_candidates = min(num_ans_candidates, len(answer_list))
return self._rank_answers(
samples, answer_list=answer_list, num_ans_candidates=num_ans_candidates
)
def _generate_answers(self, samples, num_beams=3, max_length=10, min_length=1):
encoder_out, _ = self.forward_encoder(samples)
question_output = encoder_out
question_states = question_output.last_hidden_state.repeat_interleave(
num_beams, dim=0
)
question_atts = torch.ones(question_states.size()[:-1], dtype=torch.long).to(
self.device
)
model_kwargs = {
"encoder_hidden_states": question_states,
"encoder_attention_mask": question_atts,
}
bsz = samples["image"].size(0)
bos_ids = torch.full(
(bsz, 1), fill_value=self.tokenizer.bos_token_id, device=self.device
)
outputs = self.text_decoder.generate(
input_ids=bos_ids,
max_length=max_length,
min_length=min_length,
num_beams=num_beams,
eos_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
**model_kwargs
)
# collect answers
answers = []
for output in outputs:
answer = self.tokenizer.decode(output, skip_special_tokens=True)
answers.append(answer)
return answers
def _rank_answers(self, samples, answer_list, num_ans_candidates):
"""
Generate the first answer token with the decoder and select the ``num_ans_candidates``
most probable ones. Then pick the answers from the answer list that start with these probable tokens.
Lastly, use the selected answers as ground-truth labels for decoding and compute the LM loss.
Return the answers that minimize the loss.
"""
answer_candidates = self.tokenizer(
answer_list, padding="longest", return_tensors="pt"
).to(self.device)
answer_candidates.input_ids[:, 0] = self.tokenizer.bos_token_id
answer_ids = answer_candidates.input_ids
answer_atts = answer_candidates.attention_mask
question_output, _ = self.forward_encoder(samples)
question_states = question_output.last_hidden_state
tokenized_question = samples["tokenized_text"]
question_atts = tokenized_question.attention_mask
num_ques = question_states.size(0)
start_ids = answer_ids[0, 0].repeat(num_ques, 1) # bos token
start_output = self.text_decoder(
start_ids,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
return_dict=True,
reduction="none",
)
logits = start_output.logits[:, 0, :] # first token's logit
# topk_probs: top-k probability
# topk_ids: [num_question, k]
answer_first_token = answer_ids[:, 1]
prob_first_token = F.softmax(logits, dim=1).index_select(
dim=1, index=answer_first_token
)
topk_probs, topk_ids = prob_first_token.topk(num_ans_candidates, dim=1)
# answer input: [num_question*k, answer_len]
input_ids = []
input_atts = []
for b, topk_id in enumerate(topk_ids):
input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
input_ids = torch.cat(input_ids, dim=0)
input_atts = torch.cat(input_atts, dim=0)
targets_ids = input_ids.masked_fill(
input_ids == self.tokenizer.pad_token_id, -100
)
# repeat encoder's output for top-k answers
question_states = tile(question_states, 0, num_ans_candidates)
question_atts = tile(question_atts, 0, num_ans_candidates)
output = self.text_decoder(
input_ids,
attention_mask=input_atts,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
labels=targets_ids,
return_dict=True,
reduction="none",
)
log_probs_sum = -output.loss
log_probs_sum = log_probs_sum.view(num_ques, num_ans_candidates)
max_topk_ids = log_probs_sum.argmax(dim=1)
max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids]
answers = [answer_list[max_id] for max_id in max_ids]
return answers
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
# text encoder + multimodal encoder
text_encoder = XBertEncoder.from_config(cfg)
text_decoder = XBertLMHeadDecoder.from_config(cfg)
max_txt_len = cfg.get("max_txt_len", 35)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
text_decoder=text_decoder,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_vqa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
from typing import List
from torch import nn
def tie_encoder_decoder_weights(
encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key: str
):
uninitialized_encoder_weights: List[str] = []
if decoder.__class__ != encoder.__class__:
logging.info(
f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
)
def tie_encoder_to_decoder_recursively(
decoder_pointer: nn.Module,
encoder_pointer: nn.Module,
module_name: str,
uninitialized_encoder_weights: List[str],
skip_key: str,
depth=0,
):
assert isinstance(decoder_pointer, nn.Module) and isinstance(
encoder_pointer, nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight") and skip_key not in module_name:
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
print(module_name + " is tied")
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set(
[module_name + "/" + sub_name for sub_name in encoder_modules.keys()]
)
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(
decoder_modules[decoder_name],
type(encoder_modules[encoder_name]),
) and len(encoder_modules) != len(decoder_modules):
# this can happen if the name corresponds to the position in a module list of layers
# in this case the decoder has added a cross-attention that the encoder does not have
# thus skip this step and subtract one layer pos from encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
skip_key,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(
decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key
)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.blip_models.blip import BlipBase
from torch import nn
from lavis.models.med import XBertEncoder
from lavis.models.vit import VisionTransformerEncoder
@registry.register_model("blip_image_text_matching")
class BlipITM(BlipBase):
"""
BLIP Image-Text Matching (ITM) model.
Supported model types:
- base: fine-tuned BLIP retrieval weights on COCO dataset (Karpathy split).
- large: fine-tuned BLIP retrieval weights on COCO dataset (Karpathy split).
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip_image_text_matching", "base")
>>> model = load_model("blip_image_text_matching", "large")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/blip_itm_base.yaml",
"large": "configs/models/blip_itm_large.yaml",
}
def __init__(self, image_encoder, text_encoder, embed_dim=256, max_txt_len=35):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.text_encoder = text_encoder
self.visual_encoder = image_encoder
self.max_txt_len = max_txt_len
# creating projection layers for ITC
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.itm_head = nn.Linear(text_width, 2)
def forward(self, samples, match_head="itm"):
image = samples["image"]
caption = samples["text_input"]
image_embeds = self.visual_encoder.forward_features(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
text = self.tokenizer(
caption,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
if match_head == "itm":
encoder_input_ids = text.input_ids.clone()
encoder_input_ids[:, 0] = self.tokenizer.enc_token_id # extra code
output = self.text_encoder(
encoder_input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
itm_output = self.itm_head(output.last_hidden_state[:, 0, :])
return itm_output
elif match_head == "itc":
text_output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode="text",
)
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_feat = F.normalize(
self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1
)
sim = image_feat @ text_feat.t()
return sim
def itm_rank(self, image_embeds, image_atts, encoder_input_ids, match_head='itm'):
# breakpoint()
encoder_input_ids = encoder_input_ids.clone()
encoder_input_ids = encoder_input_ids[:, 3:]
text_attention_mask = (encoder_input_ids != self.tokenizer.pad_token_id).long()
if match_head == 'itm':
# encoder_input_ids = encoder_input_ids.clone()
encoder_input_ids[:, 0] = self.tokenizer.enc_token_id
output = self.text_encoder(encoder_input_ids,
attention_mask=text_attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
# print(output.last_hidden_state.shape)
itm_output = self.itm_head(output.last_hidden_state[:, 0, :])
itm_output = F.softmax(itm_output, dim=1)[:,1]
return itm_output #, mask, token_length
elif match_head == 'itc':
encoder_input_ids[:, 0] = self.tokenizer.cls_token_id
text_output = self.text_encoder(encoder_input_ids, attention_mask=text_attention_mask,
return_dict=True, mode='text')
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)
sim = image_feat @ text_feat.t()
return sim
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
text_encoder = XBertEncoder.from_config(cfg)
embed_dim = cfg.get("embed_dim", 256)
max_txt_len = cfg.get("max_txt_len", 35)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
embed_dim=embed_dim,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
def compute_gradcam(model, visual_input, text_input, tokenized_text, block_num=6):
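# GradCAM over the selected cross-attention block: attention maps are multiplied by
# their gradients (clamped at 0) with respect to the positive ITM logit, masked by the
# text attention mask, averaged over heads, and reshaped to a 24x24 patch grid (the
# code assumes a ViT producing 576 image patches; the first image token, presumably
# the visual [CLS], is dropped).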
model.text_encoder.base_model.base_model.encoder.layer[
block_num
].crossattention.self.save_attention = True
output = model({"image": visual_input, "text_input": text_input}, match_head="itm")
loss = output[:, 1].sum()
model.zero_grad()
loss.backward()
with torch.no_grad():
mask = tokenized_text.attention_mask.view(
tokenized_text.attention_mask.size(0), 1, -1, 1, 1
) # (bsz,1,token_len, 1,1)
token_length = tokenized_text.attention_mask.sum(dim=-1) - 2
token_length = token_length.cpu()
# grads and cams [bsz, num_head, seq_len, image_patch]
grads = model.text_encoder.base_model.base_model.encoder.layer[
block_num
].crossattention.self.get_attn_gradients()
cams = model.text_encoder.base_model.base_model.encoder.layer[
block_num
].crossattention.self.get_attention_map()
# assumes a ViT with 576 image patches (24x24 grid); the first image token is dropped
cams = cams[:, :, :, 1:].reshape(visual_input.size(0), 12, -1, 24, 24) * mask
grads = (
grads[:, :, :, 1:].clamp(0).reshape(visual_input.size(0), 12, -1, 24, 24)
* mask
)
gradcams = cams * grads
gradcam_list = []
for ind in range(visual_input.size(0)):
token_length_ = token_length[ind]
gradcam = gradcams[ind].mean(0).cpu().detach()
# [enc token gradcam, average gradcam across token, gradcam for individual token]
gradcam = torch.cat(
(
gradcam[0:1, :],
gradcam[1 : token_length_ + 1, :].sum(dim=0, keepdim=True)
/ token_length_,
gradcam[1:, :],
)
)
gradcam_list.append(gradcam)
return gradcam_list, output
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_image_text_matching.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import torch
from lavis.common.dist_utils import download_cached_file
from lavis.common.utils import is_url
from lavis.models.base_model import BaseModel
from lavis.models.vit import interpolate_pos_embed
from transformers import BertTokenizer
class BlipBase(BaseModel):
@classmethod
def init_tokenizer(cls):
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"bos_token": "[DEC]"})
tokenizer.add_special_tokens({"additional_special_tokens": ["[ENC]"]})
tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
return tokenizer
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
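# Position embeddings are interpolated so a checkpoint trained at one image resolution
# can be loaded into a visual encoder with a different number of patches; any
# remaining weights with mismatched shapes are dropped before the non-strict load below.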
state_dict["visual_encoder.pos_embed"] = interpolate_pos_embed(
state_dict["visual_encoder.pos_embed"], self.visual_encoder
)
if "visual_encoder_m.pos_embed" in self.state_dict().keys():
state_dict["visual_encoder_m.pos_embed"] = interpolate_pos_embed(
state_dict["visual_encoder_m.pos_embed"], self.visual_encoder_m
)
for key in self.state_dict().keys():
if key in state_dict.keys():
if state_dict[key].shape != self.state_dict()[key].shape:
del state_dict[key]
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import math
from typing import Tuple
import torch
import torch.utils.checkpoint
from torch import Tensor, device, nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.models.bert.configuration_bert import BertConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
self.config = config
def forward(
self,
input_ids=None,
position_ids=None,
inputs_embeds=None,
past_key_values_length=0,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[
:, past_key_values_length : seq_length + past_key_values_length
]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
embeddings = inputs_embeds
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, is_cross_attention):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
config, "embedding_size"
):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_width, self.all_head_size)
self.value = nn.Linear(config.encoder_width, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(
2 * config.max_position_embeddings - 1, self.attention_head_size
)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(-1, 1)
position_ids_r = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(
distance + self.max_position_embeddings - 1
)
positional_embedding = positional_embedding.to(
dtype=query_layer.dtype
) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
relative_position_scores_key = torch.einsum(
"bhrd,lrd->bhlr", key_layer, positional_embedding
)
attention_scores = (
attention_scores
+ relative_position_scores_query
+ relative_position_scores_key
)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (
(context_layer, attention_probs) if output_attentions else (context_layer,)
)
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config, twin=False, merge=False):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if twin:
self.dense0 = nn.Linear(config.hidden_size, config.hidden_size)
self.dense1 = nn.Linear(config.hidden_size, config.hidden_size)
else:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if merge:
self.act = ACT2FN[config.hidden_act]
self.merge_layer = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.merge = True
else:
self.merge = False
def forward(self, hidden_states, input_tensor):
if type(hidden_states) == list:
hidden_states0 = self.dense0(hidden_states[0])
hidden_states1 = self.dense1(hidden_states[1])
if self.merge:
# hidden_states = self.merge_layer(self.act(torch.cat([hidden_states0,hidden_states1],dim=-1)))
hidden_states = self.merge_layer(
torch.cat([hidden_states0, hidden_states1], dim=-1)
)
else:
hidden_states = (hidden_states0 + hidden_states1) / 2
else:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_num=-1):
super().__init__()
if is_cross_attention:
self.self0 = BertSelfAttention(config, is_cross_attention)
self.self1 = BertSelfAttention(config, is_cross_attention)
else:
self.self = BertSelfAttention(config, is_cross_attention)
self.output = BertSelfOutput(
config,
twin=is_cross_attention,
merge=(is_cross_attention and layer_num >= 6),
)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads,
self.self.num_attention_heads,
self.self.attention_head_size,
self.pruned_heads,
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = (
self.self.attention_head_size * self.self.num_attention_heads
)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
if type(encoder_hidden_states) == list:
self_outputs0 = self.self0(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states[0],
encoder_attention_mask[0],
past_key_value,
output_attentions,
)
self_outputs1 = self.self1(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states[1],
encoder_attention_mask[1],
past_key_value,
output_attentions,
)
attention_output = self.output(
[self_outputs0[0], self_outputs1[0]], hidden_states
)
outputs = (attention_output,) + self_outputs0[
1:
] # add attentions if we output them
else:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, layer_num):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.layer_num = layer_num
if self.config.add_cross_attention:
self.crossattention = BertAttention(
config,
is_cross_attention=self.config.add_cross_attention,
layer_num=layer_num,
)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
mode=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = (
past_key_value[:2] if past_key_value is not None else None
)
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if mode == "multimodal":
assert (
encoder_hidden_states is not None
), "encoder_hidden_states must be given for cross-attention layers"
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = (
outputs + cross_attention_outputs[1:-1]
) # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BertLayer(config, i) for i in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
mode="multimodal",
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
next_decoder_cache = () if use_cache else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
mode=mode,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
mode=mode,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`; to be used in a Seq2Seq model, it additionally needs the :obj:`add_cross_attention` argument
    set to :obj:`True`, in which case an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: Tuple[int],
device: device,
is_decoder: bool,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
mode="multimodal",
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif encoder_embeds is not None:
input_shape = encoder_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = encoder_embeds.device
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds or encoder_embeds"
)
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device, is_decoder
)
# If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
0
].size()
else:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [
self.invert_attention_mask(mask) for mask in encoder_attention_mask
]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if encoder_embeds is None:
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
else:
embedding_output = encoder_embeds
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
mode=mode,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
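if __name__ == "__main__":
    # Minimal, illustrative sketch of the twin cross-attention path used for NLVR-style
    # inputs: `encoder_hidden_states` and `encoder_attention_mask` may be lists holding
    # the features of the two input images. The tiny config, the `encoder_width` value,
    # and the tensor shapes below are assumptions for demonstration only; real
    # checkpoints use BERT-base sizes and ViT features.
    config = BertConfig(
        hidden_size=64,
        num_attention_heads=4,
        num_hidden_layers=2,
        intermediate_size=128,
        add_cross_attention=True,
    )
    config.encoder_width = 32  # width of the visual features fed to cross-attention

    model = BertModel(config, add_pooling_layer=False)
    text_ids = torch.randint(0, config.vocab_size, (2, 16))
    text_mask = torch.ones(2, 16, dtype=torch.long)
    img0, img1 = torch.randn(2, 50, 32), torch.randn(2, 50, 32)
    img_mask = torch.ones(2, 50, dtype=torch.long)

    out = model(
        text_ids,
        attention_mask=text_mask,
        encoder_hidden_states=[img0, img1],
        encoder_attention_mask=[img_mask, img_mask],
        return_dict=True,
    )
    print(out.last_hidden_state.shape)  # torch.Size([2, 16, 64])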
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/nlvr_encoder.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
from lavis.common.registry import registry
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import (
BlipOutput,
BlipIntermediateOutput,
)
from lavis.models.med import XBertLMHeadDecoder
from lavis.models.vit import VisionTransformerEncoder
@registry.register_model("blip_caption")
class BlipCaption(BlipBase):
"""
BLIP captioning model.
Supported model types:
        - base_coco: fine-tuned BLIP base model on COCO caption dataset (Karpathy split).
        - large_coco: fine-tuned BLIP large model on COCO caption dataset (Karpathy split).
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip_caption", "base_coco")
>>> model = load_model("blip_caption", "large_coco")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"base_coco": "configs/models/blip_caption_base_coco.yaml",
"large_coco": "configs/models/blip_caption_large_coco.yaml",
}
def __init__(self, image_encoder, text_decoder, prompt=None, max_txt_len=40):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_decoder = text_decoder
self.prompt = prompt
self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1
self.max_txt_len = max_txt_len
def forward_encoder(self, samples):
image_embeds = self.visual_encoder.forward_features(samples["image"])
return image_embeds
def forward_decoder(self, samples, image_embeds):
# prepare inputs for forwarding decoder
raw_text = samples["text_input"]
text = self.tokenizer(
raw_text,
padding="longest",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
text.input_ids[:, 0] = self.tokenizer.bos_token_id
# prepare targets for forwarding decoder
decoder_targets = text.input_ids.masked_fill(
text.input_ids == self.tokenizer.pad_token_id, -100
)
decoder_targets[:, : self.prompt_length] = -100
# forward decoder
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
decoder_output = self.text_decoder(
input_ids=text.input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
labels=decoder_targets,
return_dict=True,
)
return decoder_output, decoder_targets
def forward(self, samples):
r"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
- text_input (list): A list of strings of length batch_size.
Returns:
output (BlipOutput): A BlipOutput object containing the following
attributes:
- loss (torch.Tensor): A scalar tensor containing the total loss. For BlipCaption, this is the same as the LM loss.
- loss_lm (torch.Tensor): A scalar tensor containing the LM loss.
- intermediate_outputs (BlipIntermediateOutput): A BlipIntermediateOutput object containing intermediate outputs.
see :class:`lavis.models.blip_models.blip_outputs.BlipOutput` for more details.
Example:
```python
>>> from PIL import Image
>>> from lavis.models import load_model_and_preprocess
>>> model, vis_processors, txt_processors = load_model_and_preprocess("blip_caption")
>>> raw_image = Image.open("docs/data/merlion.png").convert("RGB")
>>> image = vis_processors["eval"](raw_image).unsqueeze(0)
>>> text_input = ["a large statue of a person spraying water from a fountain"]
>>> samples = {"image": image, "text_input": text_input}
>>> output = model(samples)
>>> output.keys()
odict_keys(['intermediate_output', 'loss', 'loss_lm'])
>>> output.intermediate_output.image_embeds.shape
torch.Size([1, 577, 768])
>>> output.intermediate_output.decoder_labels.shape
torch.Size([1, 13])
```"""
image_embeds = self.forward_encoder(samples)
decoder_output, decoder_targets = self.forward_decoder(samples, image_embeds)
# return decoder_out
return BlipOutput(
loss=decoder_output.loss,
loss_lm=decoder_output.loss,
intermediate_output=BlipIntermediateOutput(
image_embeds=image_embeds,
decoder_output=decoder_output,
decoder_labels=decoder_targets,
),
)
def generate(
self,
samples,
use_nucleus_sampling=False,
num_beams=3,
max_length=30,
min_length=10,
top_p=0.9,
repetition_penalty=1.0,
num_captions=1,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
            use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use beam search.
num_beams (int): Number of beams for beam search. 1 means no beam search.
max_length (int): The maximum length of the sequence to be generated.
min_length (int): The minimum length of the sequence to be generated.
top_p (float): The cumulative probability for nucleus sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_captions (int): Number of captions to be generated for each image.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
Example:
```python
>>> from PIL import Image
>>> from lavis.models import load_model_and_preprocess
>>> model, vis_processors, txt_processors = load_model_and_preprocess("blip_caption")
>>> raw_image = Image.open("docs/data/merlion.png").convert("RGB")
>>> image = vis_processors["eval"](raw_image).unsqueeze(0)
>>> samples = {"image": image}
>>> captions = model.generate(samples)
>>> captions
['a large statue of a person spraying water from a fountain']
>>> captions = model.generate(samples, use_nucleus_sampling=True, num_captions=3)
>>> captions # example output, results may vary due to randomness
['singapore showing the view of some building',
'the singapore harbor in twilight, as the weather is going down',
'the famous singapore fountain at sunset']
"""
# prepare inputs for decoder generation.
encoder_out = self.forward_encoder(samples)
image_embeds = torch.repeat_interleave(encoder_out, num_captions, 0)
prompt = [self.prompt] * image_embeds.size(0)
prompt = self.tokenizer(prompt, return_tensors="pt").to(self.device)
prompt.input_ids[:, 0] = self.tokenizer.bos_token_id
prompt.input_ids = prompt.input_ids[:, :-1]
# get decoded text
decoder_out = self.text_decoder.generate_from_encoder(
tokenized_prompt=prompt,
visual_embeds=image_embeds,
sep_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
use_nucleus_sampling=use_nucleus_sampling,
num_beams=num_beams,
max_length=max_length,
min_length=min_length,
top_p=top_p,
repetition_penalty=repetition_penalty,
)
outputs = self.tokenizer.batch_decode(decoder_out, skip_special_tokens=True)
captions = [output[len(self.prompt) :] for output in outputs]
return captions
@classmethod
def from_config(cls, cfg):
# vision encoder
image_encoder = VisionTransformerEncoder.from_config(cfg)
# text encoder + multimodal decoder
text_decoder = XBertLMHeadDecoder.from_config(cfg)
prompt = cfg.get("prompt", None)
max_txt_len = cfg.get("max_txt_len", 40)
model = cls(image_encoder, text_decoder, prompt=prompt, max_txt_len=max_txt_len)
model.load_checkpoint_from_config(cfg)
return model
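if __name__ == "__main__":
    # Minimal sketch of how forward_decoder builds the language-modelling targets:
    # padding tokens and the prompt prefix are masked with -100 so they are ignored
    # by the decoder loss. The token ids, pad id and prompt length below are
    # illustrative assumptions, not real tokenizer output.
    pad_token_id = 0
    prompt_length = 4  # e.g. len(tokenizer(prompt).input_ids) - 1
    input_ids = torch.tensor([[101, 1037, 3861, 1997, 1037, 2402, 2611, 102, 0, 0]])
    targets = input_ids.masked_fill(input_ids == pad_token_id, -100)
    targets[:, :prompt_length] = -100
    print(targets)  # prompt prefix and padding positions are now -100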
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_caption.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import warnings
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import BlipOutputFeatures
from lavis.models.med import XBertEncoder
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
@registry.register_model("blip_feature_extractor")
class BlipFeatureExtractor(BlipBase):
"""
Class for BLIP feature extractor.
Supported model types:
- base: BLIP base model with pre-trained weights from capfilt by BLIP large model.
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip_feature_extractor", "base")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/blip_feature_extractor_base.yaml",
# "large": "configs/models/blip_feature_extractor_large.yaml",
}
def __init__(self, image_encoder, text_encoder, embed_dim, max_txt_len=40):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
# creating projection layers for ITC
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.max_txt_len = max_txt_len
self.temp = nn.Parameter(0.07 * torch.ones([]))
@torch.no_grad()
def extract_features(self, samples, mode="multimodal"):
"""
Extract features for multimodal or unimodal samples.
Args:
samples (dict): A dictionary of samples, containing the following keys:
- image (torch.Tensor): A tensor of shape (B, C, H, W) containing the image.
                    Raw images should be preprocessed before being passed to the feature extractor.
- text_input (list): A list of strings containing the text, length B.
mode (str): The mode of feature extraction. Can be either "multimodal", "text" or "image".
If "multimodal", return image features and multimodal features;
if "text", return text features;
if "image", return image features.
Default: "multimodal".
Returns:
BlipOutputFeatures: A BlipOutputFeatures object containing the features.
See lavis/models/blip_models/blip_outputs.py for more details.
Examples:
```python
>>> from PIL import Image
>>> from lavis.models import load_model_and_preprocess
>>> raw_image = Image.open("docs/data/merlion.png").convert("RGB")
>>> caption = "a large fountain spewing water into the air"
>>> model, vis_processors, txt_processors = load_model_and_preprocess("blip_feature_extractor", is_eval=True)
>>> image = vis_processors["eval"](raw_image).unsqueeze(0)
>>> text_input = txt_processors["eval"](caption)
>>> sample = {"image": image, "text_input": [text_input]}
>>> features_multimodal = model.extract_features(sample)
>>> features_multimodal.keys()
odict_keys(['image_embeds', 'multimodal_embeds'])
>>> features_multimodal.image_embeds.shape
torch.Size([1, 197, 768])
>>> features_multimodal.multimodal_embeds.shape
torch.Size([1, 12, 768])
>>> features_text = model.extract_features(sample, mode="text")
>>> features_text.keys()
odict_keys(['text_embeds', 'text_features'])
>>> features_text.text_embeds.shape
torch.Size([1, 12, 768])
>>> features_text.text_features.shape
torch.Size([1, 12, 256])
>>> features_image = model.extract_features(sample, mode="image")
>>> features_image.keys()
odict_keys(['image_embeds', 'image_features'])
>>> features_image.image_embeds.shape
torch.Size([1, 197, 768])
>>> features_image.image_features.shape
torch.Size([1, 197, 256])
```
"""
image = samples.get("image")
caption = samples.get("text_input")
# assert mode is one of "image", "text", "multimodal"
assert mode in [
"image",
"text",
"multimodal",
], "mode must be one of 'image', 'text', 'multimodal'"
        # initialize output
image_embeds, text_embeds, multimodal_embeds = None, None, None
image_features, text_features = None, None
if mode == "image":
assert (
image is not None
), "Image is not provided for mode 'image' or 'multimodal'"
# return image features
image_embeds = self.visual_encoder.forward_features(image)
image_features = self.vision_proj(image_embeds)
image_features = F.normalize(image_features, dim=-1)
elif mode == "text":
assert (
caption is not None
), "text input is None for mode 'text' or 'multimodal'"
text = self.tokenizer(caption, return_tensors="pt", padding=True).to(
self.device
)
# return text features
text_output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
mode="text",
)
text_embeds = text_output.last_hidden_state
text_features = self.text_proj(text_embeds)
text_features = F.normalize(text_features, dim=-1)
elif mode == "multimodal":
            # return multimodal features
image_embeds = self.visual_encoder.forward_features(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
text = self.tokenizer(caption, return_tensors="pt", padding=True).to(
self.device
)
text.input_ids[:, 0] = self.tokenizer.enc_token_id
output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
multimodal_embeds = output.last_hidden_state
return BlipOutputFeatures(
image_embeds=image_embeds,
image_embeds_proj=image_features,
text_embeds=text_embeds,
text_embeds_proj=text_features,
multimodal_embeds=multimodal_embeds,
)
@classmethod
def from_config(cls, cfg=None):
# set from_pretrained=True to load weights for 'bert-base-uncased'
image_encoder = VisionTransformerEncoder.from_config(cfg)
text_encoder = XBertEncoder.from_config(cfg)
embed_dim = cfg.get("embed_dim", 256)
max_txt_len = cfg.get("max_txt_len", 30)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
embed_dim=embed_dim,
max_txt_len=max_txt_len,
)
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
if pretrain_path is not None:
msg = model.load_from_pretrained(url_or_filename=pretrain_path)
else:
warnings.warn("No pretrained weights are loaded.")
return model
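if __name__ == "__main__":
    # Minimal sketch (random tensors stand in for extract_features outputs): the
    # projected features are L2-normalized, so an image-text similarity score is
    # simply the dot product of the [CLS] rows of image_embeds_proj and
    # text_embeds_proj (up to the learned temperature). Shapes are illustrative.
    image_embeds_proj = F.normalize(torch.randn(1, 197, 256), dim=-1)
    text_embeds_proj = F.normalize(torch.randn(1, 12, 256), dim=-1)
    sim = image_embeds_proj[:, 0, :] @ text_embeds_proj[:, 0, :].t()
    print(sim)  # a single similarity score in [-1, 1]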
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_feature_extractor.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.albef_models import compute_sim_matrix
from lavis.models.base_model import (
MomentumDistilationMixin,
SharedQueueMixin,
all_gather_with_grad,
concat_all_gather,
)
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import (
BlipOutput,
BlipSimilarity,
BlipIntermediateOutput,
)
from lavis.models.med import XBertEncoder
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
@registry.register_model("blip_retrieval")
class BlipRetrieval(BlipBase, MomentumDistilationMixin, SharedQueueMixin):
"""
BLIP retrieval model.
Supported model types:
- coco: fine-tuned BLIP base model on COCO dataset (Karpathy split).
- flickr: fine-tuned BLIP base model on Flickr30k dataset.
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip_retrieval", "coco")
>>> model = load_model("blip_retrieval", "flickr")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"coco": "configs/models/blip_retrieval_coco.yaml",
"flickr": "configs/models/blip_retrieval_flickr.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
queue_size,
alpha=0.4,
embed_dim=256,
momentum=0.995,
negative_all_rank=False,
max_txt_len=35,
):
""" """
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
# creating projection layers for ITC
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.itm_head = nn.Linear(text_width, 2)
# create the momentum encoder
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.vision_proj_m = deepcopy(self.vision_proj)
self.text_proj_m = deepcopy(self.text_proj)
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.vision_proj, self.vision_proj_m],
[self.text_proj, self.text_proj_m],
]
self.copy_params()
# create the queue
self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("idx_queue", torch.full((1, queue_size), -100))
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
self.queue_size = queue_size
self.momentum = momentum
self.temp = nn.Parameter(0.07 * torch.ones([]))
self.alpha = alpha
self.max_txt_len = max_txt_len
self.negative_all_rank = negative_all_rank
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / (2 * num_iters_per_epoch))
def forward(self, samples):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). The input images.
- text_input (list): A list of length batch_size, each element is a string of text/caption.
- image_id (torch.Tensor): A tensor of shape (batch_size, ). The image ids, used to identify same images in batch.
- epoch (int): The current epoch.
- iters (int): The current iteration.
- num_iters_per_epoch (int): The number of iterations per epoch.
Returns:
BlipOutput: A BlipOutput object. See ``lavis.models.blip_models.blip_outputs.BlipOutput`` for more details.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("blip_retrieval", "coco")
>>> images = torch.randn(4, 3, 384, 384)
>>> text_input = ["caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3"]
>>> image_id = torch.tensor([1, 1, 2, 3])
>>> samples = {"image": images, "text_input": text_input, "image_id": image_id, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100}
>>> output = model(samples)
>>> output.keys()
odict_keys(['sims', 'intermediate_output', 'loss', 'loss_itc', 'loss_itm'])
"""
image = samples["image"]
caption = samples["text_input"]
idx = samples["image_id"]
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
with torch.no_grad():
self.temp.clamp_(0.001, 0.5)
image_embeds = self.visual_encoder.forward_features(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text = self.tokenizer(
caption,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
text_output = self.text_encoder.forward_text(text)
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(self.text_proj(text_embeds[:, 0, :]), dim=-1)
# Image-text Contrastive Learning
idx = idx.view(-1, 1)
idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()], dim=1)
pos_idx = torch.eq(idx, idx_all).float()
sim_targets = pos_idx / pos_idx.sum(1, keepdim=True)
# get momentum features
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(image)
image_feat_m = F.normalize(
self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1
)
image_feat_m_all = torch.cat(
[image_feat_m.t(), self.image_queue.clone().detach()], dim=1
)
text_output_m = self.text_encoder_m.forward_text(text)
text_embeds_m = text_output_m.last_hidden_state
text_feat_m = F.normalize(self.text_proj_m(text_embeds_m[:, 0, :]), dim=-1)
text_feat_m_all = torch.cat(
[text_feat_m.t(), self.text_queue.clone().detach()], dim=1
)
sim_i2t_m = image_feat_m @ text_feat_m_all / self.temp
sim_t2i_m = text_feat_m @ image_feat_m_all / self.temp
sim_i2t_targets = (
alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
)
sim_t2i_targets = (
alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets
)
sim_i2t = image_feat @ text_feat_m_all / self.temp
sim_t2i = text_feat @ image_feat_m_all / self.temp
loss_i2t = -torch.sum(
F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1
).mean()
loss_t2i = -torch.sum(
F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1
).mean()
loss_itc = (loss_i2t + loss_t2i) / 2
self._dequeue_and_enqueue(image_feat_m, text_feat_m, idx)
# Image-text Matching
encoder_input_ids = text.input_ids.clone()
encoder_input_ids[:, 0] = self.tokenizer.enc_token_id
        # forward the positive image-text pair
bs = image.size(0)
output_pos = self.text_encoder(
encoder_input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
idxs = concat_all_gather(idx)
if self.negative_all_rank:
# compute sample similarity
with torch.no_grad():
mask = torch.eq(idx, idxs.t())
image_feat_world = concat_all_gather(image_feat)
text_feat_world = concat_all_gather(text_feat)
sim_i2t = image_feat @ text_feat_world.t() / self.temp
sim_t2i = text_feat @ image_feat_world.t() / self.temp
weights_i2t = F.softmax(sim_i2t, dim=1)
weights_i2t.masked_fill_(mask, 0)
weights_t2i = F.softmax(sim_t2i, dim=1)
weights_t2i.masked_fill_(mask, 0)
image_embeds_world = all_gather_with_grad(image_embeds)
# select a negative image (from all ranks) for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds_world[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text (from all ranks) for each image
input_ids_world = concat_all_gather(encoder_input_ids)
att_mask_world = concat_all_gather(text.attention_mask)
text_ids_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_ids_neg.append(input_ids_world[neg_idx])
text_atts_neg.append(att_mask_world[neg_idx])
else:
with torch.no_grad():
mask = torch.eq(idx, idx.t())
sim_i2t = image_feat @ text_feat.t() / self.temp
sim_t2i = text_feat @ image_feat.t() / self.temp
weights_i2t = F.softmax(sim_i2t, dim=1)
weights_i2t.masked_fill_(mask, 0)
weights_t2i = F.softmax(sim_t2i, dim=1)
weights_t2i.masked_fill_(mask, 0)
# select a negative image (from same rank) for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text (from same rank) for each image
text_ids_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_ids_neg.append(encoder_input_ids[neg_idx])
text_atts_neg.append(text.attention_mask[neg_idx])
text_ids_neg = torch.stack(text_ids_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)
image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
image_atts_all = torch.cat([image_atts, image_atts], dim=0)
output_neg = self.text_encoder(
text_ids_all,
attention_mask=text_atts_all,
encoder_hidden_states=image_embeds_all,
encoder_attention_mask=image_atts_all,
return_dict=True,
)
vl_embeddings = torch.cat(
[
output_pos.last_hidden_state[:, 0, :],
output_neg.last_hidden_state[:, 0, :],
],
dim=0,
)
itm_logits = self.itm_head(vl_embeddings)
itm_labels = torch.cat(
[torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
dim=0,
).to(self.device)
loss_itm = F.cross_entropy(itm_logits, itm_labels)
return BlipOutput(
loss=loss_itc + loss_itm,
loss_itc=loss_itc,
loss_itm=loss_itm,
sims=BlipSimilarity(
sim_i2t=sim_i2t,
sim_t2i=sim_t2i,
sim_i2t_m=sim_i2t_m,
sim_t2i_m=sim_t2i_m,
sim_i2t_targets=sim_i2t_targets,
sim_t2i_targets=sim_t2i_targets,
),
intermediate_output=BlipIntermediateOutput(
image_embeds=image_embeds,
image_embeds_m=image_embeds_m,
text_embeds=text_embeds,
text_embeds_m=text_embeds_m,
encoder_output=output_pos,
encoder_output_neg=output_neg,
itm_logits=itm_logits,
itm_labels=itm_labels,
),
)
def reset_queue_ptr(self):
self.queue_ptr = torch.zeros(1, dtype=torch.long)
@classmethod
def from_config(cls, cfg=None):
# set from_pretrained=True to load weights for 'bert-base-uncased'
image_encoder = VisionTransformerEncoder.from_config(cfg)
text_encoder = XBertEncoder.from_config(cfg)
embed_dim = cfg.get("embed_dim", 256)
momentum = cfg.get("momentum", 0.995)
alpha = cfg.get("alpha", 0.4)
negative_all_rank = cfg.get("negative_all_rank", False)
queue_size = cfg.get("queue_size", 0)
max_txt_len = cfg.get("max_txt_len", 35)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
queue_size=queue_size,
alpha=alpha,
embed_dim=embed_dim,
momentum=momentum,
negative_all_rank=negative_all_rank,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
model.reset_queue_ptr()
return model
def compute_sim_matrix(self, data_loader, task_cfg):
"""
Compute similarity i2t, t2i matrix for the given data loader.
"""
k_test = task_cfg.k_test
return compute_sim_matrix(model=self, data_loader=data_loader, k_test=k_test)
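if __name__ == "__main__":
    # Minimal sketch of the distillation-weight schedule implemented by
    # _rampup_factor: the soft-target weight alpha ramps up linearly over the
    # first two epochs and then stays at its configured value (0.4 below,
    # matching the default). num_iters_per_epoch is an illustrative assumption.
    alpha_max, num_iters_per_epoch = 0.4, 100
    for epoch, iters in [(0, 0), (0, 50), (1, 0), (2, 0), (5, 0)]:
        rampup = min(1, (epoch * num_iters_per_epoch + iters) / (2 * num_iters_per_epoch))
        print(epoch, iters, round(alpha_max * rampup, 3))  # -> 0.0, 0.1, 0.2, 0.4, 0.4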
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_retrieval.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.modeling_outputs import (
ModelOutput,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
)
@dataclass
class BlipSimilarity(ModelOutput):
sim_i2t: torch.FloatTensor = None
sim_t2i: torch.FloatTensor = None
sim_i2t_m: Optional[torch.FloatTensor] = None
sim_t2i_m: Optional[torch.FloatTensor] = None
sim_i2t_targets: Optional[torch.FloatTensor] = None
sim_t2i_targets: Optional[torch.FloatTensor] = None
@dataclass
class BlipIntermediateOutput(ModelOutput):
"""
Data class for intermediate outputs of BLIP models.
image_embeds (torch.FloatTensor): Image embeddings, shape (batch_size, num_patches, embed_dim).
text_embeds (torch.FloatTensor): Text embeddings, shape (batch_size, seq_len, embed_dim).
image_embeds_m (torch.FloatTensor): Image embeddings from momentum visual encoder, shape (batch_size, num_patches, embed_dim).
text_embeds_m (torch.FloatTensor): Text embeddings from momentum text encoder, shape (batch_size, seq_len, embed_dim).
encoder_output (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder.
encoder_output_neg (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder for negative pairs.
decoder_output (CausalLMOutputWithCrossAttentions): output from the image-grounded text decoder.
decoder_labels (torch.LongTensor): labels for the captioning loss.
itm_logits (torch.FloatTensor): logits for the image-text matching loss, shape (batch_size * 3, 2).
itm_labels (torch.LongTensor): labels for the image-text matching loss, shape (batch_size * 3,)
"""
# uni-modal features
image_embeds: torch.FloatTensor = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds_m: Optional[torch.FloatTensor] = None
text_embeds_m: Optional[torch.FloatTensor] = None
# intermediate outputs of multimodal encoder
encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
itm_logits: Optional[torch.FloatTensor] = None
itm_labels: Optional[torch.LongTensor] = None
# intermediate outputs of multimodal decoder
decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None
decoder_labels: Optional[torch.LongTensor] = None
@dataclass
class BlipOutput(ModelOutput):
# some finetuned models (e.g. BlipVQA) do not compute similarity, thus optional.
sims: Optional[BlipSimilarity] = None
intermediate_output: BlipIntermediateOutput = None
loss: Optional[torch.FloatTensor] = None
loss_itc: Optional[torch.FloatTensor] = None
loss_itm: Optional[torch.FloatTensor] = None
loss_lm: Optional[torch.FloatTensor] = None
@dataclass
class BlipOutputWithLogits(BlipOutput):
logits: torch.FloatTensor = None
logits_m: torch.FloatTensor = None
@dataclass
class BlipOutputFeatures(ModelOutput):
"""
Data class of features from BlipFeatureExtractor.
Args:
image_embeds: (torch.FloatTensor) of shape (batch_size, num_patches+1, embed_dim), optional
image_features: (torch.FloatTensor) of shape (batch_size, num_patches+1, feature_dim), optional
text_embeds: (torch.FloatTensor) of shape (batch_size, sequence_length+1, embed_dim), optional
text_features: (torch.FloatTensor) of shape (batch_size, sequence_length+1, feature_dim), optional
The first embedding or feature is for the [CLS] token.
Features are obtained by projecting the corresponding embedding into a normalized low-dimensional space.
"""
image_embeds: Optional[torch.FloatTensor] = None
image_embeds_proj: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
text_embeds_proj: Optional[torch.FloatTensor] = None
multimodal_embeds: Optional[torch.FloatTensor] = None
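if __name__ == "__main__":
    # Minimal sketch (illustrative loss values): the output dataclasses inherit
    # from ModelOutput, so any field that is set can be read by attribute or by
    # key, and .keys() lists the non-None fields.
    out = BlipOutput(
        loss=torch.tensor(1.5),
        loss_itc=torch.tensor(0.7),
        loss_itm=torch.tensor(0.8),
    )
    print(out.loss, out["loss_itc"], list(out.keys()))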
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_outputs.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import os
import torch
import torch.nn.functional as F
from lavis.common.dist_utils import download_cached_file
from lavis.common.registry import registry
from lavis.common.utils import get_abs_path, is_url
from lavis.models.base_model import MomentumDistilationMixin
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import BlipIntermediateOutput, BlipOutput
from lavis.models.blip_models.nlvr_encoder import BertModel
from lavis.models.vit import VisionTransformerEncoder, interpolate_pos_embed
from torch import nn
from transformers import BertConfig
@registry.register_model("blip_nlvr")
class BlipNLVR(BlipBase, MomentumDistilationMixin):
"""
Class for BLIP NLVR model.
Supported model types:
- base: model with pre-trained BLIP weights, used as initialization for fine-tuning.
- nlvr: finetuned model on NLVR2 dataset.
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip_nlvr", "nlvr")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"nlvr": "configs/models/blip_nlvr.yaml",
}
def __init__(self, image_encoder, text_encoder, num_classes):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
hidden_size = text_encoder.config.hidden_size
self.cls_head = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, num_classes),
)
def forward(self, samples, is_train=True):
"""
Forward function for training and evaluation.
Args:
samples (dict): a dict of input samples, which contains the following keys:
- image0 (torch.Tensor): input image 0, shape (batch_size, 3, H, W), default H=384, W=384.
- image1 (torch.Tensor): input image 1, shape (batch_size, 3, H, W), default H=384, W=384.
- text_input (list): list of strings, each string is a natural language sentence.
- label (torch.LongTensor): ground truth label with shape (batch_size,).
is_train (bool): whether the model is in training mode.
If True, the model will return the loss;
If False, the model will return the prediction.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("blip_nlvr", "nlvr")
>>> samples = {
... "image0": torch.randn(2, 3, 384, 384),
... "image1": torch.randn(2, 3, 384, 384),
... "text_input": ["there is a ferret in tall grass", "there are lips in one of the images"],
... "label": torch.tensor([0, 1]),
... }
>>> output = model(samples)
>>> output.keys()
odict_keys(['intermediate_output', 'loss'])
"""
text = samples["text_input"]
text = self.tokenizer(text, padding="longest", return_tensors="pt").to(
self.device
)
text.input_ids[:, 0] = self.tokenizer.enc_token_id
targets = samples["label"]
image0 = samples["image0"]
image1 = samples["image1"]
images = torch.cat([image0, image1], dim=0)
image_embeds = self.visual_encoder.forward_features(images)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
image0_embeds, image1_embeds = torch.split(image_embeds, targets.size(0))
encoder_output = self.text_encoder(
text.input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=[image0_embeds, image1_embeds],
encoder_attention_mask=[
image_atts[: image0_embeds.size(0)],
image_atts[image0_embeds.size(0) :],
],
return_dict=True,
)
prediction = self.cls_head(encoder_output.last_hidden_state[:, 0, :])
if is_train:
loss = F.cross_entropy(prediction, targets)
# return {"loss": loss}
return BlipOutput(
loss=loss,
intermediate_output=BlipIntermediateOutput(
image_embeds=torch.stack([image0_embeds, image1_embeds], dim=0),
encoder_output=encoder_output,
),
)
else:
return {"predictions": prediction, "targets": targets}
def predict(self, samples):
output = self.forward(samples, is_train=False)
return output
@classmethod
def from_config(cls, cfg=None):
image_encoder = VisionTransformerEncoder.from_config(cfg)
# text encoder + multimodal encoder
bert_config = BertConfig.from_json_file(get_abs_path(cfg["med_config_path"]))
text_encoder = BertModel(config=bert_config, add_pooling_layer=False)
num_classes = cfg.get("num_classes", 3)
assert num_classes > 1, "Invalid number of classes provided, found {}".format(
num_classes
)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
num_classes=num_classes,
)
model.load_checkpoint_from_config(cfg)
return model
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
state_dict["visual_encoder.pos_embed"] = interpolate_pos_embed(
state_dict["visual_encoder.pos_embed"], self.visual_encoder
)
for key in list(state_dict.keys()):
if "crossattention.self." in key:
new_key0 = key.replace("self", "self0")
new_key1 = key.replace("self", "self1")
state_dict[new_key0] = state_dict[key]
state_dict[new_key1] = state_dict[key]
elif "crossattention.output.dense." in key:
new_key0 = key.replace("dense", "dense0")
new_key1 = key.replace("dense", "dense1")
state_dict[new_key0] = state_dict[key]
state_dict[new_key1] = state_dict[key]
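# Note: the NLVR2 text encoder keeps two cross-attention branches per layer (suffixes 0/1), one for
# each input image, so the single-branch pretrained cross-attention weights are copied into both.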
msg = self.load_state_dict(state_dict, strict=False)
print("load checkpoint from %s" % url_or_filename)
print(f"missing keys {msg.missing_keys}")
return msg
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_nlvr.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.base_model import MomentumDistilationMixin, SharedQueueMixin
from lavis.models.blip_models import tie_encoder_decoder_weights
from lavis.models.blip_models.blip import BlipBase
from lavis.models.blip_models.blip_outputs import (
BlipOutput,
BlipSimilarity,
BlipIntermediateOutput,
)
from lavis.models.med import XBertEncoder, XBertLMHeadDecoder
from lavis.models.vit import VisionTransformerEncoder
from torch import nn
@registry.register_model("blip_pretrain")
class BlipPretrain(BlipBase, SharedQueueMixin, MomentumDistilationMixin):
"""
BLIP pretrain model.
Supported model types:
- base: BLIP base model before pretraining.
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"base": "configs/models/blip_pretrain_base.yaml",
# "large": "configs/models/blip_pretrain_large.yaml",
}
def __init__(
self,
image_encoder,
text_encoder,
text_decoder,
queue_size,
alpha=0.4,
embed_dim=256,
momentum=0.995,
tie_enc_dec_weights=True,
max_txt_len=30,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
text_encoder.resize_token_embeddings(len(self.tokenizer))
text_decoder.resize_token_embeddings(len(self.tokenizer))
if tie_enc_dec_weights:
tie_encoder_decoder_weights(
encoder=text_encoder,
decoder=text_decoder.bert,
base_model_prefix="",
skip_key="/attention",
)
self.visual_encoder = image_encoder
self.text_encoder = text_encoder
self.text_decoder = text_decoder
# creating projection layers for ITC
text_width = text_encoder.config.hidden_size
vision_width = image_encoder.vision_width
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.itm_head = nn.Linear(text_width, 2)
# create the momentum encoder
self.visual_encoder_m = deepcopy(self.visual_encoder)
self.text_encoder_m = deepcopy(self.text_encoder)
self.vision_proj_m = deepcopy(self.vision_proj)
self.text_proj_m = deepcopy(self.text_proj)
self.model_pairs = [
[self.visual_encoder, self.visual_encoder_m],
[self.text_encoder, self.text_encoder_m],
[self.vision_proj, self.vision_proj_m],
[self.text_proj, self.text_proj_m],
]
self.copy_params()
# create the queue
self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
self.queue_size = queue_size
self.momentum = momentum
self.temp = nn.Parameter(0.07 * torch.ones([]))
self.alpha = alpha
self.max_txt_len = max_txt_len
def _rampup_factor(self, epoch, iters, num_iters_per_epoch):
return min(1, (epoch * num_iters_per_epoch + iters) / (2 * num_iters_per_epoch))
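# Worked example (illustrative numbers): with num_iters_per_epoch=100 the factor is
# (epoch * 100 + iters) / 200, so the distillation weight alpha ramps linearly from 0 at the start
# of training to its full value after two epochs, e.g. _rampup_factor(1, 50, 100) == 0.75.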
def forward(self, samples):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W). The input images. Default: H=224, W=224.
- text_input (list): A list of length batch_size, each element is a string of text/caption.
- epoch (int): The current epoch.
- iters (int): The current iteration.
- num_iters_per_epoch (int): The number of iterations per epoch.
Returns:
BlipOutput: A BlipOutput object containing loss and intermediate output. See ``lavis.models.blip_models.blip_outputs.BlipOutput`` for more details.
Examples:
>>> import torch
>>> from lavis.models import load_model
>>> model = load_model("blip_pretrain", "base")
>>> images = torch.randn(4, 3, 224, 224)
>>> text_input = ["caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3"]
>>> samples = {"image": images, "text_input": text_input, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100}
>>> output = model(samples)
>>> output.keys()
odict_keys(['sims', 'intermediate_output', 'loss', 'loss_itc', 'loss_itm', 'loss_lm'])
>>> output.intermediate_output.keys()
odict_keys(['image_embeds', 'text_embeds', 'image_embeds_m', 'text_embeds_m', 'encoder_output', 'encoder_output_neg', 'itm_logits', 'itm_labels', 'decoder_output', 'decoder_labels'])
>>> output.intermediate_output.image_embeds.shape
>>> # shape: (batch_size, num_patches, embed_dim)
torch.Size([4, 197, 768])
>>> output.intermediate_output.text_embeds.shape
>>> # shape: (batch_size, max_txt_len, embed_dim)
torch.Size([4, 30, 768])
>>> output.intermediate_output.image_embeds_m.shape
>>> # shape: (batch_size, num_patches, embed_dim)
torch.Size([4, 197, 768])
>>> output.intermediate_output.text_embeds_m.shape
>>> # shape: (batch_size, max_txt_len, embed_dim)
torch.Size([4, 30, 768])
>>> output.intermediate_output.itm_logits.shape
>>> # shape: (batch_size * 3, 2)
torch.Size([12, 2])
>>> output.intermediate_output.itm_labels.shape
>>> # shape: (batch_size * 3,)
torch.Size([12])
>>> output.intermediate_output.encoder_output.last_hidden_state.shape
>>> # shape: (batch_size, max_txt_len, embed_dim)
torch.Size([4, 30, 768])
>>> output.intermediate_output.encoder_output_m.last_hidden_state.shape
>>> # shape: (batch_size, max_txt_len, embed_dim)
torch.Size([4, 30, 768])
>>> output.intermediate_output.decoder_output.logits.shape
>>> # shape: (batch_size, max_txt_len, vocab_size)
torch.Size([4, 30, 30524])
>>> output.intermediate_output.decoder_labels.shape
>>> # shape: (batch_size, max_txt_len)
torch.Size([4, 30])
"""
image = samples["image"]
caption = samples["text_input"]
alpha = self.alpha * self._rampup_factor(
epoch=samples["epoch"],
iters=samples["iters"],
num_iters_per_epoch=samples["num_iters_per_epoch"],
)
with torch.no_grad():
self.temp.clamp_(0.001, 0.5)
# image embeddings and features
image_embeds = self.visual_encoder.forward_features(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text = self.tokenizer(
caption,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
# text embeddings and features
text_output = self.text_encoder.forward_text(text)
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(self.text_proj(text_embeds[:, 0, :]), dim=-1)
# get momentum features
with torch.no_grad():
self._momentum_update()
image_embeds_m = self.visual_encoder_m(image)
image_feat_m = F.normalize(
self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1
)
image_feat_all = torch.cat(
[image_feat_m.t(), self.image_queue.clone().detach()], dim=1
)
text_output_m = self.text_encoder_m.forward_text(text)
text_embeds_m = text_output_m.last_hidden_state
text_feat_m = F.normalize(self.text_proj_m(text_embeds_m[:, 0, :]), dim=-1)
text_feat_all = torch.cat(
[text_feat_m.t(), self.text_queue.clone().detach()], dim=1
)
sim_i2t_m = image_feat_m @ text_feat_all / self.temp
sim_t2i_m = text_feat_m @ image_feat_all / self.temp
sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
sim_targets.fill_diagonal_(1)
sim_i2t_targets = (
alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
)
sim_t2i_targets = (
alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets
)
sim_i2t = image_feat @ text_feat_all / self.temp
sim_t2i = text_feat @ image_feat_all / self.temp
loss_i2t = -torch.sum(
F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1
).mean()
loss_t2i = -torch.sum(
F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1
).mean()
loss_itc = (loss_i2t + loss_t2i) / 2
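# Note: loss_itc is a soft-label cross-entropy. The targets blend the one-hot in-batch/queue labels
# with the momentum encoder's similarity distribution, weighted by the ramped-up alpha above.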
self._dequeue_and_enqueue(image_feat_m, text_feat_m)
# Image-text Matching
encoder_input_ids = text.input_ids.clone()
encoder_input_ids[:, 0] = self.tokenizer.enc_token_id
# forward the positive image-text pair
bs = image.size(0)
output_pos = self.text_encoder(
encoder_input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
with torch.no_grad():
weights_t2i = F.softmax(sim_t2i[:, :bs], dim=1) + 1e-4
weights_t2i.fill_diagonal_(0)
weights_i2t = F.softmax(sim_i2t[:, :bs], dim=1) + 1e-4
weights_i2t.fill_diagonal_(0)
# select a negative image for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text for each image
text_ids_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_ids_neg.append(encoder_input_ids[neg_idx])
text_atts_neg.append(text.attention_mask[neg_idx])
text_ids_neg = torch.stack(text_ids_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)
image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
image_atts_all = torch.cat([image_atts, image_atts], dim=0)
output_neg = self.text_encoder(
text_ids_all,
attention_mask=text_atts_all,
encoder_hidden_states=image_embeds_all,
encoder_attention_mask=image_atts_all,
return_dict=True,
)
vl_embeddings = torch.cat(
[
output_pos.last_hidden_state[:, 0, :],
output_neg.last_hidden_state[:, 0, :],
],
dim=0,
)
itm_logits = self.itm_head(vl_embeddings)
itm_labels = torch.cat(
[torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
dim=0,
).to(image.device)
loss_itm = F.cross_entropy(itm_logits, itm_labels)
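# Note on the ITM batch layout: the head scores bs positive pairs followed by 2 * bs hard negatives
# (bs texts paired with sampled mismatched images, then bs negative texts paired with the original
# images), hence labels of ones(bs) and zeros(2 * bs).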
# LM
decoder_input_ids = text.input_ids.clone()
decoder_input_ids[:, 0] = self.tokenizer.bos_token_id
decoder_targets = decoder_input_ids.masked_fill(
decoder_input_ids == self.tokenizer.pad_token_id, -100
)
decoder_output = self.text_decoder(
decoder_input_ids,
attention_mask=text.attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
labels=decoder_targets,
return_dict=True,
)
loss_lm = decoder_output.loss
return BlipOutput(
loss=loss_itc + loss_itm + loss_lm,
loss_itc=loss_itc,
loss_itm=loss_itm,
loss_lm=loss_lm,
sims=BlipSimilarity(
sim_i2t=sim_i2t,
sim_t2i=sim_t2i,
sim_i2t_m=sim_i2t_m,
sim_t2i_m=sim_t2i_m,
sim_i2t_targets=sim_i2t_targets,
sim_t2i_targets=sim_t2i_targets,
),
intermediate_output=BlipIntermediateOutput(
image_embeds=image_embeds,
text_embeds=text_embeds,
image_embeds_m=image_embeds_m,
text_embeds_m=text_embeds_m,
encoder_output=output_pos,
encoder_output_neg=output_neg,
itm_logits=itm_logits,
itm_labels=itm_labels,
decoder_output=decoder_output,
decoder_labels=decoder_targets,
),
)
def reset_queue_ptr(self):
self.queue_ptr = torch.zeros(1, dtype=torch.long)
@classmethod
def from_config(cls, cfg=None):
# set from_pretrained=True to load weights for 'bert-base-uncased'
image_encoder = VisionTransformerEncoder.from_config(cfg, from_pretrained=True)
text_encoder = XBertEncoder.from_config(cfg, from_pretrained=True)
text_decoder = XBertLMHeadDecoder.from_config(cfg, from_pretrained=True)
embed_dim = cfg.get("embed_dim", 256)
momentum = cfg.get("momentum", 0.995)
alpha = cfg.get("alpha", 0.4)
max_txt_len = cfg.get("max_txt_len", 30)
queue_size = cfg.get("queue_size", 57600)
model = cls(
image_encoder=image_encoder,
text_encoder=text_encoder,
text_decoder=text_decoder,
embed_dim=embed_dim,
queue_size=queue_size,
momentum=momentum,
alpha=alpha,
tie_enc_dec_weights=True,
max_txt_len=max_txt_len,
)
# [IMPORTANT] to reset queue pointer to 0.
# Otherwise when updating last batch in the queue, the batch size and remaining queue length may be un-equal.
model.reset_queue_ptr()
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip_models/blip_pretrain.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/facebookresearch/TimeSformer
"""
# Copyright 2020 Ross Wightman
# Conv2d w/ Same Padding
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
import math
from typing import List, Tuple
from .vit_utils import is_static_pad, get_padding
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(
iw, k[1], s[1], d[1]
)
if pad_h > 0 or pad_w > 0:
x = F.pad(
x,
[pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
value=value,
)
return x
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, k: int, s: int, d: int):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
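# Worked example: for an input side of 224 with k=3, s=2, d=1 this gives
# max((ceil(224 / 2) - 1) * 2 + (3 - 1) * 1 + 1 - 224, 0) = 1, which pad_same then splits
# asymmetrically (0 before, 1 after), matching TensorFlow 'SAME' behavior.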
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == "same":
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == "valid":
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic
def conv2d_same(
x,
weight: torch.Tensor,
bias: Optional[torch.Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
groups: int = 1,
):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
"""Tensorflow like 'SAME' convolution wrapper for 2D convolutions"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias
)
def forward(self, x):
return conv2d_same(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop("padding", "")
kwargs.setdefault("bias", False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
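# Usage sketch (illustrative values): padding="same" with stride=2 cannot be expressed as a static
# symmetric pad, so a dynamically padded Conv2dSame is returned; with stride=1 a plain nn.Conv2d
# with padding=1 is returned instead.
# conv = create_conv2d_pad(3, 64, kernel_size=3, stride=2, padding="same")
# y = conv(torch.randn(1, 3, 224, 224))  # -> (1, 64, 112, 112)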
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/conv2d_same.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
""" Linear layer (alternate definition)
"""
import torch
import torch.nn.functional as F
from torch import nn as nn
class Linear(nn.Linear):
def forward(self, input: torch.Tensor) -> torch.Tensor:
if torch.jit.is_scripting():
bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias)
else:
return F.linear(input, self.weight, self.bias)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/linear.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/facebookresearch/TimeSformer
"""
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/facebookresearch/TimeSformer
"""
# Copyright 2020 Ross Wightman
from collections import OrderedDict, defaultdict
from copy import deepcopy
from functools import partial
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
class FeatureInfo:
def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):
prev_reduction = 1
for fi in feature_info:
# sanity check the mandatory fields, there may be additional fields depending on the model
assert "num_chs" in fi and fi["num_chs"] > 0
assert "reduction" in fi and fi["reduction"] >= prev_reduction
prev_reduction = fi["reduction"]
assert "module" in fi
self.out_indices = out_indices
self.info = feature_info
def from_other(self, out_indices: Tuple[int]):
return FeatureInfo(deepcopy(self.info), out_indices)
def get(self, key, idx=None):
"""Get value by key at specified index (indices)
if idx == None, returns value for key at each output index
if idx is an integer, return value for that feature module index (ignoring output indices)
if idx is a list/tupple, return value for each module index (ignoring output indices)
"""
if idx is None:
return [self.info[i][key] for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [self.info[i][key] for i in idx]
else:
return self.info[idx][key]
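# Example (hypothetical feature_info entries): with out_indices=(1, 3),
# get("num_chs") -> [chs_1, chs_3]; get("num_chs", 2) -> chs_2; get("num_chs", (0, 2)) -> [chs_0, chs_2].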
def get_dicts(self, keys=None, idx=None):
"""return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)"""
if idx is None:
if keys is None:
return [self.info[i] for i in self.out_indices]
else:
return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [
self.info[i] if keys is None else {k: self.info[i][k] for k in keys}
for i in idx
]
else:
return (
self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}
)
def channels(self, idx=None):
"""feature channels accessor"""
return self.get("num_chs", idx)
def reduction(self, idx=None):
"""feature reduction (output stride) accessor"""
return self.get("reduction", idx)
def module_name(self, idx=None):
"""feature module name accessor"""
return self.get("module", idx)
def __getitem__(self, item):
return self.info[item]
def __len__(self):
return len(self.info)
class FeatureHooks:
"""Feature Hook Helper
This module helps with the setup and extraction of hooks for extracting features from
internal nodes in a model by node name. This works quite well in eager Python but needs
redesign for torcscript.
"""
def __init__(self, hooks, named_modules, out_map=None, default_hook_type="forward"):
# setup feature hooks
modules = {k: v for k, v in named_modules}
for i, h in enumerate(hooks):
hook_name = h["module"]
m = modules[hook_name]
hook_id = out_map[i] if out_map else hook_name
hook_fn = partial(self._collect_output_hook, hook_id)
hook_type = h["hook_type"] if "hook_type" in h else default_hook_type
if hook_type == "forward_pre":
m.register_forward_pre_hook(hook_fn)
elif hook_type == "forward":
m.register_forward_hook(hook_fn)
else:
assert False, "Unsupported hook type"
self._feature_outputs = defaultdict(OrderedDict)
def _collect_output_hook(self, hook_id, *args):
x = args[
-1
] # tensor we want is last argument, output for fwd, input for fwd_pre
if isinstance(x, tuple):
x = x[0] # unwrap input tuple
self._feature_outputs[x.device][hook_id] = x
def get_output(self, device) -> Dict[str, torch.tensor]:
output = self._feature_outputs[device]
self._feature_outputs[device] = OrderedDict() # clear after reading
return output
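# Usage sketch (module names are illustrative): register hooks on named submodules, run a forward
# pass, then read the captured activations keyed by module name (or by out_map entries if given).
# hooks = [{"module": "blocks.3"}, {"module": "blocks.7", "hook_type": "forward"}]
# fh = FeatureHooks(hooks, model.named_modules())
# _ = model(x)
# feats = fh.get_output(x.device)  # {"blocks.3": Tensor, "blocks.7": Tensor}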
def _module_list(module, flatten_sequential=False):
# a yield/iter would be better for this but wouldn't be compatible with torchscript
ml = []
for name, module in module.named_children():
if flatten_sequential and isinstance(module, nn.Sequential):
# first level of Sequential containers is flattened into containing model
for child_name, child_module in module.named_children():
combined = [name, child_name]
ml.append(("_".join(combined), ".".join(combined), child_module))
else:
ml.append((name, name, module))
return ml
def _get_feature_info(net, out_indices):
feature_info = getattr(net, "feature_info")
if isinstance(feature_info, FeatureInfo):
return feature_info.from_other(out_indices)
elif isinstance(feature_info, (list, tuple)):
return FeatureInfo(net.feature_info, out_indices)
else:
assert False, "Provided feature_info is not valid"
def _get_return_layers(feature_info, out_map):
module_names = feature_info.module_name()
return_layers = {}
for i, name in enumerate(module_names):
return_layers[name] = (
out_map[i] if out_map is not None else feature_info.out_indices[i]
)
return return_layers
class FeatureDictNet(nn.ModuleDict):
"""Feature extractor with OrderedDict return
Wrap a model and extract features as specified by the out indices, the network is
partially re-built from contained modules.
There is a strong assumption that the modules have been registered into the model in the same
order as they are used. There should be no reuse of the same nn.Module more than once, including
trivial modules like `self.relu = nn.ReLU()`.
Only submodules that are directly assigned to the model class (`model.feature1`) or at most
one Sequential container deep (`model.features.1`, with flatten_sequential=True) can be captured.
All Sequential containers that are directly assigned to the original model will have their
modules assigned to this module, with names such as `model.features.1` changed to `model.features_1`.
Arguments:
model (nn.Module): model from which we will extract the features
out_indices (tuple[int]): model output indices to extract features for
out_map (sequence): list or tuple specifying desired return id for each out index,
otherwise str(index) is used
feature_concat (bool): whether to concatenate intermediate features that are lists or tuples
vs select element [0]
flatten_sequential (bool): whether to flatten sequential modules assigned to model
"""
def __init__(
self,
model,
out_indices=(0, 1, 2, 3, 4),
out_map=None,
feature_concat=False,
flatten_sequential=False,
):
super(FeatureDictNet, self).__init__()
self.feature_info = _get_feature_info(model, out_indices)
self.concat = feature_concat
self.return_layers = {}
return_layers = _get_return_layers(self.feature_info, out_map)
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = set(return_layers.keys())
layers = OrderedDict()
for new_name, old_name, module in modules:
layers[new_name] = module
if old_name in remaining:
# return id has to be consistently str type for torchscript
self.return_layers[new_name] = str(return_layers[old_name])
remaining.remove(old_name)
if not remaining:
break
assert not remaining and len(self.return_layers) == len(
return_layers
), f"Return layers ({remaining}) are not present in model"
self.update(layers)
def _collect(self, x) -> (Dict[str, torch.Tensor]):
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_id = self.return_layers[name]
if isinstance(x, (tuple, list)):
# If model tap is a tuple or list, concat or select first element
# FIXME this may need to be more generic / flexible for some nets
out[out_id] = torch.cat(x, 1) if self.concat else x[0]
else:
out[out_id] = x
return out
def forward(self, x) -> Dict[str, torch.Tensor]:
return self._collect(x)
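# Usage sketch (backbone is a hypothetical timm-style model exposing `feature_info`):
# feat_net = FeatureDictNet(backbone, out_indices=(1, 2, 3), flatten_sequential=True)
# feats = feat_net(torch.randn(1, 3, 224, 224))  # OrderedDict keyed by str(out index)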
class FeatureListNet(FeatureDictNet):
"""Feature extractor with list return
See the docstring for FeatureDictNet above; this class exists only to appease TorchScript typing constraints.
In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool.
"""
def __init__(
self,
model,
out_indices=(0, 1, 2, 3, 4),
out_map=None,
feature_concat=False,
flatten_sequential=False,
):
super(FeatureListNet, self).__init__(
model,
out_indices=out_indices,
out_map=out_map,
feature_concat=feature_concat,
flatten_sequential=flatten_sequential,
)
def forward(self, x) -> (List[torch.Tensor]):
return list(self._collect(x).values())
class FeatureHookNet(nn.ModuleDict):
"""FeatureHookNet
Wrap a model and extract features specified by the out indices using forward/forward-pre hooks.
If `no_rewrite` is True, features are extracted via hooks without modifying the underlying
network in any way.
If `no_rewrite` is False, the model will be re-written as in the
FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one.
FIXME this does not currently work with Torchscript, see FeatureHooks class
"""
def __init__(
self,
model,
out_indices=(0, 1, 2, 3, 4),
out_map=None,
out_as_dict=False,
no_rewrite=False,
feature_concat=False,
flatten_sequential=False,
default_hook_type="forward",
):
super(FeatureHookNet, self).__init__()
assert not torch.jit.is_scripting()
self.feature_info = _get_feature_info(model, out_indices)
self.out_as_dict = out_as_dict
layers = OrderedDict()
hooks = []
if no_rewrite:
assert not flatten_sequential
if hasattr(model, "reset_classifier"): # make sure classifier is removed?
model.reset_classifier(0)
layers["body"] = model
hooks.extend(self.feature_info.get_dicts())
else:
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = {
f["module"]: f["hook_type"] if "hook_type" in f else default_hook_type
for f in self.feature_info.get_dicts()
}
for new_name, old_name, module in modules:
layers[new_name] = module
for fn, fm in module.named_modules(prefix=old_name):
if fn in remaining:
hooks.append(dict(module=fn, hook_type=remaining[fn]))
del remaining[fn]
if not remaining:
break
assert (
not remaining
), f"Return layers ({remaining}) are not present in model"
self.update(layers)
self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
def forward(self, x):
for name, module in self.items():
x = module(x)
out = self.hooks.get_output(x.device)
return out if self.out_as_dict else list(out.values())
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/features.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/facebookresearch/TimeSformer
"""
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright 2020 Ross Wightman
# Modified Model definition
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import torch.utils.checkpoint
from einops import rearrange
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from .helpers import load_pretrained, load_pretrained_imagenet, load_pretrained_kinetics
from .vit_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
DropPath,
to_2tuple,
trunc_normal_,
)
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 224, 224),
"pool_size": None,
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
"first_conv": "patch_embed.proj",
"classifier": "head",
**kwargs,
}
default_cfgs = {
"vit_base_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
}
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
with_qkv=True,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim**-0.5
self.with_qkv = with_qkv
if self.with_qkv:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_drop = nn.Dropout(attn_drop)
def forward(self, x):
B, N, C = x.shape
if self.with_qkv:
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
qkv = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(
0, 2, 1, 3
)
q, k, v = qkv, qkv, qkv
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
if self.with_qkv:
x = self.proj(x)
x = self.proj_drop(x)
return x
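# Shape walk-through (with the qkv projection enabled): x is (B, N, C); qkv is reshaped to
# (3, B, num_heads, N, C // num_heads); attn = softmax(q @ k^T * scale) has shape (B, num_heads, N, N);
# attn @ v is reshaped back to (B, N, C) before the output projection.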
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
layer_num,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.1,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
attention_type="divided_space_time",
use_grad_checkpointing=False,
):
super().__init__()
self.attention_type = attention_type
assert attention_type in [
"divided_space_time",
"space_only",
"joint_space_time",
]
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# Temporal Attention Parameters
if self.attention_type == "divided_space_time":
self.temporal_norm1 = norm_layer(dim)
self.temporal_attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.temporal_fc = nn.Linear(dim, dim)
# drop path
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
# [dxli]
self.layer_num = layer_num
self.use_grad_checkpointing = use_grad_checkpointing
if use_grad_checkpointing:
self.temporal_attn = checkpoint_wrapper(self.temporal_attn)
self.attn = checkpoint_wrapper(self.attn)
self.mlp = checkpoint_wrapper(self.mlp)
def forward(self, x, B, T, W):
num_spatial_tokens = (x.size(1) - 1) // T
H = num_spatial_tokens // W
if self.attention_type in ["space_only", "joint_space_time"]:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
elif self.attention_type == "divided_space_time":
# Temporal
xt = x[:, 1:, :]
xt = rearrange(xt, "b (h w t) m -> (b h w) t m", b=B, h=H, w=W, t=T)
temporal_attn_out = self.temporal_attn(self.temporal_norm1(xt))
res_temporal = self.drop_path(temporal_attn_out)
res_temporal = rearrange(
res_temporal, "(b h w) t m -> b (h w t) m", b=B, h=H, w=W, t=T
)
res_temporal = self.temporal_fc(res_temporal)
xt = x[:, 1:, :] + res_temporal
# Spatial
init_cls_token = x[:, 0, :].unsqueeze(1)
cls_token = init_cls_token.repeat(1, T, 1)
cls_token = rearrange(cls_token, "b t m -> (b t) m", b=B, t=T).unsqueeze(1)
xs = xt
xs = rearrange(xs, "b (h w t) m -> (b t) (h w) m", b=B, h=H, w=W, t=T)
xs = torch.cat((cls_token, xs), 1)
spatial_attn_out = self.attn(self.norm1(xs))
res_spatial = self.drop_path(spatial_attn_out)
# Taking care of CLS token
cls_token = res_spatial[:, 0, :]
cls_token = rearrange(cls_token, "(b t) m -> b t m", b=B, t=T)
# averaging for every frame
cls_token = torch.mean(cls_token, 1, True)
res_spatial = res_spatial[:, 1:, :]
res_spatial = rearrange(
res_spatial, "(b t) (h w) m -> b (h w t) m", b=B, h=H, w=W, t=T
)
res = res_spatial
x = xt
# Mlp
x = torch.cat((init_cls_token, x), 1) + torch.cat((cls_token, res), 1)
x_res = x
x = self.norm2(x)
# x = x + self.drop_path(self.mlp(self.norm2(x)))
# MLP
mlp_out = self.mlp(x)
x = x_res + self.drop_path(mlp_out)
return x
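# Layout note for divided space-time attention: patch tokens are stored as (b, h*w*t, m). For the
# temporal step they are regrouped to (b*h*w, t, m) so each spatial location attends across frames;
# for the spatial step they are regrouped to (b*t, h*w, m) with a per-frame copy of the CLS token,
# whose outputs are averaged over frames before being added back to the residual stream.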
class PatchEmbed(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
def forward(self, x):
B, C, T, H, W = x.shape
x = rearrange(x, "b c t h w -> (b t) c h w")
x = self.proj(x)
W = x.size(-1)
x = x.flatten(2).transpose(1, 2)
return x, T, W
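# Shape note: a clip of shape (B, C, T, H, W) is flattened to B*T frames, each projected into
# (H // patch_size) * (W // patch_size) tokens, so the returned x has shape (B*T, num_patches, embed_dim)
# (e.g. (B*T, 196, 768) for 224x224 frames with 16x16 patches), alongside T and the patch-grid width W.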
class VisionTransformer(nn.Module):
"""Vision Transformere"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
hybrid_backbone=None,
norm_layer=nn.LayerNorm,
num_frames=8,
attention_type="divided_space_time",
dropout=0.0,
use_grad_checkpointing=False,
ckpt_layer=0,
):
super().__init__()
self.attention_type = attention_type
self.depth = depth
self.dropout = nn.Dropout(dropout)
self.num_classes = num_classes
# num_features for consistency with other models
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
# Positional Embeddings
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if self.attention_type != "space_only":
self.time_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
self.time_drop = nn.Dropout(p=drop_rate)
# Attention Blocks
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, self.depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
layer_num=i,
use_grad_checkpointing=(
use_grad_checkpointing and i >= self.depth - ckpt_layer
),
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
attention_type=self.attention_type,
)
for i in range(self.depth)
]
)
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = (
nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
)
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
# initialization of temporal attention weights
if self.attention_type == "divided_space_time":
i = 0
for m in self.blocks.modules():
m_str = str(m)
if "Block" in m_str:
if i > 0:
nn.init.constant_(m.temporal_fc.weight, 0)
nn.init.constant_(m.temporal_fc.bias, 0)
i += 1
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token", "time_embed"}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=""):
self.num_classes = num_classes
self.head = (
nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
)
def remove_classifier(self):
self.num_classes = 0
self.head = None
def forward_features(self, x):
B = x.shape[0]
x, T, W = self.patch_embed(x)
cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# resizing the positional embeddings in case they don't match the input at inference
if x.size(1) != self.pos_embed.size(1):
pos_embed = self.pos_embed
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
P = int(other_pos_embed.size(2) ** 0.5)
H = x.size(1) // W
other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)
new_pos_embed = F.interpolate(other_pos_embed, size=(H, W), mode="nearest")
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
x = self.pos_drop(x)
# Time Embeddings
if self.attention_type != "space_only":
cls_tokens = x[:B, 0, :].unsqueeze(1)
x = x[:, 1:]
x = rearrange(x, "(b t) n m -> (b n) t m", b=B, t=T)
# Resizing time embeddings in case they don't match
if T != self.time_embed.size(1):
time_embed = self.time_embed.transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(T), mode="nearest")
new_time_embed = new_time_embed.transpose(1, 2)
x = x + new_time_embed
else:
x = x + self.time_embed
x = self.time_drop(x)
x = rearrange(x, "(b n) t m -> b (n t) m", b=B, t=T)
x = torch.cat((cls_tokens, x), dim=1)
# Attention blocks
for blk in self.blocks:
x = blk(x, B, T, W)
# Predictions for space-only baseline
if self.attention_type == "space_only":
x = rearrange(x, "(b t) n m -> b t n m", b=B, t=T)
x = torch.mean(x, 1) # averaging predictions for every frame
x = self.norm(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _conv_filter(state_dict, patch_size=16):
"""convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if "patch_embed.proj.weight" in k:
if v.shape[-1] != patch_size:
patch_size = v.shape[-1]
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
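# Example: a linear patch-projection weight stored as (embed_dim, 3 * patch * patch) is reshaped to
# a conv weight of shape (embed_dim, 3, patch, patch) so it can be loaded into PatchEmbed.proj
# (for ViT-B/16: (768, 768) -> (768, 3, 16, 16)).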
class vit_base_patch16_224(nn.Module):
def __init__(self, cfg, **kwargs):
super(vit_base_patch16_224, self).__init__()
self.pretrained = True
patch_size = 16
self.model = VisionTransformer(
img_size=cfg.DATA.TRAIN_CROP_SIZE,
num_classes=cfg.MODEL.NUM_CLASSES,
patch_size=patch_size,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
num_frames=cfg.DATA.NUM_FRAMES,
attention_type=cfg.TIMESFORMER.ATTENTION_TYPE,
**kwargs,
)
self.attention_type = cfg.TIMESFORMER.ATTENTION_TYPE
self.model.default_cfg = default_cfgs["vit_base_patch16_224"]
self.num_patches = (cfg.DATA.TRAIN_CROP_SIZE // patch_size) * (
cfg.DATA.TRAIN_CROP_SIZE // patch_size
)
pretrained_model = cfg.TIMESFORMER.PRETRAINED_MODEL
if self.pretrained:
load_pretrained(
self.model,
num_classes=self.model.num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=_conv_filter,
img_size=cfg.DATA.TRAIN_CROP_SIZE,
num_patches=self.num_patches,
attention_type=self.attention_type,
pretrained_model=pretrained_model,
)
def forward(self, x):
x = self.model(x)
return x
class TimeSformer(nn.Module):
def __init__(
self,
image_size=224,
patch_size=16,
n_frms=8,
attn_drop_rate=0.0,
drop_path_rate=0.1,
drop_rate=0,
use_grad_ckpt=False,
ckpt_layer=0,
remove_classifier=True,
**kwargs,
):
super(TimeSformer, self).__init__()
self.img_size = image_size
self.patch_size = patch_size
self.num_frames = n_frms
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.drop_rate = drop_rate
self.use_grad_ckpt = use_grad_ckpt
self.ckpt_layer = ckpt_layer
self.attention_type = "divided_space_time"
logging.info(
f"Initializing TimeSformer with img_size={self.img_size}, patch_size={self.patch_size}, num_frames={self.num_frames}"
)
# will be ignored when loading official pretrained ckpt
self.num_classes = 400
self.model = VisionTransformer(
img_size=self.img_size,
num_classes=self.num_classes,
patch_size=self.patch_size,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
drop_rate=self.drop_rate,
attn_drop_rate=self.attn_drop_rate,
drop_path_rate=self.drop_path_rate,
num_frames=self.num_frames,
attention_type=self.attention_type,
use_grad_checkpointing=self.use_grad_ckpt,
ckpt_layer=self.ckpt_layer,
**kwargs,
)
if remove_classifier:
self.model.remove_classifier()
self.model.default_cfg = default_cfgs[
"vit_base_patch" + str(self.patch_size) + "_224"
]
self.num_patches = (self.img_size // self.patch_size) * (
self.img_size // self.patch_size
)
def forward(self, x):
x = self.model(x)
return x
def forward_features(self, x):
# b, c, t, h, w = x.shape
x = self.model.forward_features(x)
## apply pooling
W = H = self.img_size // self.patch_size
T = self.num_frames
cls_tokens = x[:, 0, :].unsqueeze(1)
other_tokens = x[:, 1:, :]
x = rearrange(other_tokens, "b (h w t) m -> b t (h w) m", h=H, w=W, t=T)
x = torch.mean(x, dim=1)
x = torch.cat((cls_tokens, x), dim=1)
return x
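# Shape note: the pooled output is (B, 1 + H'*W', embed_dim) -- the CLS token followed by patch
# tokens averaged over the T frames (e.g. (B, 1 + 196, 768) for 224x224 inputs with 16x16 patches).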
def load_state_dict(self, pretrained_ckpt_path):
logging.info(
"Loading TimeSformer checkpoints from {}".format(pretrained_ckpt_path)
)
if pretrained_ckpt_path == "vit_base_patch16_224":
load_ckpt_func = load_pretrained_imagenet
else:
load_ckpt_func = load_pretrained_kinetics
load_ckpt_func(
self.model,
num_classes=self.model.num_classes,
in_chans=3,
filter_fn=_conv_filter,
img_size=self.img_size,
num_frames=self.num_frames,
num_patches=self.num_patches,
attention_type=self.attention_type,
pretrained_model=pretrained_ckpt_path,
)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/vit.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/facebookresearch/TimeSformer
"""
# Copyright 2020 Ross Wightman
# Various utility functions
import torch
import torch.nn as nn
import math
import warnings
import torch.nn.functional as F
from itertools import repeat
import collections.abc as container_abcs
DEFAULT_CROP_PCT = 0.875
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255)
IMAGENET_DPN_STD = tuple([1 / (0.0167 * 255)] * 3)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
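# Worked example: a 3x3 conv with stride=1, dilation=1 gets padding ((1 - 1) + 1 * (3 - 1)) // 2 = 1,
# which preserves spatial size; a 5x5 conv gets padding 2.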
def get_padding_value(padding, kernel_size, **kwargs):
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == "same":
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == "valid":
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, k: int, s: int, d: int):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
# Dynamically pad input x with 'SAME' padding for conv with specified args
# def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
def pad_same(x, k, s, d=(1, 1), value=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(
iw, k[1], s[1], d[1]
)
if pad_h > 0 or pad_w > 0:
x = F.pad(
x,
[pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
value=value,
)
return x
def adaptive_pool_feat_mult(pool_type="avg"):
if pool_type == "catavgmax":
return 2
else:
return 1
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
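# Example: with drop_prob=0.1 during training, each sample's residual branch is zeroed with
# probability 0.1 and otherwise scaled by 1 / 0.9, keeping the expected output unchanged;
# with drop_prob=0.0 or in eval mode the input passes through untouched.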
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/vit_utils.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/facebookresearch/TimeSformer
"""
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright 2020 Ross Wightman
# Modified model creation / weight loading / state_dict helpers
import logging, warnings
import os
import math
from collections import OrderedDict
import torch
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
def load_state_dict(checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location="cpu")
state_dict_key = "state_dict"
if isinstance(checkpoint, dict):
if use_ema and "state_dict_ema" in checkpoint:
state_dict_key = "state_dict_ema"
if state_dict_key and state_dict_key in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
# strip `module.` prefix
name = k[7:] if k.startswith("module") else k
new_state_dict[name] = v
state_dict = new_state_dict
elif "model_state" in checkpoint:
state_dict_key = "model_state"
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
# strip `model.` prefix
name = k[6:] if k.startswith("model") else k
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = checkpoint
logging.info(
"Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)
)
return state_dict
else:
logging.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
state_dict = load_state_dict(checkpoint_path, use_ema)
model.load_state_dict(state_dict, strict=strict)
# def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
# resume_epoch = None
# if os.path.isfile(checkpoint_path):
# checkpoint = torch.load(checkpoint_path, map_location='cpu')
# if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
# if log_info:
# _logger.info('Restoring model state from checkpoint...')
# new_state_dict = OrderedDict()
# for k, v in checkpoint['state_dict'].items():
# name = k[7:] if k.startswith('module') else k
# new_state_dict[name] = v
# model.load_state_dict(new_state_dict)
# if optimizer is not None and 'optimizer' in checkpoint:
# if log_info:
# _logger.info('Restoring optimizer state from checkpoint...')
# optimizer.load_state_dict(checkpoint['optimizer'])
# if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
# if log_info:
# _logger.info('Restoring AMP loss scaler state from checkpoint...')
# loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
# if 'epoch' in checkpoint:
# resume_epoch = checkpoint['epoch']
# if 'version' in checkpoint and checkpoint['version'] > 1:
# resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
# if log_info:
# _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
# else:
# model.load_state_dict(checkpoint)
# if log_info:
# _logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
# return resume_epoch
# else:
# _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
# raise FileNotFoundError()
def load_pretrained(
model,
cfg=None,
num_classes=1000,
in_chans=3,
filter_fn=None,
img_size=224,
num_frames=8,
num_patches=196,
attention_type="divided_space_time",
pretrained_model="",
strict=True,
):
if cfg is None:
cfg = getattr(model, "default_cfg")
if cfg is None or "url" not in cfg or not cfg["url"]:
logging.warning("Pretrained model URL is invalid, using random initialization.")
return
if len(pretrained_model) == 0:
if cfg is None:
logging.info(f"loading from default config {model.default_cfg}.")
state_dict = model_zoo.load_url(cfg["url"], progress=False, map_location="cpu")
else:
try:
state_dict = load_state_dict(pretrained_model)["model"]
except KeyError:
state_dict = load_state_dict(pretrained_model)
if filter_fn is not None:
state_dict = filter_fn(state_dict)
if in_chans == 1:
conv1_name = cfg["first_conv"]
logging.info(
"Converting first conv (%s) pretrained weights from 3 to 1 channel"
% conv1_name
)
conv1_weight = state_dict[conv1_name + ".weight"]
conv1_type = conv1_weight.dtype
conv1_weight = conv1_weight.float()
O, I, J, K = conv1_weight.shape
if I > 3:
assert conv1_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv1_weight = conv1_weight.reshape(O, I // 3, 3, J, K)
conv1_weight = conv1_weight.sum(dim=2, keepdim=False)
else:
conv1_weight = conv1_weight.sum(dim=1, keepdim=True)
conv1_weight = conv1_weight.to(conv1_type)
state_dict[conv1_name + ".weight"] = conv1_weight
elif in_chans != 3:
conv1_name = cfg["first_conv"]
conv1_weight = state_dict[conv1_name + ".weight"]
conv1_type = conv1_weight.dtype
conv1_weight = conv1_weight.float()
O, I, J, K = conv1_weight.shape
if I != 3:
logging.warning(
"Deleting first conv (%s) from pretrained weights." % conv1_name
)
del state_dict[conv1_name + ".weight"]
strict = False
else:
logging.info(
"Repeating first conv (%s) weights in channel dim." % conv1_name
)
repeat = int(math.ceil(in_chans / 3))
conv1_weight = conv1_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv1_weight *= 3 / float(in_chans)
conv1_weight = conv1_weight.to(conv1_type)
state_dict[conv1_name + ".weight"] = conv1_weight
classifier_name = cfg["classifier"]
if num_classes == 1000 and cfg["num_classes"] == 1001:
# special case for imagenet trained models with extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + ".weight"]
state_dict[classifier_name + ".weight"] = classifier_weight[1:]
classifier_bias = state_dict[classifier_name + ".bias"]
state_dict[classifier_name + ".bias"] = classifier_bias[1:]
elif num_classes != state_dict[classifier_name + ".weight"].size(0):
# print('Removing the last fully connected layer due to dimensions mismatch ('+str(num_classes)+ ' != '+str(state_dict[classifier_name + '.weight'].size(0))+').', flush=True)
# completely discard fully connected for all other differences between pretrained and created model
del state_dict[classifier_name + ".weight"]
del state_dict[classifier_name + ".bias"]
strict = False
    ## Resizing the positional embeddings in case they don't match
    if num_patches + 1 != state_dict["pos_embed"].size(1):
        logging.info(
            f"Resizing spatial position embedding from {state_dict['pos_embed'].size(1)} to {num_patches + 1}"
        )
        pos_embed = state_dict["pos_embed"]
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
new_pos_embed = F.interpolate(
other_pos_embed, size=(num_patches), mode="nearest"
)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
state_dict["pos_embed"] = new_pos_embed
## Resizing time embeddings in case they don't match
if "time_embed" in state_dict and num_frames != state_dict["time_embed"].size(1):
logging.info(
f"Resizing temporal position embedding from {state_dict['time_embed'].size(1)} to {num_frames}"
)
time_embed = state_dict["time_embed"].transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(num_frames), mode="nearest")
state_dict["time_embed"] = new_time_embed.transpose(1, 2)
## Initializing temporal attention
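    # For divided space-time attention, each transformer block gains a temporal
    # attention module (and its own layer norm) that has no counterpart in a 2D
    # image checkpoint. The loop below bootstraps "temporal_attn"/"temporal_norm1"
    # by copying the weights of the corresponding spatial "attn"/"norm1" modules.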
if attention_type == "divided_space_time":
new_state_dict = state_dict.copy()
for key in state_dict:
if "blocks" in key and "attn" in key:
new_key = key.replace("attn", "temporal_attn")
                if new_key not in state_dict:
new_state_dict[new_key] = state_dict[key]
else:
new_state_dict[new_key] = state_dict[new_key]
if "blocks" in key and "norm1" in key:
new_key = key.replace("norm1", "temporal_norm1")
                if new_key not in state_dict:
new_state_dict[new_key] = state_dict[key]
else:
new_state_dict[new_key] = state_dict[new_key]
state_dict = new_state_dict
## Loading the weights
model.load_state_dict(state_dict, strict=False)
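# Illustrative call pattern for load_pretrained (not part of the original file;
# the concrete argument values are hypothetical and would normally be derived
# from the vision transformer that invokes this helper):
#
#   load_pretrained(
#       model,
#       num_classes=1000,
#       in_chans=3,
#       img_size=224,
#       num_frames=8,
#       num_patches=(224 // 16) ** 2,
#       attention_type="divided_space_time",
#       pretrained_model="",  # empty string -> download from cfg["url"]
#   )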
def load_pretrained_imagenet(
model,
pretrained_model,
cfg=None,
ignore_classifier=True,
num_frames=8,
num_patches=196,
**kwargs,
):
import timm
logging.info(f"Loading vit_base_patch16_224 checkpoints.")
loaded_state_dict = timm.models.vision_transformer.vit_base_patch16_224(
pretrained=True
).state_dict()
del loaded_state_dict["head.weight"]
del loaded_state_dict["head.bias"]
## Initializing temporal attention
new_state_dict = loaded_state_dict.copy()
for key in loaded_state_dict:
if "blocks" in key and "attn" in key:
new_key = key.replace("attn", "temporal_attn")
            if new_key not in loaded_state_dict:
new_state_dict[new_key] = loaded_state_dict[key]
else:
new_state_dict[new_key] = loaded_state_dict[new_key]
if "blocks" in key and "norm1" in key:
new_key = key.replace("norm1", "temporal_norm1")
            if new_key not in loaded_state_dict:
new_state_dict[new_key] = loaded_state_dict[key]
else:
new_state_dict[new_key] = loaded_state_dict[new_key]
loaded_state_dict = new_state_dict
loaded_keys = loaded_state_dict.keys()
model_keys = model.state_dict().keys()
load_not_in_model = [k for k in loaded_keys if k not in model_keys]
model_not_in_load = [k for k in model_keys if k not in loaded_keys]
toload = dict()
mismatched_shape_keys = []
for k in model_keys:
if k in loaded_keys:
if model.state_dict()[k].shape != loaded_state_dict[k].shape:
mismatched_shape_keys.append(k)
else:
toload[k] = loaded_state_dict[k]
logging.info("Keys in loaded but not in model:")
logging.info(f"In total {len(load_not_in_model)}, {sorted(load_not_in_model)}")
logging.info("Keys in model but not in loaded:")
logging.info(f"In total {len(model_not_in_load)}, {sorted(model_not_in_load)}")
logging.info("Keys in model and loaded, but shape mismatched:")
logging.info(
f"In total {len(mismatched_shape_keys)}, {sorted(mismatched_shape_keys)}"
)
model.load_state_dict(toload, strict=False)
def load_pretrained_kinetics(
model,
pretrained_model,
cfg=None,
ignore_classifier=True,
num_frames=8,
num_patches=196,
**kwargs,
):
if cfg is None:
cfg = getattr(model, "default_cfg")
if cfg is None or "url" not in cfg or not cfg["url"]:
logging.warning("Pretrained model URL is invalid, using random initialization.")
return
assert (
len(pretrained_model) > 0
), "Path to pre-trained Kinetics weights not provided."
state_dict = load_state_dict(pretrained_model)
classifier_name = cfg["classifier"]
if ignore_classifier:
classifier_weight_key = classifier_name + ".weight"
classifier_bias_key = classifier_name + ".bias"
state_dict[classifier_weight_key] = model.state_dict()[classifier_weight_key]
state_dict[classifier_bias_key] = model.state_dict()[classifier_bias_key]
else:
raise NotImplementedError(
"[dxli] Not supporting loading Kinetics-pretrained ckpt with classifier."
)
## Resizing the positional embeddings in case they don't match
if num_patches + 1 != state_dict["pos_embed"].size(1):
new_pos_embed = resize_spatial_embedding(state_dict, "pos_embed", num_patches)
state_dict["pos_embed"] = new_pos_embed
## Resizing time embeddings in case they don't match
if "time_embed" in state_dict and num_frames != state_dict["time_embed"].size(1):
state_dict["time_embed"] = resize_temporal_embedding(
state_dict, "time_embed", num_frames
)
## Loading the weights
try:
model.load_state_dict(state_dict, strict=True)
logging.info("Succeeded in loading Kinetics pre-trained weights.")
    except RuntimeError:
logging.error("Error in loading Kinetics pre-trained weights.")
def resize_spatial_embedding(state_dict, key, num_patches):
logging.info(
f"Resizing spatial position embedding from {state_dict[key].size(1)} to {num_patches + 1}"
)
pos_embed = state_dict[key]
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
new_pos_embed = F.interpolate(other_pos_embed, size=(num_patches), mode="nearest")
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
return new_pos_embed
def resize_temporal_embedding(state_dict, key, num_frames):
logging.info(
f"Resizing temporal position embedding from {state_dict[key].size(1)} to {num_frames}"
)
time_embed = state_dict[key].transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(num_frames), mode="nearest")
return new_time_embed.transpose(1, 2)
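# Shape walk-through for the two resize helpers above (illustrative; the
# numbers correspond to a hypothetical ViT-B/16 checkpoint with 196 patch
# tokens plus a CLS token and 8 frames):
#
#   state_dict = {
#       "pos_embed": torch.randn(1, 197, 768),   # (1, num_patches + 1, dim)
#       "time_embed": torch.randn(1, 8, 768),    # (1, num_frames, dim)
#   }
#   resize_spatial_embedding(state_dict, "pos_embed", num_patches=576).shape
#   # -> torch.Size([1, 577, 768])  (CLS kept, patch tokens interpolated)
#   resize_temporal_embedding(state_dict, "time_embed", num_frames=16).shape
#   # -> torch.Size([1, 16, 768])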
def detach_variable(inputs):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
x = inp.detach()
x.requires_grad = inp.requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError(
"Only tuple of tensors is supported. Got Unsupported input type: ",
type(inputs).__name__,
)
def check_backward_validity(inputs):
if not any(inp.requires_grad for inp in inputs):
warnings.warn(
"None of the inputs have requires_grad=True. Gradients will be None"
)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/timesformer/helpers.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.models.blip2_models.blip2_qformer import Blip2Qformer
@registry.register_model("blip2_image_text_matching")
class Blip2ITM(Blip2Qformer):
"""
BLIP Image-Text Matching (ITM) model.
Supported model types:
- pretrained: pretrained model
        - coco: fine-tuned model on COCO
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip2_image_text_matching", "pretrained")
>>> model = load_model("blip2_image_text_matching", "coco")
"""
def __init__(
self,
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
num_query_token=32,
embed_dim=256,
max_txt_len=32,
):
super().__init__(
img_size=img_size,
drop_path_rate=drop_path_rate,
use_grad_checkpoint=use_grad_checkpoint,
vit_precision=vit_precision,
freeze_vit=freeze_vit,
num_query_token=num_query_token,
embed_dim=embed_dim,
max_txt_len=max_txt_len,
)
def forward(self, samples, match_head="itm"):
image = samples["image"]
caption = samples["text_input"]
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
text = self.tokenizer(
caption,
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
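        # Two scoring modes are supported:
        #   "itm": the Q-Former attends jointly over query tokens and text; a
        #          binary matched/not-matched head is applied to each query
        #          position and averaged (returns 2-way logits per pair).
        #   "itc": image and text are encoded separately, projected into a
        #          shared embedding space, and the score is the maximum
        #          similarity over query tokens (returns one scalar per pair).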
if match_head == "itm":
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(
image.device
)
attention_mask = torch.cat([query_atts, text.attention_mask], dim=1)
output_itm = self.Qformer.bert(
text.input_ids,
query_embeds=query_tokens,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
itm_embeddings = output_itm.last_hidden_state[:, : query_tokens.size(1), :]
itm_logit = self.itm_head(itm_embeddings)
itm_logit = itm_logit.mean(dim=1)
return itm_logit
elif match_head == "itc":
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
image_feats = F.normalize(
self.vision_proj(query_output.last_hidden_state), dim=-1
)
text_output = self.Qformer.bert(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
)
text_feat = F.normalize(
self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1
)
sims = torch.bmm(image_feats, text_feat.unsqueeze(-1))
sim, _ = torch.max(sims, dim=1)
return sim
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/blip2_image_text_matching.py |
"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import torch
import torch.nn as nn
from torch.cuda.amp import autocast as autocast
from transformers import T5TokenizerFast
from lavis.common.registry import registry
from lavis.models.blip2_models.blip2 import Blip2Base, disabled_train
from lavis.models.blip2_models.modeling_t5 import T5Config, T5ForConditionalGeneration
@registry.register_model("blip2_t5")
class Blip2T5(Blip2Base):
"""
BLIP2 T5 model.
Supported model types:
- pretrain_flant5xl: pretrained model with FlanT5-XL
- pretrain_flant5xxl: pretrained model with FlanT5-XXL
        - caption_coco_flant5xl: fine-tuned image captioning model with FlanT5-XL
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip2_t5", "pretrain_flant5xl")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"pretrain_flant5xl": "configs/models/blip2/blip2_pretrain_flant5xl.yaml",
"pretrain_flant5xxl": "configs/models/blip2/blip2_pretrain_flant5xxl.yaml",
"caption_coco_flant5xl": "configs/models/blip2/blip2_caption_flant5xl.yaml",
}
def __init__(
self,
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
num_query_token=32,
t5_model="google/flan-t5-xl",
prompt="",
max_txt_len=32,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder, self.ln_vision = self.init_vision_encoder(
img_size, drop_path_rate, use_grad_checkpoint, vit_precision
)
if freeze_vit:
self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
logging.info("freeze vision encoder")
self.Qformer, self.query_tokens = self.init_Qformer(
num_query_token, self.visual_encoder.num_features
)
self.Qformer.cls = None
self.Qformer.bert.embeddings.word_embeddings = None
self.Qformer.bert.embeddings.position_embeddings = None
for layer in self.Qformer.bert.encoder.layer:
layer.output = None
layer.intermediate = None
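        # The Q-Former is used purely as a bridge between the frozen ViT and the
        # frozen T5 model, so its text-specific parts (the cls head, the word and
        # position embeddings, and each layer's feed-forward output/intermediate
        # modules) are dropped above to save memory.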
self.t5_tokenizer = T5TokenizerFast.from_pretrained(t5_model)
t5_config = T5Config.from_pretrained(t5_model)
t5_config.dense_act_fn = "gelu"
self.t5_model = T5ForConditionalGeneration.from_pretrained(
t5_model, config=t5_config
)
for name, param in self.t5_model.named_parameters():
param.requires_grad = False
param.data = param.data.bfloat16()
self.t5_proj = nn.Linear(
self.Qformer.config.hidden_size, self.t5_model.config.hidden_size
)
self.max_txt_len = max_txt_len
self.prompt = prompt
def forward(self, samples):
image = samples["image"]
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
inputs_t5 = self.t5_proj(query_output.last_hidden_state)
atts_t5 = torch.ones(inputs_t5.size()[:-1], dtype=torch.long).to(image.device)
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
input_tokens = self.t5_tokenizer(
samples["text_input"],
padding="longest",
truncation=True,
                max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
output_tokens = self.t5_tokenizer(
samples["text_output"],
padding="longest",
truncation=True,
                max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
encoder_atts = torch.cat([atts_t5, input_tokens.attention_mask], dim=1)
targets = output_tokens.input_ids.masked_fill(
output_tokens.input_ids == self.t5_tokenizer.pad_token_id, -100
)
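            # The projected query embeddings act as a soft visual prefix that is
            # concatenated in front of the embedded instruction tokens below;
            # pad positions in the target sequence are set to -100 so the frozen
            # T5 loss ignores them.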
inputs_embeds = self.t5_model.encoder.embed_tokens(input_tokens.input_ids)
inputs_embeds = torch.cat([inputs_t5, inputs_embeds], dim=1)
outputs = self.t5_model(
inputs_embeds=inputs_embeds,
attention_mask=encoder_atts,
decoder_attention_mask=output_tokens.attention_mask,
return_dict=True,
labels=targets,
)
loss = outputs.loss
return {"loss": loss}
@torch.no_grad()
def generate(
self,
samples,
use_nucleus_sampling=False,
num_beams=5,
max_length=30,
min_length=1,
top_p=0.9,
repetition_penalty=1.0,
length_penalty=1.0,
num_captions=1,
temperature=1,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use top-k sampling.
num_beams (int): Number of beams for beam search. 1 means no beam search.
max_length (int): The maximum length of the sequence to be generated.
min_length (int): The minimum length of the sequence to be generated.
top_p (float): The cumulative probability for nucleus sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_captions (int): Number of captions to be generated for each image.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
"""
image = samples["image"]
with torch.cuda.amp.autocast(enabled=(self.device != torch.device("cpu"))):
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
inputs_t5 = self.t5_proj(query_output.last_hidden_state)
atts_t5 = torch.ones(inputs_t5.size()[:-1], dtype=torch.long).to(image.device)
if "prompt" in samples.keys():
prompt = samples["prompt"]
else:
prompt = self.prompt
if isinstance(prompt, str):
prompt = [prompt] * image.size(0)
else:
assert len(prompt) == image.size(
0
), "The number of prompts must be equal to the batch size."
input_tokens = self.t5_tokenizer(
prompt, padding="longest", return_tensors="pt"
).to(image.device)
encoder_atts = torch.cat([atts_t5, input_tokens.attention_mask], dim=1)
device_type = "cuda" if "cuda" in str(self.device) else "cpu"
with torch.amp.autocast(device_type=device_type, dtype=torch.bfloat16):
inputs_embeds = self.t5_model.encoder.embed_tokens(input_tokens.input_ids)
inputs_embeds = torch.cat([inputs_t5, inputs_embeds], dim=1)
outputs = self.t5_model.generate(
inputs_embeds=inputs_embeds,
attention_mask=encoder_atts,
do_sample=use_nucleus_sampling,
top_p=top_p,
temperature=temperature,
num_beams=num_beams,
max_new_tokens=max_length,
min_length=min_length,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
num_return_sequences=num_captions,
)
output_text = self.t5_tokenizer.batch_decode(
outputs, skip_special_tokens=True
)
return output_text
@classmethod
def from_config(cls, cfg):
img_size = cfg.get("image_size")
num_query_token = cfg.get("num_query_token")
t5_model = cfg.get("t5_model")
drop_path_rate = cfg.get("drop_path_rate", 0)
use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
vit_precision = cfg.get("vit_precision", "fp16")
freeze_vit = cfg.get("freeze_vit", True)
prompt = cfg.get("prompt", "")
max_txt_len = cfg.get("max_txt_len", 32)
model = cls(
img_size=img_size,
drop_path_rate=drop_path_rate,
use_grad_checkpoint=use_grad_checkpoint,
vit_precision=vit_precision,
freeze_vit=freeze_vit,
num_query_token=num_query_token,
t5_model=t5_model,
prompt=prompt,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/blip2_t5.py |
"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.cuda.amp import autocast as autocast
from torch.nn import functional as F
from lavis.common.registry import registry
from lavis.models.base_model import all_gather_with_grad, concat_all_gather
from lavis.models.blip2_models.blip2 import (
Blip2Base,
compute_sim_matrix,
disabled_train,
)
from lavis.models.blip_models.blip_outputs import BlipOutput, BlipOutputFeatures
@registry.register_model("blip2")
@registry.register_model("blip2_feature_extractor")
class Blip2Qformer(Blip2Base):
"""
BLIP2 first-stage model with Q-former and ViT.
Supported model types:
        - pretrain: pretrained model
        - coco: fine-tuned model on COCO
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip2", "pretrain")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"pretrain": "configs/models/blip2/blip2_pretrain.yaml",
"coco": "configs/models/blip2/blip2_coco.yaml",
}
def __init__(
self,
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
num_query_token=32,
embed_dim=256,
max_txt_len=32,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder, self.ln_vision = self.init_vision_encoder(
img_size, drop_path_rate, use_grad_checkpoint, vit_precision
)
if freeze_vit:
self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
logging.info("freeze vision encoder")
self.Qformer, self.query_tokens = self.init_Qformer(
num_query_token, self.visual_encoder.num_features
)
self.Qformer.resize_token_embeddings(len(self.tokenizer))
state_dict = self.Qformer.state_dict()
for name, param in self.Qformer.named_parameters():
if "_query" in name:
key_orig = name.replace("_query", "")
param.data.copy_(state_dict[key_orig])
self.vision_proj = nn.Linear(self.Qformer.config.hidden_size, embed_dim)
self.text_proj = nn.Linear(self.Qformer.config.hidden_size, embed_dim)
self.itm_head = nn.Linear(self.Qformer.config.hidden_size, 2)
self.temp = nn.Parameter(0.07 * torch.ones([]))
self.max_txt_len = max_txt_len
def forward(self, samples):
image = samples["image"]
text = samples["text_input"]
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
        print(image_embeds.shape)
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        print(query_tokens.shape)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
use_cache=True,
return_dict=True,
)
image_feats = F.normalize(
self.vision_proj(query_output.last_hidden_state), dim=-1
)
        print(image_feats.shape)
text_tokens = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(image.device)
text_output = self.Qformer.bert(
text_tokens.input_ids,
attention_mask=text_tokens.attention_mask,
return_dict=True,
)
text_feat = F.normalize(
self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1
)
###============== Image-text Contrastive ===================###
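        # Each image is represented by one feature per query token, so
        # image-to-text similarity is computed per query and reduced with a max
        # over the query dimension (and symmetrically for text-to-image).
        # Features are gathered from all GPUs so the in-batch negatives span the
        # global batch; the diagonal targets are offset by rank * batch_size.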
image_feats_all = concat_all_gather(
image_feats
) # [batch_size*num_gpu, num_query_tokens, embed_dim]
text_feat_all = concat_all_gather(text_feat) # [batch_size*num_gpu, embed_dim]
sim_q2t = torch.matmul(
image_feats.unsqueeze(1), text_feat_all.unsqueeze(-1)
).squeeze()
# [batch_size, batch_size*num_gpu, num_query_tokens]
# image-text similarity: aggregate across all query tokens
sim_i2t, _ = sim_q2t.max(-1)
sim_i2t = sim_i2t / self.temp
# text-query similarity: [batch_size, batch_size*num_gpu, num_query_tokens]
sim_t2q = torch.matmul(
text_feat.unsqueeze(1).unsqueeze(1), image_feats_all.permute(0, 2, 1)
).squeeze()
# text-image similarity: aggregate across all query tokens
sim_t2i, _ = sim_t2q.max(-1)
sim_t2i = sim_t2i / self.temp # [batch_size, batch_size*num_gpu]
rank = dist.get_rank()
bs = image.size(0)
targets = torch.linspace(rank * bs, rank * bs + bs - 1, bs, dtype=int).to(
image.device
)
loss_itc = (
F.cross_entropy(sim_i2t, targets, label_smoothing=0.1)
+ F.cross_entropy(sim_t2i, targets, label_smoothing=0.1)
) / 2
###============== Image-text Matching ===================###
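        # Hard negative mining: the (detached) contrastive similarities become
        # sampling weights, with the in-batch positives zeroed on the diagonal,
        # so each text draws one hard negative image and each image one hard
        # negative text. The ITM head then classifies the resulting
        # (pos, pos), (pos text, neg image), (neg text, pos image) pairs as
        # matched vs. not matched.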
text_input_ids_world = concat_all_gather(text_tokens.input_ids)
text_attention_mask_world = concat_all_gather(text_tokens.attention_mask)
image_embeds_world = all_gather_with_grad(image_embeds)
with torch.no_grad():
weights_t2i = F.softmax(sim_t2i, dim=1) + 1e-4
weights_t2i[:, rank * bs : rank * bs + bs].fill_diagonal_(0)
weights_i2t = F.softmax(sim_i2t, dim=1) + 1e-4
weights_i2t[:, rank * bs : rank * bs + bs].fill_diagonal_(0)
# select a negative image for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds_world[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text for each image
text_ids_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_ids_neg.append(text_input_ids_world[neg_idx])
text_atts_neg.append(text_attention_mask_world[neg_idx])
text_ids_neg = torch.stack(text_ids_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
text_ids_all = torch.cat(
[text_tokens.input_ids, text_tokens.input_ids, text_ids_neg], dim=0
) # pos, pos, neg
text_atts_all = torch.cat(
[text_tokens.attention_mask, text_tokens.attention_mask, text_atts_neg],
dim=0,
)
query_tokens_itm = self.query_tokens.expand(text_ids_all.shape[0], -1, -1)
query_atts_itm = torch.ones(query_tokens_itm.size()[:-1], dtype=torch.long).to(
image.device
)
attention_mask_all = torch.cat([query_atts_itm, text_atts_all], dim=1)
image_embeds_all = torch.cat(
[image_embeds, image_embeds_neg, image_embeds], dim=0
) # pos, neg, pos
image_atts_all = torch.ones(image_embeds_all.size()[:-1], dtype=torch.long).to(
image.device
)
output_itm = self.Qformer.bert(
text_ids_all,
query_embeds=query_tokens_itm,
attention_mask=attention_mask_all,
encoder_hidden_states=image_embeds_all,
encoder_attention_mask=image_atts_all,
return_dict=True,
)
vl_embeddings = output_itm.last_hidden_state[:, : query_tokens_itm.size(1), :]
vl_output = self.itm_head(vl_embeddings)
logits = vl_output.mean(dim=1)
itm_labels = torch.cat(
[torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
dim=0,
).to(image.device)
loss_itm = F.cross_entropy(logits, itm_labels)
##================= Image Captioning ========================##
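        # Prefix language modeling: the caption tokens (with the first token
        # replaced by the decoder BOS token) are generated by the Q-Former
        # conditioned on the image through the cached key/values of the query
        # pass above (query_output was computed with use_cache=True); pad
        # tokens are masked to -100 and excluded from the LM loss.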
decoder_input_ids = text_tokens.input_ids.clone()
decoder_input_ids[:, 0] = self.tokenizer.bos_token_id
labels = decoder_input_ids.masked_fill(
decoder_input_ids == self.tokenizer.pad_token_id, -100
)
query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(
image.device
)
attention_mask = torch.cat([query_atts, text_tokens.attention_mask], dim=1)
lm_output = self.Qformer(
decoder_input_ids,
attention_mask=attention_mask,
past_key_values=query_output.past_key_values,
return_dict=True,
labels=labels,
)
loss_lm = lm_output.loss
return BlipOutput(
loss=loss_itc + loss_itm + loss_lm,
loss_itc=loss_itc,
loss_itm=loss_itm,
loss_lm=loss_lm,
)
@torch.no_grad()
def generate(
self,
samples,
use_nucleus_sampling=False,
num_beams=3,
max_length=30,
min_length=10,
top_p=0.9,
repetition_penalty=1.0,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use top-k sampling.
num_beams (int): Number of beams for beam search. 1 means no beam search.
max_length (int): The maximum length of the sequence to be generated.
min_length (int): The minimum length of the sequence to be generated.
top_p (float): The cumulative probability for nucleus sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_captions (int): Number of captions to be generated for each image.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
"""
image = samples["image"]
image_embeds = self.ln_vision(self.visual_encoder(image))
if not use_nucleus_sampling:
image_embeds = image_embeds.repeat_interleave(num_beams, dim=0)
else:
num_beams = 1
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
model_kwargs = {
"encoder_hidden_states": image_embeds,
"encoder_attention_mask": image_atts,
}
input_ids = (
torch.LongTensor(image.size(0), 1)
.fill_(self.tokenizer.bos_token_id)
.to(image.device)
)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
outputs = self.Qformer.generate(
input_ids=input_ids,
query_embeds=query_tokens,
max_length=max_length,
min_length=min_length,
num_beams=num_beams,
do_sample=use_nucleus_sampling,
top_p=top_p,
eos_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
**model_kwargs
)
captions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
return captions
def forward_image(self, image):
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
return query_output.last_hidden_state, image_embeds
def forward_text(self, text_tokens):
text_output = self.Qformer.bert(
text_tokens.input_ids,
attention_mask=text_tokens.attention_mask,
return_dict=True,
)
return text_output.last_hidden_state[:, 0, :]
def compute_itm(self, image_inputs, text_ids, text_atts):
image_atts = torch.ones(image_inputs.size()[:-1], dtype=torch.long).to(
image_inputs.device
)
query_tokens = self.query_tokens.expand(image_inputs.shape[0], -1, -1)
query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(
image_inputs.device
)
attention_mask = torch.cat([query_atts, text_atts], dim=1)
output_itm = self.Qformer.bert(
text_ids,
query_embeds=query_tokens,
attention_mask=attention_mask,
encoder_hidden_states=image_inputs,
encoder_attention_mask=image_atts,
return_dict=True,
)
vl_embeddings = output_itm.last_hidden_state[:, : query_tokens.size(1), :]
itm_logit = self.itm_head(vl_embeddings)
itm_logit = itm_logit[:, :, 1].mean(dim=1)
return itm_logit
@torch.no_grad()
def extract_features(self, samples, mode="multimodal"):
"""
Extract features for multimodal or unimodal samples.
Args:
samples (dict): A dictionary of samples, containing the following keys:
- image (torch.Tensor): A tensor of shape (B, C, H, W) containing the image.
Raw images should be preprocessed before being passed to feature extractor.
- text_input (list): A list of strings containing the text, length B.
mode (str): The mode of feature extraction. Can be either "multimodal", "text" or "image".
If "multimodal", return image features and multimodal features;
if "text", return text features;
if "image", return image features.
Default: "multimodal".
Returns:
BlipOutputFeatures: A BlipOutputFeatures object containing the features.
See lavis/models/blip_models/blip_outputs.py for more details.
"""
image = samples.get("image")
caption = samples.get("text_input")
# assert mode is one of "image", "text", "multimodal"
assert mode in [
"image",
"text",
"multimodal",
], "mode must be one of 'image', 'text', 'multimodal'"
        # initialize output
image_embeds, text_embeds, multimodal_embeds = None, None, None
image_features, text_features = None, None
if mode == "image":
assert (
image is not None
), "Image is not provided for mode 'image' or 'multimodal'"
# return query features
image_embeds_frozen = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(
image_embeds_frozen.size()[:-1], dtype=torch.long
).to(self.device)
query_tokens = self.query_tokens.expand(
image_embeds_frozen.shape[0], -1, -1
)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds_frozen,
encoder_attention_mask=image_atts,
return_dict=True,
)
image_embeds = query_output.last_hidden_state
image_features = F.normalize(self.vision_proj(image_embeds), dim=-1)
elif mode == "text":
assert (
caption is not None
), "text input is None for mode 'text' or 'multimodal'"
# return text features
text = self.tokenizer(caption, return_tensors="pt", padding=True).to(
self.device
)
text_output = self.Qformer.bert(
text.input_ids,
attention_mask=text.attention_mask,
return_dict=True,
)
text_embeds = text_output.last_hidden_state
text_features = self.text_proj(text_embeds)
text_features = F.normalize(text_features, dim=-1)
elif mode == "multimodal":
            # return multimodal query features
image_embeds_frozen = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(
image_embeds_frozen.size()[:-1], dtype=torch.long
).to(self.device)
query_tokens = self.query_tokens.expand(
image_embeds_frozen.shape[0], -1, -1
)
query_atts = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(
self.device
)
text = self.tokenizer(caption, return_tensors="pt", padding=True).to(
self.device
)
attention_mask = torch.cat([query_atts, text.attention_mask], dim=1)
output = self.Qformer.bert(
text.input_ids,
query_embeds=query_tokens,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds_frozen,
encoder_attention_mask=image_atts,
return_dict=True,
)
multimodal_embeds = output.last_hidden_state[:, : query_tokens.size(1), :]
return BlipOutputFeatures(
image_embeds=image_embeds,
image_embeds_proj=image_features,
text_embeds=text_embeds,
text_embeds_proj=text_features,
multimodal_embeds=multimodal_embeds,
)
@classmethod
def from_config(cls, cfg):
img_size = cfg.get("image_size")
num_query_token = cfg.get("num_query_token")
drop_path_rate = cfg.get("drop_path_rate", 0)
use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
vit_precision = cfg.get("vit_precision", "fp16")
freeze_vit = cfg.get("freeze_vit", True)
max_txt_len = cfg.get("max_txt_len", 32)
model = cls(
img_size=img_size,
drop_path_rate=drop_path_rate,
use_grad_checkpoint=use_grad_checkpoint,
vit_precision=vit_precision,
freeze_vit=freeze_vit,
num_query_token=num_query_token,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
def compute_sim_matrix(self, data_loader, task_cfg):
"""
Compute similarity i2t, t2i matrix for the given data loader.
"""
k_test = task_cfg.k_test
return compute_sim_matrix(model=self, data_loader=data_loader, k_test=k_test)
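# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). It assumes the
# full LAVIS package this module is vendored from, pretrained weights that are
# downloaded on first use, and a CUDA device:
#
#   from lavis.models import load_model_and_preprocess
#
#   model, vis_processors, txt_processors = load_model_and_preprocess(
#       "blip2_feature_extractor", "pretrain", is_eval=True, device="cuda"
#   )
#   image = vis_processors["eval"](raw_image).unsqueeze(0).to("cuda")
#   text = txt_processors["eval"]("a photo of a cat")
#   features = model.extract_features(
#       {"image": image, "text_input": [text]}, mode="multimodal"
#   )
#   print(features.multimodal_embeds.shape)  # (1, num_query_token, hidden)
# ---------------------------------------------------------------------------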
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/blip2_qformer.py |
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/__init__.py |
"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import torch
from torch.cuda.amp import autocast as autocast
import torch.nn as nn
from lavis.common.registry import registry
from lavis.models.blip2_models.blip2 import Blip2Base, disabled_train
from lavis.models.blip2_models.modeling_opt import OPTForCausalLM, OPTConfig
from transformers import AutoTokenizer
@registry.register_model("blip2_opt")
class Blip2OPT(Blip2Base):
"""
BLIP2 OPT model.
Supported model types:
        - pretrain_opt2.7b: pretrained model with OPT2.7b
        - pretrain_opt6.7b: pretrained model with OPT6.7b
        - caption_coco_opt2.7b: fine-tuned image captioning model with OPT2.7b
        - caption_coco_opt6.7b: fine-tuned image captioning model with OPT6.7b
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip2_opt", "caption_coco_opt2.7b")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"pretrain_opt2.7b": "configs/models/blip2/blip2_pretrain_opt2.7b.yaml",
"pretrain_opt6.7b": "configs/models/blip2/blip2_pretrain_opt6.7b.yaml",
"caption_coco_opt2.7b": "configs/models/blip2/blip2_caption_opt2.7b.yaml",
"caption_coco_opt6.7b": "configs/models/blip2/blip2_caption_opt6.7b.yaml",
}
def __init__(
self,
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
num_query_token=32,
opt_model="facebook/opt-2.7b",
prompt="",
max_txt_len=32,
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder, self.ln_vision = self.init_vision_encoder(
img_size, drop_path_rate, use_grad_checkpoint, vit_precision
)
if freeze_vit:
self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
logging.info("freeze vision encoder")
self.Qformer, self.query_tokens = self.init_Qformer(
num_query_token, self.visual_encoder.num_features
)
self.Qformer.cls = None
self.Qformer.bert.embeddings.word_embeddings = None
self.Qformer.bert.embeddings.position_embeddings = None
for layer in self.Qformer.bert.encoder.layer:
layer.output = None
layer.intermediate = None
self.opt_tokenizer = AutoTokenizer.from_pretrained(opt_model, use_fast=False)
self.opt_model = OPTForCausalLM.from_pretrained(
opt_model, torch_dtype=torch.float16
)
for name, param in self.opt_model.named_parameters():
param.requires_grad = False
self.eos_token_id = self.opt_tokenizer(
"\n", add_special_tokens=False
).input_ids[0]
self.opt_proj = nn.Linear(
self.Qformer.config.hidden_size, self.opt_model.config.hidden_size
)
self.max_txt_len = max_txt_len
self.prompt = prompt
prompt_tokens = self.opt_tokenizer(self.prompt, return_tensors="pt")
self.prompt_length = prompt_tokens.attention_mask.sum(1)
def forward(self, samples):
image = samples["image"]
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
inputs_opt = self.opt_proj(query_output.last_hidden_state)
atts_opt = torch.ones(inputs_opt.size()[:-1], dtype=torch.long).to(image.device)
self.opt_tokenizer.padding_side = "right"
text = [t + "\n" for t in samples["text_input"]]
opt_tokens = self.opt_tokenizer(
text,
return_tensors="pt",
padding="longest",
truncation=True,
max_length=self.max_txt_len,
).to(image.device)
targets = opt_tokens.input_ids.masked_fill(
opt_tokens.input_ids == self.opt_tokenizer.pad_token_id, -100
)
if self.prompt:
targets[:, : self.prompt_length] = -100 # do not apply loss to the prompt
empty_targets = (
torch.ones(atts_opt.size(), dtype=torch.long).to(image.device).fill_(-100)
)
targets = torch.cat([empty_targets, targets], dim=1)
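        # The projected query outputs serve as a visual prefix in OPT's embedding
        # space (concatenated below); the prefix positions and, when a prompt is
        # configured, the prompt tokens carry label -100 so the language-modeling
        # loss is computed only on the caption text.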
inputs_embeds = self.opt_model.model.decoder.embed_tokens(opt_tokens.input_ids)
inputs_embeds = torch.cat([inputs_opt, inputs_embeds], dim=1)
attention_mask = torch.cat([atts_opt, opt_tokens.attention_mask], dim=1)
outputs = self.opt_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
return_dict=True,
labels=targets,
)
loss = outputs.loss
return {"loss": loss}
@torch.no_grad()
def generate(
self,
samples,
use_nucleus_sampling=False,
num_beams=5,
max_length=30,
min_length=1,
top_p=0.9,
repetition_penalty=1.0,
length_penalty=1.0,
num_captions=1,
temperature=1,
):
"""
Args:
samples (dict): A dictionary containing the following keys:
- image (torch.Tensor): A tensor of shape (batch_size, 3, H, W)
use_nucleus_sampling (bool): Whether to use nucleus sampling. If False, use top-k sampling.
num_beams (int): Number of beams for beam search. 1 means no beam search.
max_length (int): The maximum length of the sequence to be generated.
min_length (int): The minimum length of the sequence to be generated.
top_p (float): The cumulative probability for nucleus sampling.
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty.
num_captions (int): Number of captions to be generated for each image.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
"""
image = samples["image"]
with torch.cuda.amp.autocast(
enabled=(self.device != torch.device("cpu"))
):
image_embeds = self.ln_vision(self.visual_encoder(image))
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(
image.device
)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_output = self.Qformer.bert(
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=True,
)
print (image_embeds.shape)
print (image_atts.shape)
print (query_tokens.shape)
inputs_opt = self.opt_proj(query_output.last_hidden_state)
atts_opt = torch.ones(inputs_opt.size()[:-1], dtype=torch.long).to(image.device)
print (inputs_opt.shape)
print (atts_opt.shape)
if "prompt" in samples.keys():
prompt = samples["prompt"]
else:
prompt = self.prompt
prompt = [prompt] * image.size(0)
opt_tokens = self.opt_tokenizer(prompt, return_tensors="pt").to(image.device)
input_ids = opt_tokens.input_ids
attention_mask = torch.cat([atts_opt, opt_tokens.attention_mask], dim=1)
if use_nucleus_sampling:
query_embeds = inputs_opt.repeat_interleave(num_captions, dim=0)
num_beams = 1
else:
query_embeds = inputs_opt.repeat_interleave(num_beams, dim=0)
print (query_embeds.shape)
outputs = self.opt_model.generate(
input_ids=input_ids,
query_embeds=query_embeds,
attention_mask=attention_mask,
do_sample=use_nucleus_sampling,
top_p=top_p,
temperature=temperature,
num_beams=num_beams,
max_new_tokens=max_length,
min_length=min_length,
eos_token_id=self.eos_token_id,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
num_return_sequences=num_captions,
)
prompt_length = opt_tokens.input_ids.shape[1]
output_text = self.opt_tokenizer.batch_decode(
outputs[:, prompt_length:], skip_special_tokens=True
)
output_text = [text.strip() for text in output_text]
return output_text
@classmethod
def from_config(cls, cfg):
img_size = cfg.get("image_size")
num_query_token = cfg.get("num_query_token")
opt_model = cfg.get("opt_model")
drop_path_rate = cfg.get("drop_path_rate", 0)
use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
vit_precision = cfg.get("vit_precision", "fp16")
freeze_vit = cfg.get("freeze_vit", True)
prompt = cfg.get("prompt", "")
max_txt_len = cfg.get("max_txt_len", 32)
model = cls(
img_size=img_size,
drop_path_rate=drop_path_rate,
use_grad_checkpoint=use_grad_checkpoint,
vit_precision=vit_precision,
freeze_vit=freeze_vit,
num_query_token=num_query_token,
opt_model=opt_model,
prompt=prompt,
max_txt_len=max_txt_len,
)
model.load_checkpoint_from_config(cfg)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/blip2_opt.py |
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model."""
import copy
import math
import os
import warnings
from typing import Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import (
ALL_LAYERNORM_LAYERS,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
logging,
replace_return_docstrings,
)
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.t5.configuration_t5 import T5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
_CHECKPOINT_FOR_DOC = "t5-small"
####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
# See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n
in [
"adam_v",
"adam_m",
"AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1",
"global_step",
]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif (
scope_names[0] == "wi"
and len(scope_names) > 1
and scope_names[1].isdigit()
):
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of nn.Module)
####################################################
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.
Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
it will evenly distribute blocks across all devices.
Args:
device_map (`Dict[int, list]`, optional, defaults to None):
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
automatically mapped to the first device (for esoteric reasons). That means that the first device should
have fewer attention modules mapped to it than other devices. For reference, the t5 models have the
following number of attention modules:
- t5-small: 6
- t5-base: 12
- t5-large: 24
- t5-3b: 24
- t5-11b: 24
Example:
```python
# Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:
model = T5ForConditionalGeneration.from_pretrained("t5-3b")
device_map = {
0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23],
}
model.parallelize(device_map)
```
"""
DEPARALLELIZE_DOCSTRING = r"""
Moves the model to cpu from a model parallel state.
Example:
```python
# On a 4 GPU machine with t5-3b:
model = T5ForConditionalGeneration.from_pretrained("t5-3b")
device_map = {
0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23],
}
model.parallelize(device_map) # Splits the model across several devices
model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
```
"""
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
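        # i.e. y = weight * x / sqrt(mean(x**2, dim=-1) + eps)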
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
try:
from apex.normalization import FusedRMSNorm
T5LayerNorm = FusedRMSNorm # noqa
logger.info(
"Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm"
)
except ImportError:
# using the normal T5LayerNorm
pass
except Exception:
logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm")
pass
ALL_LAYERNORM_LAYERS.append(T5LayerNorm)
class T5DenseActDense(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
class T5DenseGatedActDense(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
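# Note: T5 v1.1 and FLAN-T5 checkpoints use the gated feed-forward variant
# above: the input is projected twice (wi_0, wi_1), one branch passes through
# the activation, and the two branches are multiplied elementwise before the
# output projection wo (a GEGLU-style block).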
class T5LayerFF(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = T5DenseGatedActDense(config)
else:
self.DenseReluDense = T5DenseActDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(
self.relative_attention_num_buckets, self.n_heads
)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(
relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(
relative_position, torch.zeros_like(relative_position)
)
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large,
torch.full_like(relative_position_if_large, num_buckets - 1),
)
relative_buckets += torch.where(
is_small, relative_position, relative_position_if_large
)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
context_position = torch.arange(query_length, dtype=torch.long, device=device)[
:, None
]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[
None, :
]
relative_position = (
memory_position - context_position
) # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(
relative_position_bucket
) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(
0
) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += (
past_key_value[0].shape[2] if query_length is None else query_length
)
key_length = (
real_seq_length if key_value_states is None else key_value_states.shape[1]
)
def shape(states):
"""projection"""
return states.view(
batch_size, -1, self.n_heads, self.key_value_proj_dim
).transpose(1, 2)
def unshape(states):
"""reshape"""
return (
states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(
self.q(hidden_states)
) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states,
self.k,
key_value_states,
past_key_value[0] if past_key_value is not None else None,
)
value_states = project(
hidden_states,
self.v,
key_value_states,
past_key_value[1] if past_key_value is not None else None,
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length),
device=scores.device,
dtype=scores.dtype,
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device
)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = (
position_bias + mask
) # (batch_size, n_heads, seq_length, key_length)
if self.pruned_heads:
mask = torch.ones(position_bias.shape[1])
mask[list(self.pruned_heads)] = 0
position_bias_masked = position_bias[:, mask.bool()]
else:
position_bias_masked = position_bias
scores += position_bias_masked
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(
torch.matmul(attn_weights, value_states)
) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (
(key_states, value_states) if (self.is_decoder and use_cache) else None
)
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
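# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): instead of absolute position
# embeddings, T5Attention adds a learned bias indexed by a *bucketed* relative offset to
# the raw attention scores. The helper below (name is ours) shows the bucketing on a tiny
# grid; it is only defined, never called, so importing this module is unaffected. The call
# assumes `_relative_position_bucket` is a staticmethod, as in upstream transformers.
def _demo_relative_position_bucket():
    context_position = torch.arange(4)[:, None]
    memory_position = torch.arange(4)[None, :]
    relative_position = memory_position - context_position  # shape (4, 4), values in [-3, 3]
    buckets = T5Attention._relative_position_bucket(
        relative_position, bidirectional=True, num_buckets=32, max_distance=128
    )
    # `buckets` is a long tensor of shape (4, 4) with values in [0, 32); small offsets get
    # exact buckets, while larger offsets share logarithmically sized buckets.
    return buckets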
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(
config, has_relative_attention_bias=has_relative_attention_bias
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[
1:
] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[
1:
] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(
T5LayerSelfAttention(
config, has_relative_attention_bias=has_relative_attention_bias
)
)
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
if not self.is_decoder:
logger.warning(
"`past_key_values` is passed to the encoder. Please make sure this is intended."
)
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[
2:
] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value
)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if (
hidden_states.dtype == torch.float16
and torch.isinf(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value
)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = (
present_key_value_state + cross_attention_outputs[1]
)
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value
)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class T5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
_no_split_modules = ["T5Block"]
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = (
self.config.initializer_factor
) # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseActDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5)
)
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)
)
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5DenseGatedActDense):
module.wi_0.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5)
)
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5)
)
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)
)
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(
mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)
)
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
module.o.weight.data.normal_(
mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)
)
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(
mean=0.0, std=factor * ((d_model) ** -0.5)
)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (T5Attention, T5Stack)):
module.gradient_checkpointing = value
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert decoder_start_token_id is not None, (
"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id."
" See T5 docs for more information"
)
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(
input_ids.shape[:-1] + (1,), decoder_start_token_id
)
shifted_input_ids = torch.cat(
[shifted_input_ids, input_ids[..., :-1]], dim=-1
)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert (
pad_token_id is not None
), "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
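# Illustrative sketch (not part of the original file): `_shift_right` builds decoder
# inputs from labels by prepending the decoder start token (the pad token for T5),
# dropping the last position, and replacing any remaining -100 padding with
# `pad_token_id`. The helper name is ours; it is only defined, never called, so importing
# this module is unaffected.
def _demo_shift_right(model: "T5ForConditionalGeneration"):
    labels = torch.tensor([[37, 53, 1, -100]])
    # With T5's defaults (decoder_start_token_id == pad_token_id == 0) this returns
    # tensor([[0, 37, 53, 1]]).
    return model._shift_right(labels)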
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[
T5Block(config, has_relative_attention_bias=bool(i == 0))
for i in range(config.num_layers)
]
)
self.final_layer_norm = T5LayerNorm(
config.d_model, eps=config.layer_norm_epsilon
)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = (
"cpu"
if "cpu" in self.device_map.keys()
else "cuda:" + str(min(self.device_map.keys()))
)
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# Load onto devices
for k, v in self.device_map.items():
for layer in v:
cuda_device = "cuda:" + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
# Set embed_tokens to first layer
self.embed_tokens = self.embed_tokens.to(self.first_device)
# Set final layer norm to last device
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
for i in range(len(self.block)):
self.block[i] = self.block[i].to("cpu")
self.embed_tokens = self.embed_tokens.to("cpu")
self.final_layer_norm = self.final_layer_norm.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds"
)
if inputs_embeds is None:
assert (
self.embed_tokens is not None
), "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = (
past_key_values[0][0].shape[2] + seq_length
if past_key_values is not None
else seq_length
)
if use_cache is True:
assert (
self.is_decoder
), f"`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(
batch_size, mask_seq_length, device=inputs_embeds.device
)
if (
self.is_decoder
and encoder_attention_mask is None
and encoder_hidden_states is not None
):
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size,
encoder_seq_length,
device=inputs_embeds.device,
dtype=torch.long,
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=inputs_embeds.device
)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(
cross_attn_head_mask, self.config.num_layers
)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(
zip(self.block, past_key_values)
):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(
hidden_states.device
)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = (
encoder_extended_attention_mask.to(hidden_states.device)
)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(
hidden_states.device
)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(
hidden_states.device
)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer stores them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[
4 if output_attentions else 3
]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (
present_key_value_state,
)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
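# Illustrative sketch (not part of the original file): T5Stack serves as both the encoder
# and the decoder; the owning model copies the config and flips `is_decoder` / `use_cache`
# before building each stack, mirroring T5Model.__init__ below. The helper name is ours;
# it is only defined, never called.
def _demo_build_encoder_stack(config: "T5Config"):
    shared = nn.Embedding(config.vocab_size, config.d_model)
    encoder_config = copy.deepcopy(config)
    encoder_config.is_decoder = False
    encoder_config.use_cache = False
    encoder_config.is_encoder_decoder = False
    return T5Stack(encoder_config, shared)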
T5_START_DOCSTRING = r"""
The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text
Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan
Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a
text-to-text denoising generative setting.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`T5Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
            To learn more about how to prepare `input_ids` for pretraining, take a look at [T5 Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
T5_ENCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            To learn more about how to prepare `input_ids` for pretraining, take a look at [T5 Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder.embed_tokens.weight",
r"decoder.embed_tokens.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(
output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small")
>>> model = T5Model.from_pretrained("t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
        >>> # preprocess: prepend decoder_input_ids with the start token, which is the pad token for T5Model.
        >>> # This is not needed for T5ForConditionalGeneration, which performs this shift internally when the labels arg is used.
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(
self.decoder.first_device
)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
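# Illustrative sketch (not part of the original file): because `forward` skips the encoder
# whenever `encoder_outputs` is supplied, the encoder can be run once and its output
# reused across multiple decoder passes (conceptually what generation does). The helper
# name is ours; it is only defined, never called.
def _demo_reuse_encoder_outputs(model: "T5Model", input_ids, decoder_input_ids):
    encoder_outputs = model.encoder(input_ids=input_ids, return_dict=True)
    return model(
        encoder_outputs=encoder_outputs,
        decoder_input_ids=decoder_input_ids,
    )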
@add_start_docstrings(
"""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING
)
class T5ForConditionalGeneration(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder.embed_tokens.weight",
r"decoder.embed_tokens.weight",
r"lm_head.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.decoder.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(
output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
reduction: Optional[str] = "mean",
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
            labels in `[0, ..., config.vocab_size - 1]`.
Returns:
Examples:
```python
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small")
>>> model = T5ForConditionalGeneration.from_pretrained("t5-small")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if (
labels is not None
and decoder_input_ids is None
and decoder_inputs_embeds is None
):
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(
self.decoder.first_device
)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100, reduction=reduction)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if reduction == "none":
loss = loss.view(lm_logits.size(0), -1).sum(1)
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning(
"You might want to consider setting `use_cache=True` to speed up decoding"
)
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(
0, beam_idx.to(layer_past_state.device)
),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (
reordered_layer_past_states,
)
return reordered_decoder_past
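# Illustrative sketch (not part of the original file): unlike upstream transformers, this
# forward accepts `reduction="none"`, in which case the token-level cross-entropy is
# summed per sequence, yielding one loss value per batch element. The helper name is
# ours; it is only defined, never called.
def _demo_per_sample_loss(model: "T5ForConditionalGeneration", input_ids, labels):
    outputs = model(input_ids=input_ids, labels=labels, reduction="none")
    return outputs.loss  # shape (batch_size,)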
@add_start_docstrings(
"The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5EncoderModel(T5PreTrainedModel):
authorized_missing_keys = [
r"encoder.embed_tokens.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING)
@replace_return_docstrings(
output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
r"""
Returns:
Example:
```python
>>> from transformers import T5Tokenizer, T5EncoderModel
>>> tokenizer = T5Tokenizer.from_pretrained("t5-small")
>>> model = T5EncoderModel.from_pretrained("t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/modeling_t5.py |
# coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.opt.configuration_opt import OPTConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTConfig"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc"
_SEQ_CLASS_EXPECTED_LOSS = 1.71
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
# QuestionAnswering docstring
_QA_EXPECTED_OUTPUT = "'a nice puppet'"
_QA_EXPECTED_LOSS = 7.41
_QA_TARGET_START_INDEX = 14
_QA_TARGET_END_INDEX = 15
OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/opt-125m",
"facebook/opt-350m",
"facebook/opt-1.3b",
"facebook/opt-2.7b",
"facebook/opt-6.7b",
"facebook/opt-13b",
"facebook/opt-30b",
# See all OPT models at https://huggingface.co/models?filter=opt
]
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0
):
"""
    Make the causal mask used for uni-directional (decoder) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1
)
return mask[None, None, :, :].expand(
bsz, 1, tgt_len, tgt_len + past_key_values_length
)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(
inverted_mask.to(torch.bool), torch.finfo(dtype).min
)
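# Illustrative sketch (not part of the original file): the OPT decoder combines the causal
# mask from `_make_causal_mask` with the padding mask from `_expand_mask` by addition,
# producing additive biases that are 0 for visible positions and a very large negative
# value for masked ones. The helper name is ours; it is only defined, never called.
def _demo_combined_decoder_mask():
    attention_mask = torch.tensor([[1, 1, 1, 0]])  # last token is padding
    causal_mask = _make_causal_mask(torch.Size([1, 4]), torch.float32)
    padding_mask = _expand_mask(attention_mask, torch.float32, tgt_len=4)
    return causal_mask + padding_mask  # shape (1, 1, 4, 4)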
class OPTLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
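# Illustrative sketch (not part of the original file): positions are derived from the
# attention mask with a cumulative sum, so left-padding tokens do not advance the
# position counter, and every index is then shifted by the module's offset of 2 before
# the embedding lookup. The helper name is ours; it is only defined, never called.
def _demo_opt_positions():
    attention_mask = torch.tensor([[0, 1, 1, 1]])  # one left-padding token
    positions = (
        torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
    ).long() - 1
    # positions == tensor([[-1, 0, 1, 2]]); OPTLearnedPositionalEmbedding.forward then
    # looks up `positions + 2` in the embedding table.
    return positions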
class OPTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = (
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attention_mask
)
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = nn.functional.softmax(
attn_weights, dim=-1, dtype=torch.float32
).to(torch.float16)
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights_reshaped.view(
bsz * self.num_heads, tgt_len, src_len
)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
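# Illustration (a minimal sketch with arbitrary sizes, not used by the model):
# a single OPTAttention forward pass and the shapes it returns.
def _demo_opt_attention():
    import torch
    attn = OPTAttention(embed_dim=16, num_heads=4, is_decoder=True)
    hidden = torch.randn(2, 5, 16)  # (batch, seq_len, embed_dim)
    out, weights, past = attn(hidden, output_attentions=True)
    assert out.shape == (2, 5, 16)
    assert weights.shape == (2, 4, 5, 5)  # (batch, num_heads, tgt_len, src_len)
    assert past[0].shape == (2, 4, 5, 4)  # cached keys: (batch, num_heads, seq_len, head_dim)
    return out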
class OPTDecoderLayer(nn.Module):
def __init__(self, config: OPTConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = OPTAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.do_layer_norm_before = config.do_layer_norm_before
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim)
self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
        # 125m, 1.3B, ..., 175B apply layer norm BEFORE the self-attention block
if self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = residual + hidden_states
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Fully Connected
hidden_states_shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))
residual = hidden_states
        # 125m, 1.3B, ..., 175B apply layer norm BEFORE the feed-forward block
if self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = (residual + hidden_states).view(hidden_states_shape)
        # 350m applies layer norm AFTER the feed-forward block
if not self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
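# Illustration (a minimal sketch, not used by the model): one decoder layer on
# random hidden states. `OPTConfig` is assumed to come from the installed
# `transformers` package; the tiny hyper-parameters are arbitrary.
def _demo_opt_decoder_layer():
    import torch
    from transformers import OPTConfig
    config = OPTConfig(hidden_size=16, num_attention_heads=4, ffn_dim=32)
    layer = OPTDecoderLayer(config).eval()
    hidden = torch.randn(2, 5, 16)
    (out,) = layer(hidden, use_cache=False)
    assert out.shape == (2, 5, 16)  # residual connections keep the shape unchanged
    return out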
OPT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`OPTConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"The bare OPT Model outputting raw hidden-states without any specific head on top.",
OPT_START_DOCSTRING,
)
class OPTPreTrainedModel(PreTrainedModel):
config_class = OPTConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["OPTDecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (OPTDecoder)):
module.gradient_checkpointing = value
OPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class OPTDecoder(OPTPreTrainedModel):
"""
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is an [`OPTDecoderLayer`].
Args:
config: OPTConfig
"""
def __init__(self, config: OPTConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(
config.vocab_size, config.word_embed_proj_dim, self.padding_idx
)
self.embed_positions = OPTLearnedPositionalEmbedding(
config.max_position_embeddings, config.hidden_size
)
if config.word_embed_proj_dim != config.hidden_size:
self.project_out = nn.Linear(
config.hidden_size, config.word_embed_proj_dim, bias=False
)
else:
self.project_out = None
if config.word_embed_proj_dim != config.hidden_size:
self.project_in = nn.Linear(
config.word_embed_proj_dim, config.hidden_size, bias=False
)
else:
self.project_in = None
# Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
if config.do_layer_norm_before and not config._remove_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(config.hidden_size)
else:
self.final_layer_norm = None
self.layers = nn.ModuleList(
[OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
past_key_values_length=past_key_values_length,
).to(inputs_embeds.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
).to(inputs_embeds.device)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError(
"You have to specify either decoder_input_ids or decoder_inputs_embeds"
)
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if query_embeds is not None:
inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1)
input_shape = inputs_embeds.size()[:-1]
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device
)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
if self.project_in is not None:
inputs_embeds = self.project_in(inputs_embeds)
hidden_states = inputs_embeds + pos_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask], ["head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != (len(self.layers)):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = (
past_key_values[idx] if past_key_values is not None else None
)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, None)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if self.final_layer_norm is not None:
hidden_states = self.final_layer_norm(hidden_states)
if self.project_out is not None:
hidden_states = self.project_out(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
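# Illustration (a minimal sketch, not used by the model): this decoder accepts
# the extra `query_embeds` argument, which is prepended to the token embeddings
# (the hook BLIP-2 style models use to inject learned query tokens). `OPTConfig`
# is assumed to come from the installed `transformers` package.
def _demo_opt_decoder_with_query_embeds():
    import torch
    from transformers import OPTConfig
    config = OPTConfig(
        vocab_size=100,
        hidden_size=16,
        num_hidden_layers=2,
        ffn_dim=32,
        num_attention_heads=4,
        max_position_embeddings=32,
    )
    decoder = OPTDecoder(config).eval()
    input_ids = torch.randint(0, 100, (2, 5))
    query_embeds = torch.randn(2, 3, config.hidden_size)
    out = decoder(input_ids=input_ids, query_embeds=query_embeds)
    assert out.last_hidden_state.shape == (2, 3 + 5, 16)  # queries precede the text tokens
    return out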
@add_start_docstrings(
"The bare OPT Model outputting raw hidden-states without any specific head on top.",
OPT_START_DOCSTRING,
)
class OPTModel(OPTPreTrainedModel):
def __init__(self, config: OPTConfig):
super().__init__(config)
self.decoder = OPTDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPast,
config_class=_CONFIG_FOR_DOC,
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
query_embeds=query_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs
return BaseModelOutputWithPast(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
)
class OPTForCausalLM(OPTPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.model = OPTModel(config)
# the lm_head weight is automatically tied to the embed tokens weight
self.lm_head = nn.Linear(
config.word_embed_proj_dim, config.vocab_size, bias=False
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
reduction: Optional[str] = "mean",
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import GPT2Tokenizer, OPTForCausalLM
>>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
>>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
query_embeds=query_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0]).contiguous()
loss = None
if labels is not None:
logits = logits[:, -labels.size(1) :, :]
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction=reduction)
loss = loss_fct(
shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1)
)
if reduction == "none":
loss = loss.view(shift_logits.size(0), -1).sum(1)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids=None,
query_embeds=None,
past=None,
attention_mask=None,
use_cache=None,
**kwargs,
):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
if input_ids is not None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
query_embeds = None
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids,
"query_embeds": query_embeds,
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
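# Illustration (a minimal sketch, not used by the model): with the non-standard
# `reduction="none"` option this head returns one summed loss per sample instead
# of a scalar. `OPTConfig` is assumed to come from the installed `transformers`
# package; all sizes are arbitrary.
def _demo_causal_lm_per_sample_loss():
    import torch
    from transformers import OPTConfig
    config = OPTConfig(
        vocab_size=100,
        hidden_size=16,
        num_hidden_layers=2,
        ffn_dim=32,
        num_attention_heads=4,
        max_position_embeddings=32,
    )
    model = OPTForCausalLM(config).eval()
    input_ids = torch.randint(0, 100, (2, 6))
    out = model(input_ids=input_ids, labels=input_ids, reduction="none")
    assert out.loss.shape == (2,)  # one loss value per sequence in the batch
    return out.loss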
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/modeling_opt.py |
"""
* Copyright (c) 2023, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By Junnan Li
* Based on huggingface code base
* https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
"""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Dict, Any
import torch
from torch import Tensor, device, dtype, nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
self.config = config
def forward(
self,
input_ids=None,
position_ids=None,
query_embeds=None,
past_key_values_length=0,
):
if input_ids is not None:
seq_length = input_ids.size()[1]
else:
seq_length = 0
if position_ids is None:
position_ids = self.position_ids[
:, past_key_values_length : seq_length + past_key_values_length
].clone()
if input_ids is not None:
embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
if query_embeds is not None:
embeddings = torch.cat((query_embeds, embeddings), dim=1)
else:
embeddings = query_embeds
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
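# Illustration (a minimal sketch, not used by the model): unlike a stock BERT
# embedding layer, this one can run on `query_embeds` alone (no `input_ids`),
# which is how Q-Former style models feed their learned query tokens.
def _demo_bert_embeddings_query_only():
    import torch
    config = BertConfig(vocab_size=100, hidden_size=16, max_position_embeddings=32)
    emb = BertEmbeddings(config).eval()
    query_embeds = torch.randn(2, 4, 16)
    out = emb(input_ids=None, query_embeds=query_embeds)
    assert out.shape == (2, 4, 16)  # queries only pass through LayerNorm and dropout
    return out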
class BertSelfAttention(nn.Module):
def __init__(self, config, is_cross_attention):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
config, "embedding_size"
):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_width, self.all_head_size)
self.value = nn.Linear(config.encoder_width, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(
2 * config.max_position_embeddings - 1, self.attention_head_size
)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (
self.num_attention_heads,
self.attention_head_size,
)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
mixed_query_layer = self.query(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if (
self.position_embedding_type == "relative_key"
or self.position_embedding_type == "relative_key_query"
):
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(-1, 1)
position_ids_r = torch.arange(
seq_length, dtype=torch.long, device=hidden_states.device
).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(
distance + self.max_position_embeddings - 1
)
positional_embedding = positional_embedding.to(
dtype=query_layer.dtype
) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum(
"bhld,lrd->bhlr", query_layer, positional_embedding
)
relative_position_scores_key = torch.einsum(
"bhrd,lrd->bhlr", key_layer, positional_embedding
)
attention_scores = (
attention_scores
+ relative_position_scores_query
+ relative_position_scores_key
)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (
(context_layer, attention_probs) if output_attentions else (context_layer,)
)
outputs = outputs + (past_key_value,)
return outputs
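# Illustration (a minimal sketch, not used by the model): when built with
# `is_cross_attention=True` the keys/values are projected from encoder features
# whose width comes from `config.encoder_width`, an extra attribute this file
# expects the caller to set (set by hand here for illustration).
def _demo_bert_cross_attention():
    import torch
    config = BertConfig(hidden_size=16, num_attention_heads=4)
    config.encoder_width = 24  # e.g. the visual encoder's feature dimension
    attn = BertSelfAttention(config, is_cross_attention=True).eval()
    queries = torch.randn(2, 4, 16)       # query-token hidden states
    image_feats = torch.randn(2, 10, 24)  # encoder hidden states
    context, past = attn(queries, encoder_hidden_states=image_feats)
    assert context.shape == (2, 4, 16)
    assert past[0].shape == (2, 4, 10, 4)  # keys come from the encoder side
    return context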
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.self = BertSelfAttention(config, is_cross_attention)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads,
self.self.num_attention_heads,
self.self.attention_head_size,
self.pruned_heads,
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = (
self.self.attention_head_size * self.self.num_attention_heads
)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[
1:
] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, layer_num):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.layer_num = layer_num
if (
self.config.add_cross_attention
and layer_num % self.config.cross_attention_freq == 0
):
self.crossattention = BertAttention(
config, is_cross_attention=self.config.add_cross_attention
)
self.has_cross_attention = True
else:
self.has_cross_attention = False
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
self.intermediate_query = BertIntermediate(config)
self.output_query = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
query_length=0,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = (
past_key_value[:2] if past_key_value is not None else None
)
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if query_length > 0:
query_attention_output = attention_output[:, :query_length, :]
if self.has_cross_attention:
assert (
encoder_hidden_states is not None
), "encoder_hidden_states must be given for cross-attention layers"
cross_attention_outputs = self.crossattention(
query_attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
query_attention_output = cross_attention_outputs[0]
outputs = (
outputs + cross_attention_outputs[1:-1]
) # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk_query,
self.chunk_size_feed_forward,
self.seq_len_dim,
query_attention_output,
)
if attention_output.shape[1] > query_length:
layer_output_text = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[:, query_length:, :],
)
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
else:
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
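# Illustration (a minimal sketch, not used by the model): a Q-Former style layer
# routes the first `query_length` positions through cross-attention and the
# query-specific feed-forward, runs the remaining text positions through the
# regular feed-forward, then re-concatenates them. `cross_attention_freq` and
# `encoder_width` are extra config attributes this file expects the caller to
# set; they are set by hand here for illustration.
def _demo_bert_layer_query_text_split():
    import torch
    config = BertConfig(hidden_size=16, num_attention_heads=4, intermediate_size=32)
    config.add_cross_attention = True
    config.cross_attention_freq = 1  # insert cross-attention in every layer
    config.encoder_width = 24        # encoder (e.g. image) feature dimension
    layer = BertLayer(config, layer_num=0).eval()
    hidden = torch.randn(2, 4 + 6, 16)     # 4 query tokens followed by 6 text tokens
    image_feats = torch.randn(2, 10, 24)
    out = layer(hidden, encoder_hidden_states=image_feats, query_length=4)
    assert out[0].shape == (2, 10, 16)     # queries and text re-joined along the sequence dim
    return out[0]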
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BertLayer(config, i) for i in range(config.num_hidden_layers)]
)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
next_decoder_cache = () if use_cache else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
                    logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(
*inputs, past_key_value, output_attentions, query_length
)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
query_length,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=False):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: Tuple[int],
device: device,
is_decoder: bool,
has_query: bool = False,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
if has_query: # UniLM style attention mask
causal_mask = torch.cat(
[
torch.zeros(
(batch_size, prefix_seq_len, seq_length),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=1,
)
causal_mask = torch.cat(
[
torch.ones(
(batch_size, causal_mask.shape[1], prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# use_cache = use_cache if use_cache is not None else self.config.use_cache
if input_ids is None:
assert (
query_embeds is not None
), "You have to specify query_embeds when input_ids is None"
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] - self.config.query_length
if past_key_values is not None
else 0
)
query_length = query_embeds.shape[1] if query_embeds is not None else 0
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
query_embeds=query_embeds,
past_key_values_length=past_key_values_length,
)
input_shape = embedding_output.size()[:-1]
batch_size, seq_length = input_shape
device = embedding_output.device
if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length + past_key_values_length), device=device
            )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if is_decoder:
extended_attention_mask = self.get_extended_attention_mask(
attention_mask,
input_ids.shape,
device,
is_decoder,
has_query=(query_embeds is not None),
)
else:
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape, device, is_decoder
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
0
].size()
else:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [
self.invert_attention_mask(mask) for mask in encoder_attention_mask
]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
query_length=query_length,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
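# --- Illustrative sketch (not part of the original Qformer code) ----------------------
# A minimal, self-contained example of the mask arithmetic used in
# BertModel.get_extended_attention_mask above: a [batch, seq] padding mask is combined
# with a lower-triangular causal mask and turned into the additive bias that is added to
# the raw attention scores. Shapes and values below are toy values for illustration only.
def _example_extended_attention_mask():
    import torch

    attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 1 = attend, 0 = pad
    batch_size, seq_length = attention_mask.shape
    seq_ids = torch.arange(seq_length)
    # position i may attend to positions j <= i
    causal_mask = (
        seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
        <= seq_ids[None, :, None]
    ).to(attention_mask.dtype)
    extended = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
    # 0.0 where attention is allowed, -10000.0 where it is masked out
    extended = (1.0 - extended.float()) * -10000.0
    return extended  # shape [batch_size, 1, seq_length, seq_length]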
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=True,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=True,
reduction="mean",
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if labels is not None:
use_cache = False
if past_key_values is not None:
query_embeds = None
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
query_embeds=query_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
)
sequence_output = outputs[0]
if query_embeds is not None:
sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
lm_loss = loss_fct(
shifted_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
if reduction == "none":
lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(
self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs
):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
query_mask = input_ids.new_ones(query_embeds.shape[:-1])
attention_mask = torch.cat([query_mask, attention_mask], dim=-1)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"input_ids": input_ids,
"query_embeds": query_embeds,
"attention_mask": attention_mask,
"past_key_values": past,
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
"is_decoder": True,
}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
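# --- Illustrative sketch (not part of the original Qformer code) ----------------------
# The language-modeling loss in BertLMHeadModel.forward shifts logits and labels by one
# position so that token t predicts token t+1. A toy, self-contained version of that
# computation (random logits, arbitrary vocabulary size) is shown below.
def _example_shifted_lm_loss():
    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size, batch_size, seq_len = 11, 2, 5
    prediction_scores = torch.randn(batch_size, seq_len, vocab_size)
    labels = torch.randint(0, vocab_size, (batch_size, seq_len))
    labels[:, -1] = -100  # positions labelled -100 are ignored by the loss

    shifted_scores = prediction_scores[:, :-1, :].contiguous()
    shifted_labels = labels[:, 1:].contiguous()
    loss_fct = CrossEntropyLoss(label_smoothing=0.1)
    return loss_fct(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))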
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
query_embeds=query_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
)
        sequence_output = outputs[0]
        if query_embeds is not None:
            sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
prediction_scores = self.cls(sequence_output)
if return_logits:
return prediction_scores
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (
((masked_lm_loss,) + output) if masked_lm_loss is not None else output
)
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
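# --- Illustrative sketch (not part of the original Qformer code) ----------------------
# During beam search, cached key/value states must follow their beams. The snippet below
# mirrors BertLMHeadModel._reorder_cache on a toy cache: each cached tensor is indexed
# along the batch (beam) dimension with the selected beam indices.
def _example_reorder_cache():
    import torch

    num_layers, num_beams, num_heads, seq_len, head_dim = 2, 3, 4, 5, 8
    past = tuple(
        (
            torch.randn(num_beams, num_heads, seq_len, head_dim),  # keys
            torch.randn(num_beams, num_heads, seq_len, head_dim),  # values
        )
        for _ in range(num_layers)
    )
    beam_idx = torch.tensor([2, 0, 0])  # e.g. beam 2 was best, beam 0 was duplicated
    reordered = tuple(
        tuple(state.index_select(0, beam_idx) for state in layer_past)
        for layer_past in past
    )
    return reordered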
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/Qformer.py |
"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import time
import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import lavis.common.dist_utils as dist_utils
from lavis.common.dist_utils import download_cached_file
from lavis.common.utils import is_url
from lavis.common.logger import MetricLogger
from lavis.models.base_model import BaseModel
from lavis.models.blip2_models.Qformer import BertConfig, BertLMHeadModel
from lavis.models.eva_vit import create_eva_vit_g
from transformers import BertTokenizer
class Blip2Base(BaseModel):
@classmethod
def init_tokenizer(cls):
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"bos_token": "[DEC]"})
return tokenizer
@classmethod
def init_Qformer(cls, num_query_token, vision_width):
encoder_config = BertConfig.from_pretrained("bert-base-uncased")
encoder_config.encoder_width = vision_width
# insert cross-attention layer every other block
encoder_config.add_cross_attention = True
encoder_config.cross_attention_freq = 2
encoder_config.query_length = num_query_token
Qformer = BertLMHeadModel.from_pretrained(
"bert-base-uncased", config=encoder_config
)
query_tokens = nn.Parameter(
torch.zeros(1, num_query_token, encoder_config.hidden_size)
)
query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
return Qformer, query_tokens
@classmethod
def init_vision_encoder(
cls, img_size, drop_path_rate, use_grad_checkpoint, precision
):
visual_encoder = create_eva_vit_g(
img_size, drop_path_rate, use_grad_checkpoint, precision
)
ln_vision = LayerNorm(visual_encoder.num_features)
return visual_encoder, ln_vision
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
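# --- Illustrative sketch (not part of the original file) ------------------------------
# `disabled_train` is meant to replace a frozen sub-module's `.train()` so that a later
# `model.train()` cannot flip it back to training mode. Binding it explicitly with
# `__get__` is one way to do this; the exact freezing recipe used elsewhere in LAVIS may
# differ, so treat this as an assumption-labelled sketch.
def _example_freeze_module(module):
    import torch.nn as nn

    assert isinstance(module, nn.Module)
    for param in module.parameters():
        param.requires_grad = False
    module.eval()
    module.train = disabled_train.__get__(module)  # keeps the module in eval mode
    return module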
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
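# --- Illustrative sketch (not part of the original file) ------------------------------
# The LayerNorm subclass above computes in float32 and casts back, so half-precision
# activations keep their dtype while avoiding fp16 normalization issues. Toy check:
def _example_fp16_layernorm():
    import torch

    ln = LayerNorm(8)
    x = torch.randn(2, 8, dtype=torch.float16)
    y = ln(x)
    assert y.dtype == torch.float16  # dtype preserved; math done internally in float32
    return y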
def compute_sim_matrix(model, data_loader, **kwargs):
k_test = kwargs.pop("k_test")
metric_logger = MetricLogger(delimiter=" ")
header = "Evaluation:"
logging.info("Computing features for evaluation...")
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_ids = []
text_embeds = []
text_atts = []
for i in range(0, num_text, text_bs):
text = texts[i : min(num_text, i + text_bs)]
text_input = model.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=35,
return_tensors="pt",
).to(model.device)
text_feat = model.forward_text(text_input)
text_embed = F.normalize(model.text_proj(text_feat))
text_embeds.append(text_embed)
text_ids.append(text_input.input_ids)
text_atts.append(text_input.attention_mask)
text_embeds = torch.cat(text_embeds, dim=0)
text_ids = torch.cat(text_ids, dim=0)
text_atts = torch.cat(text_atts, dim=0)
vit_feats = []
image_embeds = []
for samples in data_loader:
image = samples["image"]
image = image.to(model.device)
image_feat, vit_feat = model.forward_image(image)
image_embed = model.vision_proj(image_feat)
image_embed = F.normalize(image_embed, dim=-1)
vit_feats.append(vit_feat.cpu())
image_embeds.append(image_embed)
vit_feats = torch.cat(vit_feats, dim=0)
image_embeds = torch.cat(image_embeds, dim=0)
sims_matrix = []
for image_embed in image_embeds:
sim_q2t = image_embed @ text_embeds.t()
sim_i2t, _ = sim_q2t.max(0)
sims_matrix.append(sim_i2t)
sims_matrix = torch.stack(sims_matrix, dim=0)
score_matrix_i2t = torch.full(
(len(data_loader.dataset.image), len(texts)), -100.0
).to(model.device)
num_tasks = dist_utils.get_world_size()
rank = dist_utils.get_rank()
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
image_inputs = vit_feats[start + i].repeat(k_test, 1, 1).to(model.device)
score = model.compute_itm(
image_inputs=image_inputs,
text_ids=text_ids[topk_idx],
text_atts=text_atts[topk_idx],
).float()
score_matrix_i2t[start + i, topk_idx] = score + topk_sim
sims_matrix = sims_matrix.t()
score_matrix_t2i = torch.full(
(len(texts), len(data_loader.dataset.image)), -100.0
).to(model.device)
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
image_inputs = vit_feats[topk_idx.cpu()].to(model.device)
score = model.compute_itm(
image_inputs=image_inputs,
text_ids=text_ids[start + i].repeat(k_test, 1),
text_atts=text_atts[start + i].repeat(k_test, 1),
).float()
score_matrix_t2i[start + i, topk_idx] = score + topk_sim
if dist_utils.is_dist_avail_and_initialized():
dist.barrier()
torch.distributed.all_reduce(
score_matrix_i2t, op=torch.distributed.ReduceOp.SUM
)
torch.distributed.all_reduce(
score_matrix_t2i, op=torch.distributed.ReduceOp.SUM
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Evaluation time {}".format(total_time_str))
return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
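# --- Illustrative sketch (not part of the original file) ------------------------------
# compute_sim_matrix splits the rows of the similarity matrix across ranks, re-ranks the
# top-k candidates with the ITM head, and all-reduces the partially filled score
# matrices. The row-partitioning arithmetic it uses is reproduced standalone below.
def _example_row_partition(num_rows: int, num_tasks: int, rank: int):
    step = num_rows // num_tasks + 1
    start = rank * step
    end = min(num_rows, start + step)
    return start, end  # this rank fills rows [start, end)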
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/blip2_models/blip2.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.modeling_outputs import ModelOutput
@dataclass
class ClipOutputFeatures(ModelOutput):
"""
    Data class of features from the CLIP feature extractor.
    Args:
        image_embeds: `torch.FloatTensor` of shape `(batch_size, 1, embed_dim)`, `optional`
        image_embeds_proj: `torch.FloatTensor` of shape `(batch_size, 1, feature_dim)`, `optional`
        text_embeds: `torch.FloatTensor` of shape `(batch_size, 1, embed_dim)`, `optional`
        text_embeds_proj: `torch.FloatTensor` of shape `(batch_size, 1, feature_dim)`, `optional`
"""
image_embeds: Optional[torch.FloatTensor] = None
image_embeds_proj: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
text_embeds_proj: Optional[torch.FloatTensor] = None
@dataclass
class ClipOutput(ModelOutput):
intermediate_output: Optional[ClipOutputFeatures] = None
logit_scale_exp: Optional[torch.FloatTensor] = None
loss: Optional[torch.FloatTensor] = None
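# --- Illustrative sketch (not part of the original file) ------------------------------
# Both output classes are ModelOutput dataclasses, so non-None fields can be accessed as
# attributes or by key. Toy example with random tensors of assumed shapes:
def _example_clip_outputs():
    import torch

    feats = ClipOutputFeatures(
        image_embeds=torch.randn(2, 1, 768),
        image_embeds_proj=torch.randn(2, 1, 256),
    )
    out = ClipOutput(intermediate_output=feats, logit_scale_exp=torch.tensor(100.0))
    return out.intermediate_output.image_embeds_proj.shape, out["logit_scale_exp"]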
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/clip_outputs.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
import hashlib
import os
import urllib
import warnings
from tqdm import tqdm
_RN50 = dict(
openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt",
)
_RN50_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt",
)
_RN101 = dict(
openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt",
)
_RN101_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt",
)
_RN50x4 = dict(
openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
)
_RN50x16 = dict(
openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
)
_RN50x64 = dict(
openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
)
_VITB32 = dict(
openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
)
_VITB32_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
)
_VITB16 = dict(
openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
)
_VITL14 = dict(
openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
)
_VITL14_336 = dict(
openai="https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"
)
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"ViT-B-32": _VITB32,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-L-14": _VITL14,
"ViT-L-14-336": _VITL14_336,
}
def list_pretrained(as_str: bool = False):
"""returns list of pretrained models
Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
"""
return [
":".join([k, t]) if as_str else (k, t)
for k in _PRETRAINED.keys()
for t in _PRETRAINED[k].keys()
]
def list_pretrained_tag_models(tag: str):
"""return all models having the specified pretrain tag"""
models = []
for k in _PRETRAINED.keys():
if tag in _PRETRAINED[k]:
models.append(k)
return models
def list_pretrained_model_tags(model: str):
"""return all pretrain tags for the specified model architecture"""
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags
def get_pretrained_url(model: str, tag: str):
if model not in _PRETRAINED:
return ""
model_pretrained = _PRETRAINED[model]
tag = tag.lower()
if tag not in model_pretrained:
return ""
return model_pretrained[tag]
def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
if "openaipublic" in url:
expected_sha256 = url.split("/")[-2]
else:
expected_sha256 = ""
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if expected_sha256:
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
== expected_sha256
):
return download_target
else:
warnings.warn(
f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
)
else:
return download_target
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(
total=int(source.info().get("Content-Length")),
ncols=80,
unit="iB",
unit_scale=True,
) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if (
expected_sha256
and hashlib.sha256(open(download_target, "rb").read()).hexdigest()
!= expected_sha256
):
raise RuntimeError(
f"Model has been downloaded but the SHA256 checksum does not not match"
)
return download_target
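# --- Illustrative sketch (not part of the original file) ------------------------------
# The registry helpers above resolve (architecture, tag) pairs to download URLs; for
# OpenAI checkpoints the expected SHA256 is embedded in the URL path, which is what
# download_pretrained verifies after downloading. A few lookups against the tables above:
def _example_pretrained_registry():
    assert ("RN50", "openai") in list_pretrained()
    assert "ViT-B-32" in list_pretrained_tag_models("openai")
    assert "openai" in list_pretrained_model_tags("ViT-B-16")

    url = get_pretrained_url("ViT-B-32", "openai")
    expected_sha256 = url.split("/")[-2]  # hex digest segment of the openaipublic URL
    return url, expected_sha256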
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/pretrained.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
""" OpenAI pretrained model functions
Adapted from https://github.com/mlfoundations/open_clip and https://github.com/openai/CLIP.
Originally MIT License, Copyright (c) 2021 OpenAI.
"""
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
""" CLIP Model
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import datetime
import json
import logging
import os
import re
import time
import warnings
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from lavis.common.registry import registry
from lavis.common.utils import get_abs_path
from lavis.models.base_model import BaseModel
from lavis.models.clip_models.clip_outputs import ClipOutput, ClipOutputFeatures
from lavis.models.clip_models.timm_model import TimmModel
from lavis.models.clip_models.transform import image_transform
from lavis.models.clip_models.utils import freeze_batch_norm_2d
from lavis.tasks.multimodal_classification import MultimodalClassificationTask
from torch import nn
from .pretrained import (
download_pretrained,
get_pretrained_url,
list_pretrained_tag_models,
)
_MODEL_CONFIG_PATHS = [Path(__file__).parent.parent.parent / f"configs/models/clip/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict(
[
("-1", nn.AvgPool2d(stride)),
(
"0",
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False,
),
),
("1", nn.BatchNorm2d(planes * self.expansion)),
]
)
)
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(
self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1
) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False,
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(
3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(
width // 2, width // 2, kernel_size=3, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features**-0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert (
unlocked_groups == 0
), "partial locking not currently supported for this model"
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
def stem(self, x):
for conv, bn in [
(self.conv1, self.bn1),
(self.conv2, self.bn2),
(self.conv3, self.bn3),
]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class QuickGELU(nn.Module):
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
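# --- Illustrative sketch (not part of the original file) ------------------------------
# QuickGELU is the sigmoid approximation x * sigmoid(1.702 * x) used by the original
# OpenAI weights; it is close to, but not identical with, the exact GELU. Quick check:
def _example_quick_gelu_vs_gelu():
    import torch

    x = torch.linspace(-4.0, 4.0, steps=9)
    approx = QuickGELU()(x)
    exact = torch.nn.functional.gelu(x)
    return (approx - exact).abs().max()  # small but nonzero approximation error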
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", act_layer()),
("c_proj", nn.Linear(d_model * 4, d_model)),
]
)
)
self.ln_2 = LayerNorm(d_model)
def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(
self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList(
[
ResidualAttentionBlock(width, heads, act_layer=act_layer)
for _ in range(layers)
]
)
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
for r in self.resblocks:
x = r(x, attn_mask=attn_mask)
return x
class VisualTransformer(nn.Module):
def __init__(
self,
image_size: int,
patch_size: int,
width: int,
layers: int,
heads: int,
output_dim: int,
act_layer: Callable = nn.GELU,
):
super().__init__()
self.image_size = image_size
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False,
)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(
scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
)
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, act_layer=act_layer)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert (
unlocked_groups == 0
), "partial locking not currently supported for this model"
for param in self.parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat(
[
self.class_embedding.to(x.dtype)
+ torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
),
x,
],
dim=1,
) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
@dataclass
class CLIPVisionCfg:
layers: Union[Tuple[int, int, int, int], int] = 12
width: int = 768
patch_size: int = 16
image_size: Union[Tuple[int, int], int] = 224
timm_model_name: str = (
None # a valid model name overrides layers, width, patch_size
)
timm_model_pretrained: bool = (
False # use (imagenet) pretrained weights for named model
)
timm_pool: str = (
"avg" # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
)
timm_proj: str = (
"linear" # linear projection for timm model output ('linear', 'mlp', '')
)
@dataclass
class CLIPTextCfg:
context_length: int
vocab_size: int
width: int
heads: int
layers: int
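# --- Illustrative sketch (not part of the original file) ------------------------------
# CLIP.__init__ accepts these configs either as dataclass instances or as plain dicts
# (it converts dicts itself). The example values below roughly follow a ViT-B/16
# vision tower with the standard CLIP text tower and are for illustration only.
def _example_clip_cfgs():
    vision_cfg = CLIPVisionCfg(layers=12, width=768, patch_size=16, image_size=224)
    text_cfg = CLIPTextCfg(
        context_length=77, vocab_size=49408, width=512, heads=8, layers=12
    )
    return vision_cfg, text_cfg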
@registry.register_model("clip")
@registry.register_model("clip_feature_extractor")
class CLIP(BaseModel):
PRETRAINED_MODEL_CONFIG_DICT = {
"ViT-B-32": "configs/models/clip_vit_base32.yaml",
"ViT-B-16": "configs/models/clip_vit_base16.yaml",
"ViT-L-14": "configs/models/clip_vit_large14.yaml",
"ViT-L-14-336": "configs/models/clip_vit_large14_336.yaml",
"RN50": "configs/models/clip_resnet50.yaml",
}
def __init__(
self,
embed_dim: int,
vision_cfg: CLIPVisionCfg,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
):
from .tokenizer import tokenize
super().__init__()
self.tokenizer = tokenize
self._loss = None
if isinstance(vision_cfg, dict):
vision_cfg = CLIPVisionCfg(**vision_cfg)
if isinstance(text_cfg, dict):
text_cfg = CLIPTextCfg(**text_cfg)
self.context_length = text_cfg.context_length
# OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
# memory efficient in recent PyTorch releases (>= 1.10).
# NOTE: timm models always use native GELU regardless of quick_gelu flag.
act_layer = QuickGELU if quick_gelu else nn.GELU
if vision_cfg.timm_model_name:
self.visual = TimmModel(
vision_cfg.timm_model_name,
pretrained=vision_cfg.timm_model_pretrained,
pool=vision_cfg.timm_pool,
proj=vision_cfg.timm_proj,
embed_dim=embed_dim,
image_size=vision_cfg.image_size,
)
act_layer = (
nn.GELU
) # so that text transformer doesn't use QuickGELU w/ timm models
elif isinstance(vision_cfg.layers, (tuple, list)):
vision_heads = vision_cfg.width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_cfg.layers,
output_dim=embed_dim,
heads=vision_heads,
image_size=vision_cfg.image_size,
width=vision_cfg.width,
)
else:
vision_heads = vision_cfg.width // 64
self.visual = VisualTransformer(
image_size=vision_cfg.image_size,
patch_size=vision_cfg.patch_size,
width=vision_cfg.width,
layers=vision_cfg.layers,
heads=vision_heads,
output_dim=embed_dim,
act_layer=act_layer,
)
self.transformer = Transformer(
width=text_cfg.width,
layers=text_cfg.layers,
heads=text_cfg.heads,
act_layer=act_layer,
)
self.vocab_size = text_cfg.vocab_size
self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
self.positional_embedding = nn.Parameter(
torch.empty(self.context_length, text_cfg.width)
)
self.ln_final = LayerNorm(text_cfg.width)
self.text_projection = nn.Parameter(torch.empty(text_cfg.width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
self.prompt_templates = openai_imagenet_template
self.classifier = None
self.init_parameters()
@property
def loss(self):
if self._loss is None:
from lavis.models.clip_models.loss import ClipLoss
from torch import distributed as dist
self._loss = ClipLoss(
world_size=dist.get_world_size(),
rank=dist.get_rank(),
local_loss=False,
gather_with_grad=False,
use_horovod=False,
)
return self._loss
def init_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.constant_(self.logit_scale, np.log(1 / 0.07))
if hasattr(self.visual, "init_parameters"):
self.visual.init_parameters()
proj_std = (self.transformer.width**-0.5) * (
(2 * self.transformer.layers) ** -0.5
)
attn_std = self.transformer.width**-0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
self.visual.lock(
unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats
)
def encode_image(self, image):
return self.visual(image)
def encode_text(self, text):
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
# def forward(self, image, text):
def forward(self, samples):
image = samples.get("image")
text = samples.get("text_input")
if text is not None:
text = self.tokenizer(text).to(self.device)
if image is None:
return self.encode_text(text)
elif text is None:
return self.encode_image(image)
image_embeds = self.encode_image(image)
image_features = F.normalize(image_embeds, dim=-1)
text_embeds = self.encode_text(text)
text_features = F.normalize(text_embeds, dim=-1)
loss = self.loss(image_features, text_features, self.logit_scale.exp())
# return image_features, text_features, self.logit_scale.exp()
# return {"loss": loss}
return ClipOutput(
intermediate_output=ClipOutputFeatures(
image_embeds=image_embeds,
image_embeds_proj=image_features,
text_embeds=text_embeds,
text_embeds_proj=text_features,
),
loss=loss,
logit_scale_exp=self.logit_scale.exp(),
)
def extract_features(self, samples):
"""
Extract features from the model for samples.
Keys allowed are "image" and "text_input" in samples.
If either key is missing, the corresponding features are not extracted.
Args:
samples: dict of samples to extract features from.
Returns:
ClipOutputFeatures object with features for the samples.
"""
image = samples.get("image")
text = samples.get("text_input")
if text is not None:
text = self.tokenizer(text).to(self.device)
if image is None:
return self.encode_text(text)
elif text is None:
return self.encode_image(image)
image_embeds = self.encode_image(image)
image_features = F.normalize(image_embeds, dim=-1)
text_embeds = self.encode_text(text)
text_features = F.normalize(text_embeds, dim=-1)
return ClipOutputFeatures(
image_embeds=image_embeds,
image_embeds_proj=image_features,
text_embeds=text_embeds,
text_embeds_proj=text_features,
)
def predict(self, samples):
image = samples["image"]
targets = samples["label"]
image_features = self.encode_image(image)
image_features = F.normalize(image_features, dim=-1)
logits = 100.0 * image_features @ self.classifier
return {"predictions": logits, "targets": targets}
def before_evaluation(self, dataset, task_type, **kwargs):
if task_type == MultimodalClassificationTask:
self.classifier = self.zero_shot_classifier(
classnames=dataset.classnames,
templates=self.prompt_templates,
)
def zero_shot_classifier(self, classnames, templates):
with torch.no_grad():
zeroshot_weights = []
for classname in classnames:
texts = [
template(classname) for template in templates
] # format with class
texts = self.tokenizer(texts).to(self.device) # tokenize
class_embeddings = self.encode_text(texts)
class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(self.device)
return zeroshot_weights
@classmethod
def default_config_path(cls, model_type="base"):
model_type = "ViT-B-32" if model_type == "base" else model_type
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}. \n Available types: {}".format(
model_type, cls.PRETRAINED_MODEL_CONFIG_DICT.keys()
)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
@classmethod
def from_config(cls, cfg=None):
model_name = cfg.model_type
pretrained = cfg.pretrained
precision = cfg.get("precision", "fp32")
return create_model(
model_name=model_name, pretrained=pretrained, precision=precision
)
def zero_shot_predict(self, image_path, categories):
assert isinstance(
categories, list
), f"categories must be a list, got {type(categories)}."
assert os.path.exists(image_path), f"File {image_path} does not exist."
from lavis.processors.clip_processors import ClipImageEvalProcessor
from PIL import Image
image_preprocess = ClipImageEvalProcessor()
image = image_preprocess(Image.open(image_path)).unsqueeze(0)
text = self.tokenizer(categories)
with torch.no_grad():
image_features = self.encode_image(image)
text_features = self.encode_text(text)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
print("Label probs:", text_probs) # prints: [[1., 0., 0.]]
def compute_sim_matrix(self, data_loader, **kwargs):
logging.info("Computing features for evaluation...")
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_features = []
for i in range(0, num_text, text_bs):
text = texts[i : min(num_text, i + text_bs)]
text_input = self.tokenizer(text).to(self.device)
text_feat = self.encode_text(text_input)
text_feat = F.normalize(text_feat, dim=-1)
text_features.append(text_feat)
text_features = torch.cat(text_features, dim=0)
image_features = []
for samples in data_loader:
image = samples["image"]
image = image.to(self.device)
image_feat = self.encode_image(image)
image_feat = F.normalize(image_feat, dim=-1)
image_features.append(image_feat)
image_features = torch.cat(image_features, dim=0)
sims_matrix_i2t = image_features @ text_features.t()
sims_matrix_t2i = sims_matrix_i2t.t()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Evaluation time {}".format(total_time_str))
return sims_matrix_i2t.cpu().numpy(), sims_matrix_t2i.cpu().numpy()
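# --- Illustrative sketch (not part of the original file) ------------------------------
# CLIP.build_attention_mask produces an additive causal mask for the text transformer:
# -inf strictly above the diagonal (future tokens), 0 elsewhere. Standalone version:
def _example_text_causal_mask(context_length: int = 5):
    import torch

    mask = torch.empty(context_length, context_length)
    mask.fill_(float("-inf"))
    mask.triu_(1)  # zero on and below the diagonal, -inf strictly above it
    return mask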
def convert_weights_to_fp16(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [
*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
"in_proj_bias",
"bias_k",
"bias_v",
]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model_from_openai_state_dict(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[
k
for k in state_dict.keys()
if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
]
)
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round(
(state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
)
image_size = vision_patch_size * grid_size
else:
counts: list = [
len(
set(
k.split(".")[2]
for k in state_dict
if k.startswith(f"visual.layer{b}")
)
)
for b in [1, 2, 3, 4]
]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round(
(state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
)
vision_patch_size = None
assert (
output_width**2 + 1
== state_dict["visual.attnpool.positional_embedding"].shape[0]
)
image_size = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(
k.split(".")[2]
for k in state_dict
if k.startswith(f"transformer.resblocks")
)
)
vision_cfg = CLIPVisionCfg(
layers=vision_layers,
width=vision_width,
patch_size=vision_patch_size,
image_size=image_size,
)
text_cfg = CLIPTextCfg(
context_length=context_length,
vocab_size=vocab_size,
width=transformer_width,
heads=transformer_heads,
layers=transformer_layers,
)
model = CLIP(
embed_dim,
vision_cfg=vision_cfg,
text_cfg=text_cfg,
quick_gelu=True, # OpenAI models were trained with QuickGELU
)
for key in ["input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
convert_weights_to_fp16(model)
model.load_state_dict(state_dict)
return model.eval()
def trace_model(model, batch_size=256, device=torch.device("cpu")):
model.eval()
image_size = model.visual.image_size
example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
example_text = torch.zeros(
(batch_size, model.context_length), dtype=torch.int, device=device
)
model = torch.jit.trace_module(
model,
inputs=dict(
forward=(example_images, example_text),
encode_text=(example_text,),
encode_image=(example_images,),
),
)
model.visual.image_size = image_size
    return model
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
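# --- Illustrative sketch (not part of the original file) ------------------------------
# _natural_key makes config names sort with numeric parts compared as numbers, so e.g.
# RN50x4 comes before RN50x16 (a plain string sort would reverse them):
def _example_natural_sort():
    names = ["ViT-B-32", "RN50x16", "ViT-B-16", "RN50x4", "RN101"]
    return sorted(names, key=_natural_key)
    # -> ["RN50x4", "RN50x16", "RN101", "ViT-B-16", "ViT-B-32"]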
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "vision_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
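# --- Illustrative sketch (not part of the original file) ------------------------------
# Checkpoints saved from torch.nn.parallel.DistributedDataParallel prefix every key with
# "module."; load_state_dict strips that prefix so the weights match a bare CLIP module.
# The same transformation on a toy dict:
def _example_strip_module_prefix():
    sd = {"module.visual.conv1.weight": 0, "module.logit_scale": 1}
    if next(iter(sd.items()))[0].startswith("module"):
        sd = {k[len("module.") :]: v for k, v in sd.items()}
    return sd  # {"visual.conv1.weight": 0, "logit_scale": 1}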
def create_model(
model_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
):
model_name = model_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
if pretrained.lower() == "openai":
logging.info(f"Loading pretrained {model_name} from OpenAI.")
model = load_openai_model(model_name, device=device, jit=jit)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
logging.info(f"No pretrained weights loaded for {model_name} model.")
if model_name in _MODEL_CONFIGS:
logging.info(f"Loading {model_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(
f"Model config for {model_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {model_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if pretrained_image:
if "timm_model_name" in model_cfg.get("vision_cfg", {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg["vision_cfg"]["timm_model_pretrained"] = True
else:
assert (
False
), "pretrained image towers currently only supported for timm models"
model = CLIP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(model_name, pretrained)
if url:
checkpoint_path = download_pretrained(url)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f"Loading pretrained {model_name} weights ({pretrained}).")
model.load_state_dict(load_state_dict(checkpoint_path))
else:
logging.warning(
f"Pretrained weights ({pretrained}) not found for model {model_name}."
)
raise RuntimeError(
f"Pretrained weights ({pretrained}) not found for model {model_name}."
)
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model
def create_model_and_transforms(
model_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
):
model = create_model(
model_name,
pretrained,
precision,
device,
jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image,
)
preprocess_train = image_transform(model.visual.image_size, is_train=True)
preprocess_val = image_transform(model.visual.image_size, is_train=False)
return model, preprocess_train, preprocess_val
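# --- Illustrative usage sketch (not part of the original file) ------------------------
# Typical entry point: build a model plus the matching train/eval image transforms. This
# downloads weights on first use and assumes the model name exists in the config
# registry (see list_models()).
def _example_create_model_and_transforms():
    model, preprocess_train, preprocess_val = create_model_and_transforms(
        "ViT-B-32", pretrained="openai", precision="fp32", device=torch.device("cpu")
    )
    return model, preprocess_val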
def list_models():
"""enumerate available model architectures based on config files"""
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
"""add model config path or file and update registry"""
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_tag_models("openai")
def load_openai_model(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit=True,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
    model : torch.nn.Module
        The CLIP model. Only the model is returned here; use `create_model_and_transforms`
        (or `image_transform` directly) to also obtain the matching image preprocessing
        transform.
"""
if get_pretrained_url(name, "openai"):
model_path = download_pretrained(get_pretrained_url(name, "openai"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(
f"Model {name} not found; available models = {list_openai_models()}"
)
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(
f"File {model_path} is not a JIT archive. Loading as a state dict instead"
)
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
try:
model = build_model_from_openai_state_dict(
state_dict or model.state_dict()
).to(device)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(sd).to(device)
if str(device) == "cpu":
model.float()
return model
# patch the device names
device_holder = torch.jit.trace(
lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
)
device_node = [
n
for n in device_holder.graph.findAllNodes("prim::Constant")
if "Device" in repr(n)
][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith(
"cuda"
):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(
lambda: torch.ones([]).float(), example_inputs=[]
)
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [
1,
2,
]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
# ensure image_size attr available at consistent location for both jit and non-jit
model.visual.image_size = model.input_resolution.item()
return model
openai_imagenet_template = [
lambda c: f"a bad photo of a {c}.",
lambda c: f"a photo of many {c}.",
lambda c: f"a sculpture of a {c}.",
lambda c: f"a photo of the hard to see {c}.",
lambda c: f"a low resolution photo of the {c}.",
lambda c: f"a rendering of a {c}.",
lambda c: f"graffiti of a {c}.",
lambda c: f"a bad photo of the {c}.",
lambda c: f"a cropped photo of the {c}.",
lambda c: f"a tattoo of a {c}.",
lambda c: f"the embroidered {c}.",
lambda c: f"a photo of a hard to see {c}.",
lambda c: f"a bright photo of a {c}.",
lambda c: f"a photo of a clean {c}.",
lambda c: f"a photo of a dirty {c}.",
lambda c: f"a dark photo of the {c}.",
lambda c: f"a drawing of a {c}.",
lambda c: f"a photo of my {c}.",
lambda c: f"the plastic {c}.",
lambda c: f"a photo of the cool {c}.",
lambda c: f"a close-up photo of a {c}.",
lambda c: f"a black and white photo of the {c}.",
lambda c: f"a painting of the {c}.",
lambda c: f"a painting of a {c}.",
lambda c: f"a pixelated photo of the {c}.",
lambda c: f"a sculpture of the {c}.",
lambda c: f"a bright photo of the {c}.",
lambda c: f"a cropped photo of a {c}.",
lambda c: f"a plastic {c}.",
lambda c: f"a photo of the dirty {c}.",
lambda c: f"a jpeg corrupted photo of a {c}.",
lambda c: f"a blurry photo of the {c}.",
lambda c: f"a photo of the {c}.",
lambda c: f"a good photo of the {c}.",
lambda c: f"a rendering of the {c}.",
lambda c: f"a {c} in a video game.",
lambda c: f"a photo of one {c}.",
lambda c: f"a doodle of a {c}.",
lambda c: f"a close-up photo of the {c}.",
lambda c: f"a photo of a {c}.",
lambda c: f"the origami {c}.",
lambda c: f"the {c} in a video game.",
lambda c: f"a sketch of a {c}.",
lambda c: f"a doodle of the {c}.",
lambda c: f"a origami {c}.",
lambda c: f"a low resolution photo of a {c}.",
lambda c: f"the toy {c}.",
lambda c: f"a rendition of the {c}.",
lambda c: f"a photo of the clean {c}.",
lambda c: f"a photo of a large {c}.",
lambda c: f"a rendition of a {c}.",
lambda c: f"a photo of a nice {c}.",
lambda c: f"a photo of a weird {c}.",
lambda c: f"a blurry photo of a {c}.",
lambda c: f"a cartoon {c}.",
lambda c: f"art of a {c}.",
lambda c: f"a sketch of the {c}.",
lambda c: f"a embroidered {c}.",
lambda c: f"a pixelated photo of a {c}.",
lambda c: f"itap of the {c}.",
lambda c: f"a jpeg corrupted photo of the {c}.",
lambda c: f"a good photo of a {c}.",
lambda c: f"a plushie {c}.",
lambda c: f"a photo of the nice {c}.",
lambda c: f"a photo of the small {c}.",
lambda c: f"a photo of the weird {c}.",
lambda c: f"the cartoon {c}.",
lambda c: f"art of the {c}.",
lambda c: f"a drawing of the {c}.",
lambda c: f"a photo of the large {c}.",
lambda c: f"a black and white photo of a {c}.",
lambda c: f"the plushie {c}.",
lambda c: f"a dark photo of a {c}.",
lambda c: f"itap of a {c}.",
lambda c: f"graffiti of the {c}.",
lambda c: f"a toy {c}.",
lambda c: f"itap of my {c}.",
lambda c: f"a photo of a cool {c}.",
lambda c: f"a photo of a small {c}.",
lambda c: f"a tattoo of the {c}.",
]
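# Hedged sketch of how these prompt templates are typically used for zero-shot
# classification: every class name is expanded into all templates, the resulting
# text embeddings are averaged into one prototype per class, and image features
# are later compared against the prototypes. The `tokenize_fn` argument is
# assumed to behave like the tokenize() helper in the accompanying tokenizer
# module, and `model` is assumed to expose encode_text() as the CLIP class here does.
def _example_zero_shot_classifier(model, classnames, tokenize_fn, device="cpu"):
    import torch.nn.functional as F

    class_weights = []
    with torch.no_grad():
        for classname in classnames:
            prompts = [template(classname) for template in openai_imagenet_template]
            tokens = tokenize_fn(prompts).to(device)  # (num_templates, context_length)
            embeddings = F.normalize(model.encode_text(tokens), dim=-1)
            class_weights.append(F.normalize(embeddings.mean(dim=0), dim=-1))
    return torch.stack(class_weights, dim=1)  # (embed_dim, num_classes)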
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/model.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
""" CLIP tokenizer
Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
@lru_cache()
def default_bpe():
return os.path.join(
os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz"
)
@lru_cache()
def bytes_to_unicode():
"""
    Returns a mapping of utf-8 bytes to corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
merges = merges[1 : 49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + "</w>" for v in vocab]
for merge in merges:
vocab.append("".join(merge))
if not special_tokens:
special_tokens = ["<start_of_text>", "<end_of_text>"]
else:
special_tokens = ["<start_of_text>", "<end_of_text>"] + special_tokens
vocab.extend(special_tokens)
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {t: t for t in special_tokens}
special = "|".join(special_tokens)
self.pat = re.compile(
special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE,
)
self.vocab_size = len(self.encoder)
self.all_special_ids = [self.encoder[t] for t in special_tokens]
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + "</w>",)
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = (
bytearray([self.byte_decoder[c] for c in text])
.decode("utf-8", errors="replace")
.replace("</w>", " ")
)
return text
_tokenizer = SimpleTokenizer()
def tokenize(
texts: Union[str, List[str]], context_length: int = 77
) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<start_of_text>"]
eot_token = _tokenizer.encoder["<end_of_text>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
tokens = tokens[:context_length] # Truncate
result[i, : len(tokens)] = torch.tensor(tokens)
return result
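# Hedged usage sketch: tokenize() pads or truncates every caption to the fixed
# 77-token CLIP context window. The caption strings are arbitrary examples.
def _example_tokenize():
    tokens = tokenize(["a photo of a cat", "a diagram of a neural network"])
    # tokens.shape == (2, 77); each row starts with <start_of_text>, ends with
    # <end_of_text> (unless truncated) and is zero-padded on the right.
    return tokens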
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/tokenizer.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def gather_features(
image_features,
text_features,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False,
):
if use_horovod:
assert hvd is not None, "Please install horovod"
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features = list(
all_image_features.chunk(world_size, dim=0)
)
gathered_text_features = list(
all_text_features.chunk(world_size, dim=0)
)
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_image_features = torch.cat(
torch.distributed.nn.all_gather(image_features), dim=0
)
all_text_features = torch.cat(
torch.distributed.nn.all_gather(text_features), dim=0
)
else:
gathered_image_features = [
torch.zeros_like(image_features) for _ in range(world_size)
]
gathered_text_features = [
torch.zeros_like(text_features) for _ in range(world_size)
]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return all_image_features, all_text_features
class ClipLoss(nn.Module):
def __init__(
self,
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=0,
world_size=1,
use_horovod=False,
):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.cache_labels = cache_labels
self.rank = rank
self.world_size = world_size
self.use_horovod = use_horovod
# cache state
self.prev_num_logits = 0
self.labels = {}
def forward(self, image_features, text_features, logit_scale):
device = image_features.device
if self.world_size > 1:
all_image_features, all_text_features = gather_features(
image_features,
text_features,
self.local_loss,
self.gather_with_grad,
self.rank,
self.world_size,
self.use_horovod,
)
if self.local_loss:
logits_per_image = logit_scale * image_features @ all_text_features.T
logits_per_text = logit_scale * text_features @ all_image_features.T
else:
logits_per_image = (
logit_scale * all_image_features @ all_text_features.T
)
logits_per_text = logits_per_image.T
else:
logits_per_image = logit_scale * image_features @ text_features.T
logits_per_text = logit_scale * text_features @ image_features.T
        # calculate ground-truth labels and cache them if enabled
num_logits = logits_per_image.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
if self.cache_labels:
self.labels[device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[device]
total_loss = (
F.cross_entropy(logits_per_image, labels)
+ F.cross_entropy(logits_per_text, labels)
) / 2
return total_loss
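# Hedged single-process usage sketch (world_size == 1, so no feature gathering
# happens). The random tensors stand in for already-normalized image and text
# embeddings, and the logit scale value is an assumed example.
def _example_clip_loss():
    batch_size, embed_dim = 8, 512
    image_features = F.normalize(torch.randn(batch_size, embed_dim), dim=-1)
    text_features = F.normalize(torch.randn(batch_size, embed_dim), dim=-1)
    logit_scale = torch.tensor(100.0)  # exp() of a learned temperature, assumed here
    loss_fn = ClipLoss(local_loss=False, gather_with_grad=False)
    return loss_fn(image_features, text_features, logit_scale)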
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/loss.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
def freeze_batch_norm_2d(module, module_match={}, name=""):
"""
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
returned. Otherwise, the module is walked recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
module_match (dict): Dictionary of full module names to freeze (all if empty)
name (str): Full module name (prefix)
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
is_match = True
if module_match:
is_match = name in module_match
if is_match and isinstance(
module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for child_name, child in module.named_children():
full_child_name = ".".join([name, child_name]) if name else child_name
new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
if new_child is not child:
res.add_module(child_name, new_child)
return res
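# Hedged usage sketch: freeze every BatchNorm2d layer in a small torchvision
# backbone. The resnet18 import is purely illustrative and assumes a recent
# torchvision version.
def _example_freeze_batch_norm_2d():
    from torchvision.models import resnet18

    backbone = resnet18(weights=None)
    frozen = freeze_batch_norm_2d(backbone)
    # All BatchNorm2d modules are now FrozenBatchNorm2d: running statistics and
    # affine parameters become fixed buffers instead of trainable parameters.
    return frozen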
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/utils.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
from typing import Optional, Sequence, Tuple
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from torchvision.transforms import (
Normalize,
Compose,
RandomResizedCrop,
InterpolationMode,
ToTensor,
Resize,
CenterCrop,
)
class ResizeMaxSize(nn.Module):
def __init__(
self, max_size, interpolation=InterpolationMode.BICUBIC, fn="max", fill=0
):
super().__init__()
if not isinstance(max_size, int):
raise TypeError(f"Size should be int. Got {type(max_size)}")
self.max_size = max_size
self.interpolation = interpolation
        self.fn = min if fn == "min" else max
self.fill = fill
def forward(self, img):
if isinstance(img, torch.Tensor):
height, width = img.shape[:2]
else:
width, height = img.size
scale = self.max_size / float(max(height, width))
if scale != 1.0:
new_size = tuple(round(dim * scale) for dim in (height, width))
img = F.resize(img, new_size, self.interpolation)
pad_h = self.max_size - new_size[0]
pad_w = self.max_size - new_size[1]
img = F.pad(
img,
padding=[
pad_w // 2,
pad_h // 2,
pad_w - pad_w // 2,
pad_h - pad_h // 2,
],
fill=self.fill,
)
return img
def _convert_to_rgb(image):
return image.convert("RGB")
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
):
mean = mean or (0.48145466, 0.4578275, 0.40821073) # OpenAI dataset mean
std = std or (0.26862954, 0.26130258, 0.27577711) # OpenAI dataset std
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose(
[
RandomResizedCrop(
image_size,
scale=(0.9, 1.0),
interpolation=InterpolationMode.BICUBIC,
),
_convert_to_rgb,
ToTensor(),
normalize,
]
)
else:
if resize_longest_max:
transforms = [ResizeMaxSize(image_size, fill=fill_color)]
else:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
transforms.extend(
[
_convert_to_rgb,
ToTensor(),
normalize,
]
)
return Compose(transforms)
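# Hedged usage sketch: build the standard 224px CLIP-style transforms and run the
# eval transform on a synthetic PIL image (the PIL import is illustrative only).
def _example_image_transform():
    from PIL import Image

    train_tf = image_transform(224, is_train=True)
    val_tf = image_transform(224, is_train=False)
    img = Image.new("RGB", (320, 240), color=(128, 128, 128))
    tensor = val_tf(img)  # shape (3, 224, 224), normalized with the OpenAI mean/std
    return train_tf, tensor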
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/transform.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Based on https://github.com/mlfoundations/open_clip
"""
""" timm model adapter
Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
"""
import math
import warnings
from collections import OrderedDict
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import nn as nn
try:
import timm
from timm.models.layers import Mlp, to_2tuple
# from timm.models.layers.attention_pool2d import RotAttentionPool2d
# from timm.models.layers.attention_pool2d import (
# AttentionPool2d as AbsAttentionPool2d,
# )
except ImportError as e:
timm = None
from lavis.models.clip_models.utils import freeze_batch_norm_2d
class TimmModel(nn.Module):
"""timm model adapter
# FIXME this adapter is a work in progress, may change in ways that break weight compat
"""
def __init__(
self,
model_name,
embed_dim,
image_size=224,
pool="avg",
proj="linear",
drop=0.0,
pretrained=False,
):
super().__init__()
if timm is None:
raise RuntimeError("Please `pip install timm` to use timm models.")
self.image_size = to_2tuple(image_size)
self.trunk = timm.create_model(model_name, pretrained=pretrained)
feat_size = self.trunk.default_cfg.get("pool_size", None)
feature_ndim = 1 if not feat_size else 2
if pool in ("abs_attn", "rot_attn"):
assert feature_ndim == 2
# if attn pooling used, remove both classifier and default pool
self.trunk.reset_classifier(0, global_pool="")
else:
# reset global pool if pool config set, otherwise leave as network default
reset_kwargs = dict(global_pool=pool) if pool else {}
self.trunk.reset_classifier(0, **reset_kwargs)
prev_chs = self.trunk.num_features
head_layers = OrderedDict()
if pool == "abs_attn":
head_layers["pool"] = AttentionPool2d(
prev_chs, feat_size=feat_size, out_features=embed_dim
)
prev_chs = embed_dim
elif pool == "rot_attn":
head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
prev_chs = embed_dim
else:
assert proj, "projection layer needed if non-attention pooling is used."
# NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
if proj == "linear":
head_layers["drop"] = nn.Dropout(drop)
head_layers["proj"] = nn.Linear(prev_chs, embed_dim)
elif proj == "mlp":
head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
self.head = nn.Sequential(head_layers)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
"""lock modules
Args:
unlocked_groups (int): leave last n layer groups unlocked (default: 0)
"""
if not unlocked_groups:
# lock full model
for param in self.trunk.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self.trunk)
else:
# NOTE: partial freeze requires latest timm (master) branch and is subject to change
try:
# FIXME import here until API stable and in an official release
from timm.models.helpers import group_modules, group_parameters
except ImportError:
raise RuntimeError(
"Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`"
)
matcher = self.trunk.group_matcher()
gparams = group_parameters(self.trunk, matcher)
max_layer_id = max(gparams.keys())
max_layer_id = max_layer_id - unlocked_groups
for group_idx in range(max_layer_id + 1):
group = gparams[group_idx]
for param in group:
self.trunk.get_parameter(param).requires_grad = False
if freeze_bn_stats:
gmodules = group_modules(self.trunk, matcher, reverse=True)
gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
freeze_batch_norm_2d(self.trunk, gmodules)
def forward(self, x):
x = self.trunk(x)
x = self.head(x)
return x
class RotAttentionPool2d(nn.Module):
"""Attention based 2D feature pooling w/ rotary (relative) pos embedding.
This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed.
https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
    NOTE: While this impl does not require a fixed feature size, performance at differing resolutions from
    train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW
"""
def __init__(
self,
in_features: int,
out_features: int = None,
embed_dim: int = None,
num_heads: int = 4,
qkv_bias: bool = True,
):
super().__init__()
embed_dim = embed_dim or in_features
out_features = out_features or in_features
self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
self.proj = nn.Linear(embed_dim, out_features)
self.num_heads = num_heads
assert embed_dim % num_heads == 0
self.head_dim = embed_dim // num_heads
self.scale = self.head_dim**-0.5
self.pos_embed = RotaryEmbedding(self.head_dim)
trunc_normal_(self.qkv.weight, std=in_features**-0.5)
nn.init.zeros_(self.qkv.bias)
def forward(self, x):
B, _, H, W = x.shape
N = H * W
x = x.reshape(B, -1, N).permute(0, 2, 1)
x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
x = (
self.qkv(x)
.reshape(B, N + 1, 3, self.num_heads, self.head_dim)
.permute(2, 0, 3, 1, 4)
)
q, k, v = x[0], x[1], x[2]
qc, q = q[:, :, :1], q[:, :, 1:]
sin_emb, cos_emb = self.pos_embed.get_embed((H, W))
q = apply_rot_embed(q, sin_emb, cos_emb)
q = torch.cat([qc, q], dim=2)
kc, k = k[:, :, :1], k[:, :, 1:]
k = apply_rot_embed(k, sin_emb, cos_emb)
k = torch.cat([kc, k], dim=2)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1)
x = self.proj(x)
return x[:, 0]
class AttentionPool2d(nn.Module):
"""Attention based 2D feature pooling w/ learned (absolute) pos embedding.
This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
It was based on impl in CLIP by OpenAI
https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
    NOTE: This requires the feature size upon construction and will prevent adaptive sizing of the network.
"""
def __init__(
self,
in_features: int,
feat_size: Union[int, Tuple[int, int]],
out_features: int = None,
embed_dim: int = None,
num_heads: int = 4,
qkv_bias: bool = True,
):
super().__init__()
embed_dim = embed_dim or in_features
out_features = out_features or in_features
assert embed_dim % num_heads == 0
self.feat_size = to_2tuple(feat_size)
self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
self.proj = nn.Linear(embed_dim, out_features)
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scale = self.head_dim**-0.5
spatial_dim = self.feat_size[0] * self.feat_size[1]
self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features))
trunc_normal_(self.pos_embed, std=in_features**-0.5)
trunc_normal_(self.qkv.weight, std=in_features**-0.5)
nn.init.zeros_(self.qkv.bias)
def forward(self, x):
B, _, H, W = x.shape
N = H * W
assert self.feat_size[0] == H
assert self.feat_size[1] == W
x = x.reshape(B, -1, N).permute(0, 2, 1)
x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
x = x + self.pos_embed.unsqueeze(0).to(x.dtype)
x = (
self.qkv(x)
.reshape(B, N + 1, 3, self.num_heads, self.head_dim)
.permute(2, 0, 3, 1, 4)
)
q, k, v = x[0], x[1], x[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1)
x = self.proj(x)
return x[:, 0]
def pixel_freq_bands(
num_bands: int,
max_freq: float = 224.0,
linear_bands: bool = True,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
):
if linear_bands:
bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device)
else:
bands = 2 ** torch.linspace(
0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device
)
return bands * torch.pi
def inv_freq_bands(
num_bands: int,
temperature: float = 100000.0,
step: int = 2,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
) -> torch.Tensor:
inv_freq = 1.0 / (
temperature
** (torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands)
)
return inv_freq
def build_sincos2d_pos_embed(
feat_shape: List[int],
dim: int = 64,
temperature: float = 10000.0,
reverse_coord: bool = False,
interleave_sin_cos: bool = False,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
) -> torch.Tensor:
"""
Args:
feat_shape:
dim:
temperature:
reverse_coord: stack grid order W, H instead of H, W
interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos
dtype:
device:
Returns:
"""
assert (
dim % 4 == 0
), "Embed dimension must be divisible by 4 for sin-cos 2D position embedding"
pos_dim = dim // 4
bands = inv_freq_bands(
pos_dim, temperature=temperature, step=1, dtype=dtype, device=device
)
if reverse_coord:
feat_shape = feat_shape[::-1] # stack W, H instead of H, W
grid = (
torch.stack(
torch.meshgrid(
[torch.arange(s, device=device, dtype=dtype) for s in feat_shape]
)
)
.flatten(1)
.transpose(0, 1)
)
pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0)
# FIXME add support for unflattened spatial dim?
stack_dim = (
2 if interleave_sin_cos else 1
) # stack sin, cos, sin, cos instead of sin sin cos cos
pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1)
return pos_emb
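# Hedged shape-check sketch: a 7x7 feature grid with a 64-dim embedding yields one
# sin-cos position vector per grid cell.
def _example_build_sincos2d_pos_embed():
    pos_emb = build_sincos2d_pos_embed([7, 7], dim=64)
    assert pos_emb.shape == (49, 64)  # one row per grid position
    return pos_emb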
def build_fourier_pos_embed(
feat_shape: List[int],
bands: Optional[torch.Tensor] = None,
num_bands: int = 64,
max_res: int = 224,
linear_bands: bool = False,
include_grid: bool = False,
concat_out: bool = True,
in_pixels: bool = True,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
if bands is None:
if in_pixels:
bands = pixel_freq_bands(
num_bands,
float(max_res),
linear_bands=linear_bands,
dtype=dtype,
device=device,
)
else:
bands = inv_freq_bands(num_bands, step=1, dtype=dtype, device=device)
else:
if device is None:
device = bands.device
if dtype is None:
dtype = bands.dtype
if in_pixels:
grid = torch.stack(
torch.meshgrid(
[
torch.linspace(-1.0, 1.0, steps=s, device=device, dtype=dtype)
for s in feat_shape
]
),
dim=-1,
)
else:
grid = torch.stack(
torch.meshgrid(
[torch.arange(s, device=device, dtype=dtype) for s in feat_shape]
),
dim=-1,
)
grid = grid.unsqueeze(-1)
pos = grid * bands
pos_sin, pos_cos = pos.sin(), pos.cos()
out = (grid, pos_sin, pos_cos) if include_grid else (pos_sin, pos_cos)
# FIXME torchscript doesn't like multiple return types, probably need to always cat?
if concat_out:
out = torch.cat(out, dim=-1)
return out
class FourierEmbed(nn.Module):
def __init__(
self,
max_res: int = 224,
num_bands: int = 64,
concat_grid=True,
keep_spatial=False,
):
super().__init__()
self.max_res = max_res
self.num_bands = num_bands
self.concat_grid = concat_grid
self.keep_spatial = keep_spatial
self.register_buffer(
"bands", pixel_freq_bands(max_res, num_bands), persistent=False
)
def forward(self, x):
B, C = x.shape[:2]
feat_shape = x.shape[2:]
emb = build_fourier_pos_embed(
feat_shape,
self.bands,
include_grid=self.concat_grid,
dtype=x.dtype,
device=x.device,
)
emb = emb.transpose(-1, -2).flatten(len(feat_shape))
batch_expand = (B,) + (-1,) * (x.ndim - 1)
# FIXME support nD
if self.keep_spatial:
x = torch.cat(
[x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1
)
else:
x = torch.cat(
[x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1
)
x = x.reshape(B, feat_shape.numel(), -1)
return x
def rot(x):
return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape)
def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
return x * cos_emb + rot(x) * sin_emb
def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
if isinstance(x, torch.Tensor):
x = [x]
return [t * cos_emb + rot(t) * sin_emb for t in x]
def apply_rot_embed_split(x: torch.Tensor, emb):
split = emb.shape[-1] // 2
return x * emb[:, :split] + rot(x) * emb[:, split:]
def build_rotary_pos_embed(
feat_shape: List[int],
bands: Optional[torch.Tensor] = None,
dim: int = 64,
max_freq: float = 224,
linear_bands: bool = False,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
):
"""
NOTE: shape arg should include spatial dim only
"""
feat_shape = torch.Size(feat_shape)
sin_emb, cos_emb = build_fourier_pos_embed(
feat_shape,
bands=bands,
num_bands=dim // 4,
max_res=max_freq,
linear_bands=linear_bands,
concat_out=False,
device=device,
dtype=dtype,
)
N = feat_shape.numel()
sin_emb = sin_emb.reshape(N, -1).repeat_interleave(2, -1)
cos_emb = cos_emb.reshape(N, -1).repeat_interleave(2, -1)
return sin_emb, cos_emb
class RotaryEmbedding(nn.Module):
"""Rotary position embedding
NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
been well tested, and will likely change. It will be moved to its own file.
The following impl/resources were referenced for this impl:
* https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
* https://blog.eleuther.ai/rotary-embeddings/
"""
def __init__(self, dim, max_res=224, linear_bands: bool = False):
super().__init__()
self.dim = dim
self.register_buffer(
"bands",
pixel_freq_bands(dim // 4, max_res, linear_bands=linear_bands),
persistent=False,
)
def get_embed(self, shape: List[int]):
return build_rotary_pos_embed(shape, self.bands)
def forward(self, x):
# assuming channel-first tensor where spatial dim are >= 2
sin_emb, cos_emb = self.get_embed(x.shape[2:])
return apply_rot_embed(x, sin_emb, cos_emb)
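# Hedged usage sketch: rotary embeddings are usually applied to attention tensors
# of shape (..., num_tokens, head_dim); get_embed() builds the sin/cos tables for
# an H x W grid and apply_rot_embed() broadcasts them over batch and head dims.
def _example_rotary_embedding():
    head_dim, H, W = 32, 7, 7
    rope = RotaryEmbedding(dim=head_dim)
    sin_emb, cos_emb = rope.get_embed([H, W])  # each of shape (H * W, head_dim)
    q = torch.randn(2, 4, H * W, head_dim)     # (batch, heads, tokens, head_dim)
    return apply_rot_embed(q, sin_emb, cos_emb)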
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/clip_models/timm_model.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.modeling_outputs import (
BaseModelOutputWithPoolingAndCrossAttentions,
ModelOutput,
)
@dataclass
class AlproSimilarity(ModelOutput):
sim_v2t: torch.FloatTensor = None
sim_t2v: torch.FloatTensor = None
sim_v2t_targets: Optional[torch.FloatTensor] = None
sim_t2v_targets: Optional[torch.FloatTensor] = None
@dataclass
class AlproIntermediateOutput(ModelOutput):
# uni-modal features
video_embeds: torch.FloatTensor = None
text_embeds: Optional[torch.FloatTensor] = None
# intermediate outputs of multimodal encoder
encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None
vtm_logits: Optional[torch.FloatTensor] = None
vtm_labels: Optional[torch.LongTensor] = None
@dataclass
class AlproOutput(ModelOutput):
# some finetuned models (e.g. BlipVQA) do not compute similarity, thus optional.
sims: Optional[AlproSimilarity] = None
intermediate_output: AlproIntermediateOutput = None
loss: Optional[torch.FloatTensor] = None
loss_vtc: Optional[torch.FloatTensor] = None
loss_vtm: Optional[torch.FloatTensor] = None
loss_mlm: Optional[torch.FloatTensor] = None
@dataclass
class AlproOutputWithLogits(AlproOutput):
logits: torch.FloatTensor = None
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/alpro_models/alpro_outputs.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import os
import torch
import torch.nn.functional as F
from lavis.common.dist_utils import download_cached_file
from lavis.common.utils import is_url
from lavis.models.base_model import BaseModel
from transformers import BertTokenizer
class AlproBase(BaseModel):
@classmethod
def init_tokenizer(cls):
return BertTokenizer.from_pretrained("bert-base-uncased")
def load_from_pretrained(self, url_or_filename, num_frames, num_patches):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint:
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
for key in list(state_dict.keys()):
if "bert" in key:
new_key = key.replace("bert.", "")
state_dict[new_key] = state_dict[key]
del state_dict[key]
spatial_embed_key = "visual_encoder.model.pos_embed"
temporal_embed_key = "visual_encoder.model.time_embed"
## Resizing spatial embeddings in case they don't match
if num_patches + 1 != state_dict[spatial_embed_key].size(1):
state_dict[spatial_embed_key] = resize_spatial_embedding(
state_dict, spatial_embed_key, num_patches
)
else:
logging.info(
"The length of spatial position embedding matches. No need to resize."
)
## Resizing time embeddings in case they don't match
if temporal_embed_key in state_dict and num_frames != state_dict[
temporal_embed_key
].size(1):
state_dict[temporal_embed_key] = resize_temporal_embedding(
state_dict, temporal_embed_key, num_frames
)
else:
logging.info(
"No temporal encoding found. Or the length of temporal position embedding matches. No need to resize."
)
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
def resize_spatial_embedding(state_dict, key, num_patches):
logging.info(
f"Resizing spatial position embedding from {state_dict[key].size(1)} to {num_patches + 1}"
)
pos_embed = state_dict[key]
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
new_pos_embed = F.interpolate(other_pos_embed, size=(num_patches), mode="nearest")
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
return new_pos_embed
def resize_temporal_embedding(state_dict, key, num_frames):
logging.info(
f"Resizing temporal position embedding from {state_dict[key].size(1)} to {num_frames}"
)
time_embed = state_dict[key].transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(num_frames), mode="nearest")
return new_time_embed.transpose(1, 2)
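# Hedged sketch of the spatial resizing helper above: a synthetic (1, 197, 768)
# position embedding (196 patches + CLS token) is interpolated to 256 patches.
# The state_dict and key below exist only for illustration.
def _example_resize_spatial_embedding():
    key = "visual_encoder.model.pos_embed"
    state_dict = {key: torch.randn(1, 197, 768)}
    resized = resize_spatial_embedding(state_dict, key, num_patches=256)
    assert resized.shape == (1, 257, 768)  # CLS token + 256 interpolated patches
    return resized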
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/alpro_models/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import time
import lavis.common.dist_utils as dist_utils
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from lavis.common.config import node_to_dict
from lavis.common.dist_utils import get_rank
from lavis.common.logger import MetricLogger
from lavis.common.registry import registry
from lavis.models.alpro_models import AlproBase
from lavis.models.alpro_models.alpro_outputs import AlproIntermediateOutput, AlproOutput
from lavis.models.base_model import all_gather_with_grad
from lavis.models.med import XBertEncoder
from lavis.models.timesformer.vit import TimeSformer
from torch import nn
@registry.register_model("alpro_retrieval")
class AlproRetrieval(AlproBase):
PRETRAINED_MODEL_CONFIG_DICT = {
"msrvtt": "configs/models/alpro_retrieval_msrvtt.yaml",
"didemo": "configs/models/alpro_retrieval_didemo.yaml",
}
def __init__(
self,
visual_encoder,
text_encoder,
vision_width=768,
text_width=768,
embed_dim=256,
max_txt_len=35,
temp=0.07,
):
super().__init__()
self.temp = nn.Parameter(torch.ones([]) * temp)
self.tokenizer = self.init_tokenizer()
self.visual_encoder = visual_encoder
self.text_encoder = text_encoder
vision_width = vision_width
text_width = text_width
self.vision_proj = nn.Linear(vision_width, embed_dim)
self.text_proj = nn.Linear(text_width, embed_dim)
self.itm_head = nn.Linear(text_width, 2)
self.max_txt_len = max_txt_len
def forward(self, samples):
with torch.no_grad():
self.temp.clamp_(0.001, 0.5)
visual_inputs = samples["video"]
caption = samples["text_input"]
b, t, c, h, w = visual_inputs.shape
# forward text
text = self.tokenizer(
caption,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
text_output = self.text_encoder.forward_text(
text,
token_type_ids=torch.zeros(
text.input_ids.shape, dtype=torch.long, device=self.device
),
)
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(self.text_proj(text_embeds[:, 0, :]), dim=-1)
# forward visual
# timeSformer asks for (b, c, t, h, w) as input.
video_embeds = self.visual_encoder.forward_features(visual_inputs)
video_feat = F.normalize(self.vision_proj(video_embeds[:, 0, :]), dim=-1)
video_atts = torch.ones(video_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
# ========== (in-batch) ITC loss ==========
gathered_video_feats = all_gather_with_grad(video_feat)
gathered_text_feats = all_gather_with_grad(text_feat)
sim_v2t = video_feat @ gathered_text_feats.t() / self.temp
sim_t2v = text_feat @ gathered_video_feats.t() / self.temp
sim_targets = torch.zeros_like(sim_v2t)
local_rank = get_rank()
b_start, b_end = b * local_rank, b * (local_rank + 1)
sim_targets[:, b_start:b_end] = torch.eye(b)
loss_v2t = -torch.sum(F.log_softmax(sim_v2t, dim=1) * sim_targets, dim=1).mean()
loss_t2v = -torch.sum(F.log_softmax(sim_t2v, dim=1) * sim_targets, dim=1).mean()
vtc_loss = (loss_v2t + loss_t2v) / 2
(
vtm_loss,
vtm_logits,
vtm_labels,
encoder_output,
encoder_output_neg,
) = self.compute_vtm(
text_embeds=text_embeds,
text_atts=text.attention_mask,
image_embeds=video_embeds,
image_atts=video_atts,
sim_i2t=sim_v2t.clone(), # for hard mining
sim_t2i=sim_t2v.clone(), # for hard mining
)
loss = vtc_loss + vtm_loss
# return {"loss": loss}
return AlproOutput(
loss=loss,
loss_vtc=vtc_loss,
loss_vtm=vtm_loss,
intermediate_output=AlproIntermediateOutput(
video_embeds=video_embeds,
text_embeds=text_embeds,
encoder_output=encoder_output,
encoder_output_neg=encoder_output_neg,
vtm_logits=vtm_logits,
vtm_labels=vtm_labels,
),
)
def compute_vtm(
self, text_embeds, text_atts, image_embeds, image_atts, sim_i2t, sim_t2i
):
device = self.device
# ====== positive pairs =======
attention_mask = torch.cat([text_atts, image_atts], dim=1)
embedding_output_pos = torch.cat([text_embeds, image_embeds], dim=1)
encoder_outputs_pos = self.text_encoder(
encoder_embeds=embedding_output_pos,
attention_mask=attention_mask,
return_dict=True,
mode="fusion",
)
# ====== negative pairs =======
bs = text_embeds.shape[0]
local_rank = get_rank()
b_start, b_end = bs * local_rank, bs * (local_rank + 1)
with torch.no_grad():
weights_v2t = sim_i2t[:, b_start:b_end]
weights_t2v = sim_t2i[:, b_start:b_end]
# never select self as negative
            weights_v2t.fill_diagonal_(-np.inf)
            weights_t2v.fill_diagonal_(-np.inf)
weights_v2t = F.softmax(weights_v2t, dim=1)
weights_t2v = F.softmax(weights_t2v, dim=1)
# select a negative image for each text
# FIXME to optimize using indexing operations
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2v[b], 1).item()
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg, dim=0)
# select a negative text for each image
text_embeds_neg = []
text_atts_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_v2t[b], 1).item()
text_embeds_neg.append(text_embeds[neg_idx])
text_atts_neg.append(text_atts[neg_idx])
text_embeds_neg = torch.stack(text_embeds_neg, dim=0)
text_atts_neg = torch.stack(text_atts_neg, dim=0)
text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0)
text_atts_all = torch.cat([text_atts, text_atts_neg], dim=0)
video_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
video_atts_all = torch.cat([image_atts, image_atts], dim=0)
attention_mask_all = torch.cat([text_atts_all, video_atts_all], dim=1)
embedding_output_all = torch.cat([text_embeds_all, video_embeds_all], dim=1)
# forward negative pairs via cross encoder
encoder_outputs_neg = self.text_encoder(
encoder_embeds=embedding_output_all,
attention_mask=attention_mask_all,
return_dict=True,
mode="fusion",
)
vl_embeddings = torch.cat(
[
encoder_outputs_pos.last_hidden_state[:, 0, :],
encoder_outputs_neg.last_hidden_state[:, 0, :],
],
dim=0,
)
vtm_logits = self.itm_head(vl_embeddings)
vtm_labels = torch.cat(
[torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
dim=0,
).to(device)
vtm_loss = F.cross_entropy(vtm_logits, vtm_labels)
return (
vtm_loss,
vtm_logits,
vtm_labels,
encoder_outputs_pos,
encoder_outputs_neg,
)
def compute_sim_matrix(self, data_loader, task_cfg):
k_test = task_cfg.get("k_test")
metric_logger = MetricLogger(delimiter=" ")
header = "Evaluation:"
logging.info("Computing features for evaluation...")
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_ids = []
text_embeds = []
text_feats = []
text_atts = []
for i in range(0, num_text, text_bs):
text = texts[i : min(num_text, i + text_bs)]
text_input = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
text_output = self.text_encoder.forward_text(
text_input,
token_type_ids=torch.zeros(
text_input.input_ids.shape, dtype=torch.long, device=self.device
),
)
text_feats.append(text_output.last_hidden_state.cpu())
text_embed = F.normalize(
self.text_proj(text_output.last_hidden_state[:, 0, :])
)
text_embeds.append(text_embed)
text_ids.append(text_input.input_ids)
text_atts.append(text_input.attention_mask)
text_embeds = torch.cat(text_embeds, dim=0)
text_ids = torch.cat(text_ids, dim=0)
text_atts = torch.cat(text_atts, dim=0)
text_feats = torch.cat(text_feats, dim=0)
video_feats = []
video_embeds = []
for samples in data_loader:
video = samples["video"]
video = video.to(self.device)
video_feat = self.visual_encoder.forward_features(video)
video_embed = self.vision_proj(video_feat[:, 0, :])
video_embed = F.normalize(video_embed, dim=-1)
video_feats.append(video_feat.cpu())
video_embeds.append(video_embed)
video_feats = torch.cat(video_feats, dim=0)
video_embeds = torch.cat(video_embeds, dim=0)
sims_matrix = video_embeds @ text_embeds.t()
score_matrix_v2t = torch.full(
(len(data_loader.dataset.image), len(texts)), -100.0
).to(self.device)
num_tasks = dist_utils.get_world_size()
rank = dist_utils.get_rank()
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
# video-to-text
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
video_feats_repeat = (
video_feats[start + i].repeat(k_test, 1, 1).to(self.device)
)
video_atts_repeat = torch.ones(
video_feats_repeat.size()[:-1], dtype=torch.long
).to(self.device)
attention_mask = torch.cat([text_atts[topk_idx], video_atts_repeat], dim=1)
embedding_output = torch.cat(
[text_feats[topk_idx].to(self.device), video_feats_repeat], dim=1
)
output = self.text_encoder(
encoder_embeds=embedding_output,
attention_mask=attention_mask,
return_dict=True,
mode="fusion",
)
score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
score_matrix_v2t[start + i, topk_idx] = score + topk_sim
# text-to-video
sims_matrix = sims_matrix.t()
score_matrix_t2v = torch.full(
(len(texts), len(data_loader.dataset.image)), -100.0
).to(self.device)
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(
metric_logger.log_every(sims_matrix[start:end], 50, header)
):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
text_feats_repeat = (
text_feats[start + i].repeat(k_test, 1, 1).to(self.device)
)
text_atts_repeat = text_atts[start + i].repeat(k_test, 1).to(self.device)
video_atts = torch.ones(
video_feats[topk_idx].size()[:-1], dtype=torch.long
).to(self.device)
embedding_output = torch.cat(
[text_feats_repeat, video_feats[topk_idx].to(self.device)], dim=1
)
attention_mask = torch.cat([text_atts_repeat, video_atts], dim=1)
output = self.text_encoder(
encoder_embeds=embedding_output,
attention_mask=attention_mask,
return_dict=True,
mode="fusion",
)
score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
score_matrix_t2v[start + i, topk_idx] = score + topk_sim
if dist_utils.is_dist_avail_and_initialized():
dist.barrier()
torch.distributed.all_reduce(
score_matrix_v2t, op=torch.distributed.ReduceOp.SUM
)
torch.distributed.all_reduce(
score_matrix_t2v, op=torch.distributed.ReduceOp.SUM
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Evaluation time {}".format(total_time_str))
return score_matrix_v2t.cpu().numpy(), score_matrix_t2v.cpu().numpy()
@classmethod
def from_config(cls, cfg):
# vision encoder
visual_encoder_config = node_to_dict(cfg.timesformer)
visual_encoder = TimeSformer(**visual_encoder_config)
# text encoder
text_encoder = XBertEncoder.from_config(cfg)
max_txt_len = cfg.get("max_txt_len", 35)
model = cls(
visual_encoder=visual_encoder,
text_encoder=text_encoder,
max_txt_len=max_txt_len,
)
num_patches = (
visual_encoder_config["image_size"] // visual_encoder_config["patch_size"]
) ** 2
num_frames = visual_encoder_config["n_frms"]
model.load_checkpoint_from_config(
cfg, num_frames=num_frames, num_patches=num_patches
)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/alpro_models/alpro_retrieval.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from warnings import warn
import torch
import torch.nn.functional as F
from lavis.common.config import node_to_dict
from lavis.common.registry import registry
from lavis.models.alpro_models import AlproBase
from lavis.models.alpro_models.alpro_outputs import (
AlproIntermediateOutput,
AlproOutputWithLogits,
)
from lavis.models.med import XBertEncoder
from lavis.models.timesformer.vit import TimeSformer
from torch import nn
@registry.register_model("alpro_qa")
class AlproQA(AlproBase):
PRETRAINED_MODEL_CONFIG_DICT = {
"msrvtt": "configs/models/alpro_qa_msrvtt.yaml",
"msvd": "configs/models/alpro_qa_msvd.yaml",
}
def __init__(
self, visual_encoder, text_encoder, hidden_size, num_classes, max_txt_len=40
):
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder = visual_encoder
self.text_encoder = text_encoder
if num_classes > 0:
self.classifier = nn.Sequential(
nn.Linear(hidden_size, hidden_size * 2),
nn.ReLU(True),
nn.Linear(hidden_size * 2, num_classes),
)
else:
warn(f"num_classes is 0. Initialized {type(self)} without classifier.")
self.max_txt_len = max_txt_len
def forward(self, samples, is_train=True):
visual_inputs = samples["video"]
question = samples["text_input"]
targets = samples["answers"]
# forward text
text = self.tokenizer(
question,
padding="max_length",
truncation=True,
max_length=self.max_txt_len,
return_tensors="pt",
).to(self.device)
text_output = self.text_encoder.forward_text(
text,
token_type_ids=torch.zeros(
text.input_ids.shape, dtype=torch.long, device=self.device
),
)
text_embeds = text_output.last_hidden_state
# forward visual
# timeSformer asks for (b, c, t, h, w) as input.
video_embeds = self.visual_encoder.forward_features(visual_inputs)
video_atts = torch.ones(video_embeds.size()[:-1], dtype=torch.long).to(
self.device
)
# forward cross-encoder
attention_mask = torch.cat([text.attention_mask, video_atts], dim=1)
embedding_output = torch.cat([text_embeds, video_embeds], dim=1)
encoder_output = self.text_encoder(
encoder_embeds=embedding_output,
attention_mask=attention_mask,
return_dict=True,
mode="fusion",
)
prediction = self.classifier(encoder_output.last_hidden_state[:, 0, :])
if is_train:
loss = F.cross_entropy(prediction, targets)
# return {"loss": loss}
return AlproOutputWithLogits(
loss=loss,
intermediate_output=AlproIntermediateOutput(
video_embeds=video_embeds,
text_embeds=text_embeds,
encoder_output=encoder_output,
),
logits=prediction,
)
else:
return {"predictions": prediction, "targets": targets}
def predict(self, samples):
output = self.forward(samples, is_train=False)
return output
@classmethod
def from_config(cls, cfg):
# vision encoder
visual_encoder_config = node_to_dict(cfg.timesformer)
visual_encoder = TimeSformer(**visual_encoder_config)
# text encoder
text_encoder = XBertEncoder.from_config(cfg)
num_classes = cfg.get("num_classes", -1)
hidden_size = cfg.get("hidden_size", 768)
model = cls(
visual_encoder=visual_encoder,
text_encoder=text_encoder,
hidden_size=hidden_size,
num_classes=num_classes,
)
num_patches = (
visual_encoder_config["image_size"] // visual_encoder_config["patch_size"]
) ** 2
num_frames = visual_encoder_config["n_frms"]
model.load_checkpoint_from_config(
cfg, num_frames=num_frames, num_patches=num_patches
)
return model
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/models/alpro_models/alpro_qa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import warnings
import torch
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tensor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
"""
if len(clip.size()) != 4:
raise ValueError("clip should be a 4D tensor")
return clip[..., i : i + h, j : j + w]
def resize(clip, target_size, interpolation_mode):
if len(target_size) != 2:
raise ValueError(
f"target size should be tuple (height, width), instead got {target_size}"
)
return torch.nn.functional.interpolate(
clip, size=target_size, mode=interpolation_mode, align_corners=False
)
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
"""
Do spatial cropping and resizing to the video clip
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j), i.e. coordinates of the upper left corner.
        j (int): j in (i,j), i.e. coordinates of the upper left corner.
h (int): Height of the cropped region.
w (int): Width of the cropped region.
size (tuple(int, int)): height and width of resized clip
Returns:
clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
"""
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
clip = crop(clip, i, j, h, w)
clip = resize(clip, size, interpolation_mode)
return clip
def center_crop(clip, crop_size):
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
h, w = clip.size(-2), clip.size(-1)
th, tw = crop_size
if h < th or w < tw:
raise ValueError("height and width must be no smaller than crop_size")
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(clip, i, j, th, tw)
def to_tensor(clip):
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of clip tensor
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
_is_tensor_video_clip(clip)
if not clip.dtype == torch.uint8:
raise TypeError(
"clip tensor should have data type uint8. Got %s" % str(clip.dtype)
)
return clip.float().permute(3, 0, 1, 2) / 255.0
def normalize(clip, mean, std, inplace=False):
"""
Args:
clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
mean (tuple): pixel RGB mean. Size is (3)
std (tuple): pixel standard deviation. Size is (3)
Returns:
normalized clip (torch.tensor): Size is (C, T, H, W)
"""
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
if not inplace:
clip = clip.clone()
mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
return clip
def hflip(clip):
"""
Args:
clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
Returns:
flipped clip (torch.tensor): Size is (C, T, H, W)
"""
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
return clip.flip(-1)
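# A minimal sketch (dummy data; the normalization stats are illustrative, not the
# values used elsewhere in the repo) chaining the helpers above:
# uint8 frames (T, H, W, C) -> float (C, T, H, W) -> resized crop -> normalize -> hflip.
if __name__ == "__main__":
    frames = torch.randint(0, 256, (8, 64, 64, 3), dtype=torch.uint8)  # T, H, W, C
    clip = to_tensor(frames)                                           # (3, 8, 64, 64), float in [0, 1]
    clip = resized_crop(clip, 0, 0, 48, 48, size=(32, 32))
    clip = normalize(clip, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    clip = hflip(clip)
    print(clip.shape)  # torch.Size([3, 8, 32, 32])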
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/functional_video.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import re
from lavis.common.registry import registry
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
@registry.register_processor("blip_caption")
class BlipCaptionProcessor(BaseProcessor):
def __init__(self, prompt="", max_words=50):
self.prompt = prompt
self.max_words = max_words
def __call__(self, caption):
caption = self.prompt + self.pre_caption(caption)
return caption
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
prompt = cfg.get("prompt", "")
max_words = cfg.get("max_words", 50)
return cls(prompt=prompt, max_words=max_words)
def pre_caption(self, caption):
caption = re.sub(
r"([.!\"()*#:;~])",
" ",
caption.lower(),
)
caption = re.sub(
r"\s{2,}",
" ",
caption,
)
caption = caption.rstrip("\n")
caption = caption.strip(" ")
# truncate caption
caption_words = caption.split(" ")
if len(caption_words) > self.max_words:
caption = " ".join(caption_words[: self.max_words])
return caption
@registry.register_processor("blip_question")
class BlipQuestionProcessor(BaseProcessor):
def __init__(self, max_words=50):
self.max_words = max_words
def __call__(self, question):
return self.pre_question(question)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
max_words = cfg.get("max_words", 50)
return cls(max_words=max_words)
def pre_question(self, question):
question = re.sub(
r"([.!\"()*#:;~])",
"",
question.lower(),
)
question = question.rstrip(" ")
# truncate question
question_words = question.split(" ")
if len(question_words) > self.max_words:
question = " ".join(question_words[: self.max_words])
return question
@registry.register_processor("blip_image_train")
class BlipImageTrainProcessor(BlipImageBaseProcessor):
def __init__(
self, image_size=384, mean=None, std=None, min_scale=0.5, max_scale=1.0
):
super().__init__(mean=mean, std=std)
self.transform = transforms.Compose(
[
transforms.RandomResizedCrop(
image_size,
scale=(min_scale, max_scale),
interpolation=InterpolationMode.BICUBIC,
),
transforms.RandomHorizontalFlip(),
RandomAugment(
2,
5,
isPIL=True,
augs=[
"Identity",
"AutoContrast",
"Brightness",
"Sharpness",
"Equalize",
"ShearX",
"ShearY",
"TranslateX",
"TranslateY",
"Rotate",
],
),
transforms.ToTensor(),
self.normalize,
]
)
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 384)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
min_scale = cfg.get("min_scale", 0.5)
max_scale = cfg.get("max_scale", 1.0)
return cls(
image_size=image_size,
mean=mean,
std=std,
min_scale=min_scale,
max_scale=max_scale,
)
@registry.register_processor("blip_image_eval")
class BlipImageEvalProcessor(BlipImageBaseProcessor):
def __init__(self, image_size=384, mean=None, std=None):
super().__init__(mean=mean, std=std)
self.transform = transforms.Compose(
[
transforms.Resize(
(image_size, image_size), interpolation=InterpolationMode.BICUBIC
),
transforms.ToTensor(),
self.normalize,
]
)
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 384)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
return cls(image_size=image_size, mean=mean, std=std)
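# A minimal usage sketch (synthetic image; the image size, prompt, and max_words
# below are illustrative assumptions) of the processors registered above.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    eval_cfg = OmegaConf.create({"image_size": 224})
    image_processor = BlipImageEvalProcessor.from_config(eval_cfg)
    dummy = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    print(image_processor(dummy).shape)  # torch.Size([3, 224, 224])

    caption_processor = BlipCaptionProcessor(prompt="a photo of ", max_words=12)
    print(caption_processor("A DOG... running in the park!!"))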
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/blip_processors.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.common.registry import registry
from lavis.processors.blip_processors import BlipImageBaseProcessor
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
def _convert_to_rgb(image):
return image.convert("RGB")
@registry.register_processor("clip_image_train")
class ClipImageTrainProcessor(BlipImageBaseProcessor):
def __init__(
self, image_size=224, mean=None, std=None, min_scale=0.9, max_scale=1.0
):
super().__init__(mean=mean, std=std)
self.transform = transforms.Compose(
[
transforms.RandomResizedCrop(
image_size,
scale=(min_scale, max_scale),
interpolation=InterpolationMode.BICUBIC,
),
_convert_to_rgb,
transforms.ToTensor(),
self.normalize,
]
)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 224)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
min_scale = cfg.get("min_scale", 0.9)
max_scale = cfg.get("max_scale", 1.0)
return cls(
image_size=image_size,
mean=mean,
std=std,
min_scale=min_scale,
max_scale=max_scale,
)
@registry.register_processor("clip_image_eval")
class ClipImageEvalProcessor(BlipImageBaseProcessor):
def __init__(self, image_size=224, mean=None, std=None):
super().__init__(mean=mean, std=std)
self.transform = transforms.Compose(
[
transforms.Resize(image_size, interpolation=InterpolationMode.BICUBIC),
transforms.CenterCrop(image_size),
_convert_to_rgb,
transforms.ToTensor(),
self.normalize,
]
)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 224)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
return cls(
image_size=image_size,
mean=mean,
std=std,
)
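# A minimal usage sketch (synthetic non-square image; sizes are illustrative).
# This eval processor resizes the short side and then center-crops, so the
# output is always a square tensor.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = ClipImageEvalProcessor(image_size=224)
    img = Image.fromarray(np.full((260, 340, 3), 127, dtype=np.uint8))
    print(processor(img).shape)  # torch.Size([3, 224, 224])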
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/clip_processors.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
from lavis.common.registry import registry
from lavis.datasets.data_utils import load_video
from lavis.processors import transforms_video
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import VideoRandomAugment
from lavis.processors import functional_video as F
from omegaconf import OmegaConf
from torchvision import transforms
MAX_INT = registry.get("MAX_INT")
class AlproVideoBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None, n_frms=MAX_INT):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms_video.NormalizeVideo(mean, std)
self.n_frms = n_frms
class ToUint8(object):
def __init__(self):
pass
def __call__(self, tensor):
return tensor.to(torch.uint8)
def __repr__(self):
return self.__class__.__name__
class ToTHWC(object):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor, same dtype): Size is (T, H, W, C)
"""
def __init__(self):
pass
def __call__(self, tensor):
return tensor.permute(1, 2, 3, 0)
def __repr__(self):
return self.__class__.__name__
class ResizeVideo(object):
def __init__(self, target_size, interpolation_mode="bilinear"):
self.target_size = target_size
self.interpolation_mode = interpolation_mode
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return F.resize(clip, self.target_size, self.interpolation_mode)
def __repr__(self):
return self.__class__.__name__ + "(resize_size={0})".format(self.target_size)
@registry.register_processor("alpro_video_train")
class AlproVideoTrainProcessor(AlproVideoBaseProcessor):
def __init__(
self,
image_size=384,
mean=None,
std=None,
min_scale=0.5,
max_scale=1.0,
n_frms=MAX_INT,
):
super().__init__(mean=mean, std=std, n_frms=n_frms)
self.image_size = image_size
self.transform = transforms.Compose(
[
# Video size is (C, T, H, W)
transforms_video.RandomResizedCropVideo(
image_size,
scale=(min_scale, max_scale),
interpolation_mode="bicubic",
),
transforms_video.RandomHorizontalFlipVideo(),
ToTHWC(), # C, T, H, W -> T, H, W, C
VideoRandomAugment(
2,
5,
augs=[
"Identity",
"AutoContrast",
"Brightness",
"Sharpness",
"Equalize",
"ShearX",
"ShearY",
"TranslateX",
"TranslateY",
"Rotate",
],
),
ToUint8(),
transforms_video.ToTensorVideo(), # T, H, W, C -> C, T, H, W
self.normalize,
]
)
def __call__(self, vpath):
"""
Args:
vpath (str): Path of the video file to load and transform.
Returns:
torch.tensor: video clip after transforms. Size is (C, T, size, size).
"""
clip = load_video(
video_path=vpath,
n_frms=self.n_frms,
height=self.image_size,
width=self.image_size,
sampling="headtail",
)
return self.transform(clip)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 256)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
min_scale = cfg.get("min_scale", 0.5)
max_scale = cfg.get("max_scale", 1.0)
n_frms = cfg.get("n_frms", MAX_INT)
return cls(
image_size=image_size,
mean=mean,
std=std,
min_scale=min_scale,
max_scale=max_scale,
n_frms=n_frms,
)
@registry.register_processor("alpro_video_eval")
class AlproVideoEvalProcessor(AlproVideoBaseProcessor):
def __init__(self, image_size=256, mean=None, std=None, n_frms=MAX_INT):
super().__init__(mean=mean, std=std, n_frms=n_frms)
self.image_size = image_size
# Input video size is (C, T, H, W)
self.transform = transforms.Compose(
[
# frames will be resized during decord loading.
ToUint8(), # C, T, H, W
ToTHWC(), # T, H, W, C
transforms_video.ToTensorVideo(), # C, T, H, W
self.normalize, # C, T, H, W
]
)
def __call__(self, vpath):
"""
Args:
vpath (str): Path of the video file to load and transform.
Returns:
torch.tensor: video clip after transforms. Size is (C, T, size, size).
"""
clip = load_video(
video_path=vpath,
n_frms=self.n_frms,
height=self.image_size,
width=self.image_size,
)
return self.transform(clip)
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
image_size = cfg.get("image_size", 256)
mean = cfg.get("mean", None)
std = cfg.get("std", None)
n_frms = cfg.get("n_frms", MAX_INT)
return cls(image_size=image_size, mean=mean, std=std, n_frms=n_frms)
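# A minimal construction sketch: build the eval processor from a config.
# Calling it requires an actual video file on disk, so the call is shown as a
# comment with a placeholder path (an assumption, not a real asset).
if __name__ == "__main__":
    cfg = OmegaConf.create({"image_size": 224, "n_frms": 8})
    processor = AlproVideoEvalProcessor.from_config(cfg)
    print(processor.image_size, processor.n_frms)  # 224 8
    # clip = processor("/path/to/video.mp4")  # -> float tensor of shape (3, n_frms, 224, 224)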
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/alpro_processors.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.alpro_processors import (
AlproVideoTrainProcessor,
AlproVideoEvalProcessor,
)
from lavis.processors.blip_processors import (
BlipImageTrainProcessor,
BlipImageEvalProcessor,
BlipCaptionProcessor,
)
from lavis.processors.gpt_processors import (
GPTVideoFeatureProcessor,
GPTDialogueProcessor,
)
from lavis.processors.clip_processors import ClipImageTrainProcessor
from lavis.common.registry import registry
__all__ = [
"BaseProcessor",
# ALPRO
"AlproVideoTrainProcessor",
"AlproVideoEvalProcessor",
# BLIP
"BlipImageTrainProcessor",
"BlipImageEvalProcessor",
"BlipCaptionProcessor",
"ClipImageTrainProcessor",
# GPT
"GPTVideoFeatureProcessor",
"GPTDialogueProcessor",
]
def load_processor(name, cfg=None):
"""
Example
>>> processor = load_processor("alpro_video_train", cfg=None)
"""
processor = registry.get_processor_class(name).from_config(cfg)
return processor
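# A minimal sketch of building processors by name with config overrides
# (the override values below are illustrative).
if __name__ == "__main__":
    from omegaconf import OmegaConf

    train_proc = load_processor(
        "blip_image_train", cfg=OmegaConf.create({"image_size": 224, "min_scale": 0.8})
    )
    eval_proc = load_processor("blip_image_eval", cfg=OmegaConf.create({"image_size": 224}))
    print(type(train_proc).__name__, type(eval_proc).__name__)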
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import re
from lavis.common.registry import registry
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import os
from itertools import chain
import numpy as np
import torch
from transformers import GPT2Tokenizer
SPECIAL_TOKENS_DICT = {
"bos_token": "<bos>",
"eos_token": "<eos>",
"additional_special_tokens": ["<speaker1>", "<speaker2>", "<video>", "<cap>"],
"pad_token": "<pad>",
}
SPECIAL_TOKENS = [
"<bos>",
"<eos>",
"<speaker1>",
"<speaker2>",
"<cap>",
"<video>",
"<pad>",
]
class GPTVideoFeatureBaseProcessor(BaseProcessor):
def __init__(self, visual_ft=["i3d_rgb"], audio_ft=["vggish"]):
self.visual_ft = visual_ft
self.audio_ft = audio_ft
@registry.register_processor("gpt_dialogue")
class GPTDialogueProcessor(BaseProcessor):
def __init__(self, max_turns=3, use_caption=True):
self.max_turns = max_turns
self.use_caption = use_caption
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
self.tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
def sample_sequence(self, caption, history, answer):
bos, eos, speaker1, speaker2, cap = self.tokenizer.convert_tokens_to_ids(
SPECIAL_TOKENS[:-2]
)
instance = {}
sequence = [caption] + history + [answer]
sequence = [s + [eos] for s in sequence]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [cap] * len(sequence[0]) + [
speaker2 if i % 2 else speaker1
for i, s in enumerate(sequence[1:])
for _ in s
]
instance["labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + sequence[-1]
assert len(instance["input_ids"]) == len(instance["token_type_ids"])
assert len(instance["token_type_ids"]) == len(instance["labels"])
for k, v in instance.items():
instance[k] = torch.Tensor(v).long()
return instance
def padding(self, seq, pad_token=-1):
if pad_token == -1:
pad_token = self.tokenizer.pad_token_id
padded_seq = torch.nn.utils.rnn.pad_sequence(
seq, batch_first=True, padding_value=pad_token
)
return padded_seq
def get_attention_mask(self, seq, pad_token=-1):
if pad_token == -1:
pad_token = self.tokenizer.pad_token_id
return seq != pad_token
def __call__(self, ann):
if self.use_caption:
caption = " ".join([ann["caption"], ann["summary"]])
caption = self.tokenizer.encode(caption)
else:
caption = []
dial_history = []
for turn in ann["dialog"][-self.max_turns :]:
dial_history.append(turn["question"])
dial_history.append(turn["answer"])
dial_history.append(ann["question"])
dial_history = [self.tokenizer.encode(t) for t in dial_history]
answer = self.tokenizer.encode(ann["answer"])
item = self.sample_sequence(caption, dial_history, answer)
return item
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
use_caption = cfg.get("use_caption", True)
max_turns = cfg.get("max_turns", 3)
return cls(max_turns=max_turns, use_caption=use_caption)
@registry.register_processor("gpt_video_ft")
class GPTVideoFeatureProcessor(GPTVideoFeatureBaseProcessor):
def __init__(self, visual_ft, audio_ft):
super().__init__(visual_ft, audio_ft)
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
self.tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
def padding(self, seq):
padded_seq = torch.nn.utils.rnn.pad_sequence(
seq, batch_first=True, padding_value=1.0
)
return padded_seq
def get_attention_mask(self, seq):
return torch.sum(seq != 1, dim=2) != 0
def __call__(self, ft_root, vname):
all_ft = []
for ft_name in self.visual_ft:
ft_path = os.path.join(ft_root, ft_name, vname)
all_ft.append(np.load(ft_path + ".npy"))
for ft_name in self.audio_ft:
ft_path = os.path.join(ft_root, ft_name, vname)
all_ft.append(np.load(ft_path + ".npy"))
min_len = min([len(ft) for ft in all_ft])
# TODO: use other sampling method (e.g. uniform sampling)
sampled_ft = [ft[:min_len] for ft in all_ft]
sampled_ft = np.concatenate(sampled_ft, axis=1)
item = {}
item["video_fts"] = torch.Tensor(sampled_ft)
video_type_token = self.tokenizer.convert_tokens_to_ids("<video>")
item["token_type_ids"] = torch.Tensor(
[video_type_token] * len(sampled_ft)
).long()
return item
@classmethod
def from_config(cls, cfg=None):
if cfg is None:
cfg = OmegaConf.create()
visual_ft = cfg.get("visual_ft", ["i3d_rgb"])
audio_ft = cfg.get("audio_ft", ["vggish"])
return cls(visual_ft=visual_ft, audio_ft=audio_ft)
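# A minimal sketch (toy annotation; the field values are made up) of how the
# dialogue processor builds a GPT-2 training instance. The field names follow
# the keys read in __call__ above; the GPT-2 tokenizer is downloaded on first use.
if __name__ == "__main__":
    proc = GPTDialogueProcessor(max_turns=1, use_caption=True)
    ann = {
        "caption": "a man is cooking",
        "summary": "he fries an egg",
        "dialog": [{"question": "what is he doing", "answer": "cooking"}],
        "question": "what does he cook",
        "answer": "an egg",
    }
    item = proc(ann)
    print(item["input_ids"].shape, item["token_type_ids"].shape, item["labels"].shape)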
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/gpt_processors.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from omegaconf import OmegaConf
class BaseProcessor:
def __init__(self):
self.transform = lambda x: x
return
def __call__(self, item):
return self.transform(item)
@classmethod
def from_config(cls, cfg=None):
return cls()
def build(self, **kwargs):
cfg = OmegaConf.create(kwargs)
return self.from_config(cfg)
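# A minimal sketch of subclassing: a processor only needs to provide
# self.transform (or override __call__). The class below is hypothetical.
if __name__ == "__main__":
    class LowercaseProcessor(BaseProcessor):
        def __init__(self):
            self.transform = lambda text: text.lower()

    proc = LowercaseProcessor.from_config()
    print(proc("Hello World"))  # hello world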
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/base_processor.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import cv2
import numpy as np
import torch
## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
"""
same output as PIL.ImageOps.autocontrast
"""
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
low = 0 if low.shape[0] == 0 else low[0]
high = np.argwhere(np.cumsum(hist[::-1]) > cut)
high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
if high <= low:
table = np.arange(n_bins)
else:
scale = (n_bins - 1) / (high - low)
offset = -low * scale
table = np.arange(n_bins) * scale + offset
table[table < 0] = 0
table[table > n_bins - 1] = n_bins - 1
table = table.clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def equalize_func(img):
"""
same output as PIL.ImageOps.equalize
PIL's implementation is different from cv2.equalize
"""
n_bins = 256
def tune_channel(ch):
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
non_zero_hist = hist[hist != 0].reshape(-1)
step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
if step == 0:
return ch
n = np.empty_like(hist)
n[0] = step // 2
n[1:] = hist[:-1]
table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def rotate_func(img, degree, fill=(0, 0, 0)):
"""
like PIL, rotate by degree, not radians
"""
H, W = img.shape[0], img.shape[1]
center = W / 2, H / 2
M = cv2.getRotationMatrix2D(center, degree, 1)
out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
return out
def solarize_func(img, thresh=128):
"""
same output as PIL.ImageOps.solarize
"""
table = np.array([el if el < thresh else 255 - el for el in range(256)])
table = table.clip(0, 255).astype(np.uint8)
out = table[img]
return out
def color_func(img, factor):
"""
same output as PIL.ImageEnhance.Color
"""
## implementation according to PIL definition, quite slow
# degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
# out = blend(degenerate, img, factor)
# M = (
# np.eye(3) * factor
# + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
# )[np.newaxis, np.newaxis, :]
M = np.float32(
[[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]
) * factor + np.float32([[0.114], [0.587], [0.299]])
out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
return out
def contrast_func(img, factor):
"""
same output as PIL.ImageEnhance.Contrast
"""
mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
table = (
np.array([(el - mean) * factor + mean for el in range(256)])
.clip(0, 255)
.astype(np.uint8)
)
out = table[img]
return out
def brightness_func(img, factor):
"""
same output as PIL.ImageEnhance.Brightness
"""
table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def sharpness_func(img, factor):
"""
The differences between this result and PIL are all on the 4 boundaries; the
center areas are the same
"""
kernel = np.ones((3, 3), dtype=np.float32)
kernel[1][1] = 5
kernel /= 13
degenerate = cv2.filter2D(img, -1, kernel)
if factor == 0.0:
out = degenerate
elif factor == 1.0:
out = img
else:
out = img.astype(np.float32)
degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
out = out.astype(np.uint8)
return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, factor, 0], [0, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def translate_x_func(img, offset, fill=(0, 0, 0)):
"""
same output as PIL.Image.transform
"""
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, -offset], [0, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def translate_y_func(img, offset, fill=(0, 0, 0)):
"""
same output as PIL.Image.transform
"""
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [0, 1, -offset]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def posterize_func(img, bits):
"""
same output as PIL.ImageOps.posterize
"""
out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
return out
def shear_y_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [factor, 1, 0]])
out = cv2.warpAffine(
img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
).astype(np.uint8)
return out
def cutout_func(img, pad_size, replace=(0, 0, 0)):
replace = np.array(replace, dtype=np.uint8)
H, W = img.shape[0], img.shape[1]
rh, rw = np.random.random(2)
pad_size = pad_size // 2
ch, cw = int(rh * H), int(rw * W)
x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
out = img.copy()
out[x1:x2, y1:y2, :] = replace
return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
def level_to_args(level):
return ((level / MAX_LEVEL) * 1.8 + 0.1,)
return level_to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 0.3
if np.random.random() > 0.5:
level = -level
return (level, replace_value)
return level_to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * float(translate_const)
if np.random.random() > 0.5:
level = -level
return (level, replace_value)
return level_to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = int((level / MAX_LEVEL) * cutout_const)
return (level, replace_value)
return level_to_args
def solarize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 256)
return (level,)
return level_to_args
def none_level_to_args(level):
return ()
def posterize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 4)
return (level,)
return level_to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 30
if np.random.random() < 0.5:
level = -level
return (level, replace_value)
return level_to_args
func_dict = {
"Identity": identity_func,
"AutoContrast": autocontrast_func,
"Equalize": equalize_func,
"Rotate": rotate_func,
"Solarize": solarize_func,
"Color": color_func,
"Contrast": contrast_func,
"Brightness": brightness_func,
"Sharpness": sharpness_func,
"ShearX": shear_x_func,
"TranslateX": translate_x_func,
"TranslateY": translate_y_func,
"Posterize": posterize_func,
"ShearY": shear_y_func,
}
translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)
arg_dict = {
"Identity": none_level_to_args,
"AutoContrast": none_level_to_args,
"Equalize": none_level_to_args,
"Rotate": rotate_level_to_args(MAX_LEVEL, replace_value),
"Solarize": solarize_level_to_args(MAX_LEVEL),
"Color": enhance_level_to_args(MAX_LEVEL),
"Contrast": enhance_level_to_args(MAX_LEVEL),
"Brightness": enhance_level_to_args(MAX_LEVEL),
"Sharpness": enhance_level_to_args(MAX_LEVEL),
"ShearX": shear_level_to_args(MAX_LEVEL, replace_value),
"TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
"TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
"Posterize": posterize_level_to_args(MAX_LEVEL),
"ShearY": shear_level_to_args(MAX_LEVEL, replace_value),
}
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
class VideoRandomAugment(object):
def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]):
self.N = N
self.M = M
self.p = p
self.tensor_in_tensor_out = tensor_in_tensor_out
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N, replace=False)
return [(op, self.M) for op in sampled_ops]
def __call__(self, frames):
assert (
frames.shape[-1] == 3
), "Expecting last dimension for 3-channels RGB (b, h, w, c)."
if self.tensor_in_tensor_out:
frames = frames.numpy().astype(np.uint8)
num_frames = frames.shape[0]
ops = num_frames * [self.get_random_ops()]
apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]
frames = torch.stack(
list(map(self._aug, frames, ops, apply_or_not)), dim=0
).float()
return frames
def _aug(self, img, ops, apply_or_not):
for i, (name, level) in enumerate(ops):
if not apply_or_not[i]:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return torch.from_numpy(img)
if __name__ == "__main__":
a = RandomAugment()
img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # the augment ops index lookup tables, so use a uint8 image
a(img)
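# A second, minimal sketch for the video variant (dummy uint8 frames; the op
# list and sizes are illustrative).
if __name__ == "__main__":
    frames = torch.randint(0, 256, (4, 32, 32, 3), dtype=torch.uint8)  # T, H, W, C
    video_aug = VideoRandomAugment(2, 5, augs=["Identity", "Brightness", "Rotate"])
    out = video_aug(frames)
    print(out.shape, out.dtype)  # torch.Size([4, 32, 32, 3]) torch.float32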
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/randaugment.py |
#!/usr/bin/env python3
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import numbers
import random
from torchvision.transforms import (
RandomCrop,
RandomResizedCrop,
)
import lavis.processors.functional_video as F
__all__ = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"RandomHorizontalFlipVideo",
]
class RandomCropVideo(RandomCrop):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.size)
return F.crop(clip, i, j, h, w)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class RandomResizedCropVideo(RandomResizedCrop):
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation_mode="bilinear",
):
if isinstance(size, tuple):
if len(size) != 2:
raise ValueError(
f"size should be tuple (height, width), instead got {size}"
)
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.ratio = ratio
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, H, W)
"""
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
def __init__(self, crop_size):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return F.center_crop(clip, self.crop_size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(crop_size={self.crop_size})"
class NormalizeVideo:
"""
Normalize the video clip by mean subtraction and division by standard deviation
Args:
mean (3-tuple): pixel RGB mean
std (3-tuple): pixel RGB standard deviation
inplace (boolean): whether to do in-place normalization
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
"""
Args:
clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W)
"""
return F.normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
class ToTensorVideo:
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of clip tensor
"""
def __init__(self):
pass
def __call__(self, clip):
"""
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
return F.to_tensor(clip)
def __repr__(self) -> str:
return self.__class__.__name__
class RandomHorizontalFlipVideo:
"""
Flip the video clip along the horizontal direction with a given probability
Args:
p (float): probability of the clip being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
if random.random() < self.p:
clip = F.hflip(clip)
return clip
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/processors/transforms_video.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import math
from lavis.common.registry import registry
@registry.register_lr_scheduler("linear_warmup_step_lr")
class LinearWarmupStepLRScheduler:
def __init__(
self,
optimizer,
max_epoch,
min_lr,
init_lr,
decay_rate=1,
warmup_start_lr=-1,
warmup_steps=0,
**kwargs
):
self.optimizer = optimizer
self.max_epoch = max_epoch
self.min_lr = min_lr
self.decay_rate = decay_rate
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
def step(self, cur_epoch, cur_step):
if cur_epoch == 0:
warmup_lr_schedule(
step=cur_step,
optimizer=self.optimizer,
max_step=self.warmup_steps,
init_lr=self.warmup_start_lr,
max_lr=self.init_lr,
)
else:
step_lr_schedule(
epoch=cur_epoch,
optimizer=self.optimizer,
init_lr=self.init_lr,
min_lr=self.min_lr,
decay_rate=self.decay_rate,
)
@registry.register_lr_scheduler("linear_warmup_cosine_lr")
class LinearWarmupCosineLRScheduler:
def __init__(
self,
optimizer,
max_epoch,
min_lr,
init_lr,
warmup_steps=0,
warmup_start_lr=-1,
**kwargs
):
self.optimizer = optimizer
self.max_epoch = max_epoch
self.min_lr = min_lr
self.init_lr = init_lr
self.warmup_steps = warmup_steps
self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
def step(self, cur_epoch, cur_step):
# assuming the warmup iters less than one epoch
if cur_epoch == 0:
warmup_lr_schedule(
step=cur_step,
optimizer=self.optimizer,
max_step=self.warmup_steps,
init_lr=self.warmup_start_lr,
max_lr=self.init_lr,
)
else:
cosine_lr_schedule(
epoch=cur_epoch,
optimizer=self.optimizer,
max_epoch=self.max_epoch,
init_lr=self.init_lr,
min_lr=self.min_lr,
)
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
"""Decay the learning rate"""
lr = (init_lr - min_lr) * 0.5 * (
1.0 + math.cos(math.pi * epoch / max_epoch)
) + min_lr
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
"""Warmup the learning rate"""
lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
"""Decay the learning rate"""
lr = max(min_lr, init_lr * (decay_rate**epoch))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/optims.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import logging
import json
from typing import Dict
from omegaconf import OmegaConf
from lavis.common.registry import registry
class Config:
def __init__(self, args):
self.config = {}
self.args = args
# Register the config and configuration for setup
registry.register("configuration", self)
user_config = self._build_opt_list(self.args.options)
config = OmegaConf.load(self.args.cfg_path)
runner_config = self.build_runner_config(config)
model_config = self.build_model_config(config, **user_config)
dataset_config = self.build_dataset_config(config)
# Validate the user-provided runner configuration
# model and dataset configuration are supposed to be validated by the respective classes
# [TODO] validate the model/dataset configuration
# self._validate_runner_config(runner_config)
# Override the default configuration with user options.
self.config = OmegaConf.merge(
runner_config, model_config, dataset_config, user_config
)
def _validate_runner_config(self, runner_config):
"""
This method validates the configuration, such that
1) all the user specified options are valid;
2) no type mismatches between the user specified options and the config.
"""
runner_config_validator = create_runner_config_validator()
runner_config_validator.validate(runner_config)
def _build_opt_list(self, opts):
opts_dot_list = self._convert_to_dot_list(opts)
return OmegaConf.from_dotlist(opts_dot_list)
@staticmethod
def build_model_config(config, **kwargs):
model = config.get("model", None)
assert model is not None, "Missing model configuration file."
model_cls = registry.get_model_class(model.arch)
assert model_cls is not None, f"Model '{model.arch}' has not been registered."
model_type = kwargs.get("model.model_type", None)
if not model_type:
model_type = model.get("model_type", None)
# else use the model type selected by user.
assert model_type is not None, "Missing model_type."
model_config_path = model_cls.default_config_path(model_type=model_type)
model_config = OmegaConf.create()
# hierarchy override: customized config > default config
model_config = OmegaConf.merge(
model_config,
OmegaConf.load(model_config_path),
{"model": config["model"]},
)
return model_config
@staticmethod
def build_runner_config(config):
return {"run": config.run}
@staticmethod
def build_dataset_config(config):
datasets = config.get("datasets", None)
if datasets is None:
raise KeyError(
"Expecting 'datasets' as the root key for dataset configuration."
)
dataset_config = OmegaConf.create()
for dataset_name in datasets:
builder_cls = registry.get_builder_class(dataset_name)
dataset_config_type = datasets[dataset_name].get("type", "default")
dataset_config_path = builder_cls.default_config_path(
type=dataset_config_type
)
# hierarchy override: customized config > default config
dataset_config = OmegaConf.merge(
dataset_config,
OmegaConf.load(dataset_config_path),
{"datasets": {dataset_name: config["datasets"][dataset_name]}},
)
return dataset_config
def _convert_to_dot_list(self, opts):
if opts is None:
opts = []
if len(opts) == 0:
return opts
has_equal = opts[0].find("=") != -1
if has_equal:
return opts
return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])]
def get_config(self):
return self.config
@property
def run_cfg(self):
return self.config.run
@property
def datasets_cfg(self):
return self.config.datasets
@property
def model_cfg(self):
return self.config.model
def pretty_print(self):
logging.info("\n===== Running Parameters =====")
logging.info(self._convert_node_to_json(self.config.run))
logging.info("\n====== Dataset Attributes ======")
datasets = self.config.datasets
for dataset in datasets:
if dataset in self.config.datasets:
logging.info(f"\n======== {dataset} =======")
dataset_config = self.config.datasets[dataset]
logging.info(self._convert_node_to_json(dataset_config))
else:
logging.warning(f"No dataset named '{dataset}' in config. Skipping")
logging.info(f"\n====== Model Attributes ======")
logging.info(self._convert_node_to_json(self.config.model))
def _convert_node_to_json(self, node):
container = OmegaConf.to_container(node, resolve=True)
return json.dumps(container, indent=4, sort_keys=True)
def to_dict(self):
return OmegaConf.to_container(self.config)
def node_to_dict(node):
return OmegaConf.to_container(node)
class ConfigValidator:
"""
This is a preliminary implementation to centralize and validate the configuration.
May be altered in the future.
A helper class to validate configurations from yaml file.
This serves the following purposes:
1. Ensure all the options in the yaml are defined, raise error if not.
2. when type mismatches are found, the validator will raise an error.
3. a central place to store and display helpful messages for supported configurations.
"""
class _Argument:
def __init__(self, name, choices=None, type=None, help=None):
self.name = name
self.val = None
self.choices = choices
self.type = type
self.help = help
def __str__(self):
s = f"{self.name}={self.val}"
if self.type is not None:
s += f", ({self.type})"
if self.choices is not None:
s += f", choices: {self.choices}"
if self.help is not None:
s += f", ({self.help})"
return s
def __init__(self, description):
self.description = description
self.arguments = dict()
self.parsed_args = None
def __getitem__(self, key):
assert self.parsed_args is not None, "No arguments parsed yet."
return self.parsed_args[key]
def __str__(self) -> str:
return self.format_help()
def add_argument(self, *args, **kwargs):
"""
Assume the first argument is the name of the argument.
"""
self.arguments[args[0]] = self._Argument(*args, **kwargs)
def validate(self, config=None):
"""
Validate the given config (dict-like) against the registered arguments.
"""
for k, v in config.items():
assert (
k in self.arguments
), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}."""
if self.arguments[k].type is not None:
try:
self.arguments[k].val = self.arguments[k].type(v)
except ValueError:
raise ValueError(f"{k} is not a valid {self.arguments[k].type}.")
if self.arguments[k].choices is not None:
assert (
v in self.arguments[k].choices
), f"""{k} must be one of {self.arguments[k].choices}."""
return config
def format_arguments(self):
return str([f"{k}" for k in sorted(self.arguments.keys())])
def format_help(self):
# description + key-value pair string for each argument
help_msg = str(self.description)
return help_msg + ", available arguments: " + self.format_arguments()
def print_help(self):
# display help message
print(self.format_help())
def create_runner_config_validator():
validator = ConfigValidator(description="Runner configurations")
validator.add_argument(
"runner",
type=str,
choices=["runner_base", "runner_iter"],
help="""Runner to use. The "runner_base" uses epoch-based training while iter-based
runner runs based on iters. Default: runner_base""",
)
# add arguments for training dataset ratios
validator.add_argument(
"train_dataset_ratios",
type=Dict[str, float],
help="""Ratios of training dataset. This is used in iteration-based runner.
Do not support for epoch-based runner because how to define an epoch becomes tricky.
Default: None""",
)
validator.add_argument(
"max_iters",
type=float,
help="Maximum number of iterations to run.",
)
validator.add_argument(
"max_epoch",
type=int,
help="Maximum number of epochs to run.",
)
# add arguments for iters_per_inner_epoch
validator.add_argument(
"iters_per_inner_epoch",
type=float,
help="Number of iterations per inner epoch. This is required when runner is runner_iter.",
)
lr_scheds_choices = registry.list_lr_schedulers()
validator.add_argument(
"lr_sched",
type=str,
choices=lr_scheds_choices,
help="Learning rate scheduler to use, from {}".format(lr_scheds_choices),
)
task_choices = registry.list_tasks()
validator.add_argument(
"task",
type=str,
choices=task_choices,
help="Task to use, from {}".format(task_choices),
)
# add arguments for init_lr
validator.add_argument(
"init_lr",
type=float,
help="Initial learning rate. This will be the learning rate after warmup and before decay.",
)
# add arguments for min_lr
validator.add_argument(
"min_lr",
type=float,
help="Minimum learning rate (after decay).",
)
# add arguments for warmup_lr
validator.add_argument(
"warmup_lr",
type=float,
help="Starting learning rate for warmup.",
)
# add arguments for learning rate decay rate
validator.add_argument(
"lr_decay_rate",
type=float,
help="Learning rate decay rate. Required if using a decaying learning rate scheduler.",
)
# add arguments for weight decay
validator.add_argument(
"weight_decay",
type=float,
help="Weight decay rate.",
)
# add arguments for training batch size
validator.add_argument(
"batch_size_train",
type=int,
help="Training batch size.",
)
# add arguments for evaluation batch size
validator.add_argument(
"batch_size_eval",
type=int,
help="Evaluation batch size, including validation and testing.",
)
# add arguments for number of workers for data loading
validator.add_argument(
"num_workers",
help="Number of workers for data loading.",
)
# add arguments for warm up steps
validator.add_argument(
"warmup_steps",
type=int,
help="Number of warmup steps. Required if a warmup schedule is used.",
)
# add arguments for random seed
validator.add_argument(
"seed",
type=int,
help="Random seed.",
)
# add arguments for output directory
validator.add_argument(
"output_dir",
type=str,
help="Output directory to save checkpoints and logs.",
)
# add arguments for whether only use evaluation
validator.add_argument(
"evaluate",
help="Whether to only evaluate the model. If true, training will not be performed.",
)
# add arguments for splits used for training, e.g. ["train", "val"]
validator.add_argument(
"train_splits",
type=list,
help="Splits to use for training.",
)
# add arguments for splits used for validation, e.g. ["val"]
validator.add_argument(
"valid_splits",
type=list,
help="Splits to use for validation. If not provided, will skip the validation.",
)
# add arguments for splits used for testing, e.g. ["test"]
validator.add_argument(
"test_splits",
type=list,
help="Splits to use for testing. If not provided, will skip the testing.",
)
# add arguments for accumulating gradient for iterations
validator.add_argument(
"accum_grad_iters",
type=int,
help="Number of iterations to accumulate gradient for.",
)
# ====== distributed training ======
validator.add_argument(
"device",
type=str,
choices=["cpu", "cuda"],
help="Device to use. Support 'cuda' or 'cpu' as for now.",
)
validator.add_argument(
"world_size",
type=int,
help="Number of processes participating in the job.",
)
validator.add_argument("dist_url", type=str)
validator.add_argument("distributed", type=bool)
# add arguments to opt using distributed sampler during evaluation or not
validator.add_argument(
"use_dist_eval_sampler",
type=bool,
help="Whether to use distributed sampler during evaluation or not.",
)
# ====== task specific ======
# generation task specific arguments
# add arguments for maximal length of text output
validator.add_argument(
"max_len",
type=int,
help="Maximal length of text output.",
)
# add arguments for minimal length of text output
validator.add_argument(
"min_len",
type=int,
help="Minimal length of text output.",
)
# add arguments number of beams
validator.add_argument(
"num_beams",
type=int,
help="Number of beams used for beam search.",
)
# vqa task specific arguments
# add arguments for number of answer candidates
validator.add_argument(
"num_ans_candidates",
type=int,
help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""",
)
# add arguments for inference method
validator.add_argument(
"inference_method",
type=str,
choices=["genearte", "rank"],
help="""Inference method to use for question answering. If rank, requires a answer list.""",
)
# ====== model specific ======
validator.add_argument(
"k_test",
type=int,
help="Number of top k most similar samples from ITC/VTC selection to be tested.",
)
return validator
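# A minimal sketch of the validator pattern defined above, using a toy
# description and a toy config (the argument names and values here are
# illustrative, not LAVIS defaults).
if __name__ == "__main__":
    toy_validator = ConfigValidator(description="Toy runner configurations")
    toy_validator.add_argument("task", type=str, choices=["captioning", "vqa"])
    toy_validator.add_argument("init_lr", type=float, help="Initial learning rate.")
    toy_validator.validate({"task": "vqa", "init_lr": 1e-5})
    toy_validator.print_help()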
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/config.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class Registry:
mapping = {
"builder_name_mapping": {},
"task_name_mapping": {},
"processor_name_mapping": {},
"model_name_mapping": {},
"lr_scheduler_name_mapping": {},
"runner_name_mapping": {},
"state": {},
"paths": {},
}
@classmethod
def register_builder(cls, name):
r"""Register a dataset builder to registry with key 'name'
Args:
name: Key with which the builder will be registered.
Usage:
from lavis.common.registry import registry
from lavis.datasets.base_dataset_builder import BaseDatasetBuilder
"""
def wrap(builder_cls):
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
assert issubclass(
builder_cls, BaseDatasetBuilder
), "All builders must inherit BaseDatasetBuilder class, found {}".format(
builder_cls
)
if name in cls.mapping["builder_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["builder_name_mapping"][name]
)
)
cls.mapping["builder_name_mapping"][name] = builder_cls
return builder_cls
return wrap
@classmethod
def register_task(cls, name):
r"""Register a task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(task_cls):
from lavis.tasks.base_task import BaseTask
assert issubclass(
task_cls, BaseTask
), "All tasks must inherit BaseTask class"
if name in cls.mapping["task_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["task_name_mapping"][name]
)
)
cls.mapping["task_name_mapping"][name] = task_cls
return task_cls
return wrap
@classmethod
def register_model(cls, name):
r"""Register a task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(model_cls):
from lavis.models import BaseModel
assert issubclass(
model_cls, BaseModel
), "All models must inherit BaseModel class"
if name in cls.mapping["model_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["model_name_mapping"][name]
)
)
cls.mapping["model_name_mapping"][name] = model_cls
return model_cls
return wrap
@classmethod
def register_processor(cls, name):
r"""Register a processor to registry with key 'name'
Args:
name: Key with which the processor will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(processor_cls):
from lavis.processors import BaseProcessor
assert issubclass(
processor_cls, BaseProcessor
), "All processors must inherit BaseProcessor class"
if name in cls.mapping["processor_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["processor_name_mapping"][name]
)
)
cls.mapping["processor_name_mapping"][name] = processor_cls
return processor_cls
return wrap
@classmethod
def register_lr_scheduler(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(lr_sched_cls):
if name in cls.mapping["lr_scheduler_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["lr_scheduler_name_mapping"][name]
)
)
cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls
return lr_sched_cls
return wrap
@classmethod
def register_runner(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage:
from lavis.common.registry import registry
"""
def wrap(runner_cls):
if name in cls.mapping["runner_name_mapping"]:
raise KeyError(
"Name '{}' already registered for {}.".format(
name, cls.mapping["runner_name_mapping"][name]
)
)
cls.mapping["runner_name_mapping"][name] = runner_cls
return runner_cls
return wrap
@classmethod
def register_path(cls, name, path):
r"""Register a path to registry with key 'name'
Args:
name: Key with which the path will be registered.
Usage:
from lavis.common.registry import registry
"""
assert isinstance(path, str), "All path must be str."
if name in cls.mapping["paths"]:
raise KeyError("Name '{}' already registered.".format(name))
cls.mapping["paths"][name] = path
@classmethod
def register(cls, name, obj):
r"""Register an item to registry with key 'name'
Args:
name: Key with which the item will be registered.
Usage::
from lavis.common.registry import registry
registry.register("config", {})
"""
path = name.split(".")
current = cls.mapping["state"]
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = obj
# @classmethod
# def get_trainer_class(cls, name):
# return cls.mapping["trainer_name_mapping"].get(name, None)
@classmethod
def get_builder_class(cls, name):
return cls.mapping["builder_name_mapping"].get(name, None)
@classmethod
def get_model_class(cls, name):
return cls.mapping["model_name_mapping"].get(name, None)
@classmethod
def get_task_class(cls, name):
return cls.mapping["task_name_mapping"].get(name, None)
@classmethod
def get_processor_class(cls, name):
return cls.mapping["processor_name_mapping"].get(name, None)
@classmethod
def get_lr_scheduler_class(cls, name):
return cls.mapping["lr_scheduler_name_mapping"].get(name, None)
@classmethod
def get_runner_class(cls, name):
return cls.mapping["runner_name_mapping"].get(name, None)
@classmethod
def list_runners(cls):
return sorted(cls.mapping["runner_name_mapping"].keys())
@classmethod
def list_models(cls):
return sorted(cls.mapping["model_name_mapping"].keys())
@classmethod
def list_tasks(cls):
return sorted(cls.mapping["task_name_mapping"].keys())
@classmethod
def list_processors(cls):
return sorted(cls.mapping["processor_name_mapping"].keys())
@classmethod
def list_lr_schedulers(cls):
return sorted(cls.mapping["lr_scheduler_name_mapping"].keys())
@classmethod
def list_datasets(cls):
return sorted(cls.mapping["builder_name_mapping"].keys())
@classmethod
def get_path(cls, name):
return cls.mapping["paths"].get(name, None)
@classmethod
def get(cls, name, default=None, no_warning=False):
r"""Get an item from registry with key 'name'
Args:
name (string): Key whose value needs to be retrieved.
default: If passed and key is not in registry, default value will
be returned with a warning. Default: None
no_warning (bool): If passed as True, warning when key doesn't exist
will not be generated. Useful for MMF's
internal operations. Default: False
"""
original_name = name
name = name.split(".")
value = cls.mapping["state"]
for subname in name:
value = value.get(subname, default)
if value is default:
break
if (
"writer" in cls.mapping["state"]
and value == default
and no_warning is False
):
cls.mapping["state"]["writer"].warning(
"Key {} is not present in registry, returning default value "
"of {}".format(original_name, default)
)
return value
@classmethod
def unregister(cls, name):
r"""Remove an item from registry with key 'name'
Args:
name: Key which needs to be removed.
Usage::
from lavis.common.registry import registry
config = registry.unregister("config")
"""
return cls.mapping["state"].pop(name, None)
registry = Registry()
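# A minimal sketch of registering and looking things up on the shared registry
# instance (the names registered below are illustrative, not used by LAVIS).
if __name__ == "__main__":
    registry.register_path("example_cache_root", "/tmp/lavis_cache")
    print(registry.get_path("example_cache_root"))

    @registry.register_lr_scheduler("toy_constant_lr")
    class ToyConstantLRScheduler:
        def __init__(self, optimizer, **kwargs):
            self.optimizer = optimizer

        def step(self, cur_epoch, cur_step):
            pass  # keep the optimizer's lr unchanged

    print(registry.get_lr_scheduler_class("toy_constant_lr") is ToyConstantLRScheduler)  # True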
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/registry.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import time
from collections import defaultdict, deque
import torch
import torch.distributed as dist
from lavis.common import dist_utils
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not dist_utils.is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def global_avg(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
log_msg = [
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
if torch.cuda.is_available():
log_msg.append("max mem: {memory:.0f}")
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def setup_logger():
logging.basicConfig(
level=logging.INFO if dist_utils.is_main_process() else logging.WARN,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler()],
)
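# Illustrative usage sketch (not part of the original module): driving
# MetricLogger / SmoothedValue with dummy loss values in a toy loop.
if __name__ == "__main__":
    demo_logger = MetricLogger(delimiter="  ")
    demo_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
    for step in demo_logger.log_every(range(20), print_freq=5, header="Demo:"):
        demo_logger.update(loss=1.0 / (step + 1), lr=1e-4)
    print("Averaged stats:", demo_logger.global_avg())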
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/logger.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import io
import json
import logging
import os
import pickle
import re
import shutil
import urllib
import urllib.error
import urllib.request
from typing import Optional
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import yaml
from iopath.common.download import download
from iopath.common.file_io import file_lock, g_pathmgr
from lavis.common.registry import registry
from torch.utils.model_zoo import tqdm
from torchvision.datasets.utils import (
check_integrity,
download_file_from_google_drive,
extract_archive,
)
def now():
from datetime import datetime
return datetime.now().strftime("%Y%m%d%H%M")[:-1]
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def get_cache_path(rel_path):
return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path))
def get_abs_path(rel_path):
return os.path.join(registry.get_path("library_root"), rel_path)
def load_json(filename):
with open(filename, "r") as f:
return json.load(f)
# The following are adapted from torchvision and vissl
# torchvision: https://github.com/pytorch/vision
# vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py
def makedir(dir_path):
"""
Create the directory if it does not exist.
"""
is_success = False
try:
if not g_pathmgr.exists(dir_path):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
print(f"Error creating directory: {dir_path}")
return is_success
def get_redirected_url(url: str):
"""
Given a URL, returns the URL it redirects to or the
    original URL if there is no redirection
"""
import requests
with requests.Session() as session:
with session.get(url, stream=True, allow_redirects=True) as response:
if response.history:
return response.url
else:
return url
def to_google_drive_download_url(view_url: str) -> str:
"""
Utility function to transform a view URL of google drive
to a download URL for google drive
Example input:
https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view
Example output:
https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp
"""
splits = view_url.split("/")
assert splits[-1] == "view"
file_id = splits[-2]
return f"https://drive.google.com/uc?export=download&id={file_id}"
def download_google_drive_url(url: str, output_path: str, output_file_name: str):
"""
Download a file from google drive
    Downloading a URL from Google Drive requires confirmation when
    the file is too large (Google Drive notifies that anti-virus
    checks cannot be performed on such files).
"""
import requests
with requests.Session() as session:
# First get the confirmation token and append it to the URL
with session.get(url, stream=True, allow_redirects=True) as response:
for k, v in response.cookies.items():
if k.startswith("download_warning"):
url = url + "&confirm=" + v
# Then download the content of the file
with session.get(url, stream=True, verify=True) as response:
makedir(output_path)
path = os.path.join(output_path, output_file_name)
total_size = int(response.headers.get("Content-length", 0))
with open(path, "wb") as file:
from tqdm import tqdm
with tqdm(total=total_size) as progress_bar:
for block in response.iter_content(
chunk_size=io.DEFAULT_BUFFER_SIZE
):
file.write(block)
progress_bar.update(len(block))
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
return None
match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
if match is None:
return None
return match.group("id")
def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
with open(filename, "wb") as fh:
with urllib.request.urlopen(
urllib.request.Request(url, headers={"User-Agent": "vissl"})
) as response:
with tqdm(total=response.length) as pbar:
                for chunk in iter(lambda: response.read(chunk_size), b""):
if not chunk:
break
pbar.update(chunk_size)
fh.write(chunk)
def download_url(
url: str,
root: str,
filename: Optional[str] = None,
md5: Optional[str] = None,
) -> None:
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under.
If None, use the basename of the URL.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir(root)
# check if file is already present locally
if check_integrity(fpath, md5):
print("Using downloaded and verified file: " + fpath)
return
# expand redirect chain if needed
url = get_redirected_url(url)
# check if file is located on Google Drive
file_id = _get_google_drive_file_id(url)
if file_id is not None:
return download_file_from_google_drive(file_id, root, filename, md5)
# download the file
try:
print("Downloading " + url + " to " + fpath)
_urlretrieve(url, fpath)
except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined]
if url[:5] == "https":
url = url.replace("https:", "http:")
print(
"Failed download. Trying https -> http instead."
" Downloading " + url + " to " + fpath
)
_urlretrieve(url, fpath)
else:
raise e
# check integrity of downloaded file
if not check_integrity(fpath, md5):
raise RuntimeError("File not found or corrupted.")
def download_and_extract_archive(
url: str,
download_root: str,
extract_root: Optional[str] = None,
filename: Optional[str] = None,
md5: Optional[str] = None,
remove_finished: bool = False,
) -> None:
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def cache_url(url: str, cache_dir: str) -> str:
"""
This implementation downloads the remote resource and caches it locally.
The resource will only be downloaded if not previously requested.
"""
parsed_url = urlparse(url)
dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/")))
makedir(dirname)
filename = url.split("/")[-1]
cached = os.path.join(dirname, filename)
with file_lock(cached):
if not os.path.isfile(cached):
logging.info(f"Downloading {url} to {cached} ...")
cached = download(url, dirname, filename=filename)
logging.info(f"URL {url} cached in {cached}")
return cached
# TODO (prigoyal): convert this into RAII-style API
def create_file_symlink(file1, file2):
"""
    Create a symlink from file1 to file2.
    Useful during model checkpointing to point a stable name at the
    latest successful checkpoint.
"""
try:
if g_pathmgr.exists(file2):
g_pathmgr.rm(file2)
g_pathmgr.symlink(file1, file2)
except Exception as e:
logging.info(f"Could NOT create symlink. Error: {e}")
def save_file(data, filename, append_to_json=True, verbose=True):
"""
Common i/o utility to handle saving data to various file formats.
Supported:
.pkl, .pickle, .npy, .json
Specifically for .json, users have the option to either append (default)
or rewrite by passing in Boolean value to append_to_json.
"""
if verbose:
logging.info(f"Saving data to file: {filename}")
file_ext = os.path.splitext(filename)[1]
if file_ext in [".pkl", ".pickle"]:
with g_pathmgr.open(filename, "wb") as fopen:
pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)
elif file_ext == ".npy":
with g_pathmgr.open(filename, "wb") as fopen:
np.save(fopen, data)
elif file_ext == ".json":
if append_to_json:
with g_pathmgr.open(filename, "a") as fopen:
fopen.write(json.dumps(data, sort_keys=True) + "\n")
fopen.flush()
else:
with g_pathmgr.open(filename, "w") as fopen:
fopen.write(json.dumps(data, sort_keys=True) + "\n")
fopen.flush()
elif file_ext == ".yaml":
with g_pathmgr.open(filename, "w") as fopen:
dump = yaml.dump(data)
fopen.write(dump)
fopen.flush()
else:
raise Exception(f"Saving {file_ext} is not supported yet")
if verbose:
logging.info(f"Saved data to file: {filename}")
def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False):
"""
Common i/o utility to handle loading data from various file formats.
Supported:
.pkl, .pickle, .npy, .json
For the npy files, we support reading the files in mmap_mode.
If the mmap_mode of reading is not successful, we load data without the
mmap_mode.
"""
if verbose:
logging.info(f"Loading data from file: {filename}")
file_ext = os.path.splitext(filename)[1]
if file_ext == ".txt":
with g_pathmgr.open(filename, "r") as fopen:
data = fopen.readlines()
elif file_ext in [".pkl", ".pickle"]:
with g_pathmgr.open(filename, "rb") as fopen:
data = pickle.load(fopen, encoding="latin1")
elif file_ext == ".npy":
if mmap_mode:
try:
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(
fopen,
allow_pickle=allow_pickle,
encoding="latin1",
mmap_mode=mmap_mode,
)
except ValueError as e:
logging.info(
f"Could not mmap {filename}: {e}. Trying without g_pathmgr"
)
data = np.load(
filename,
allow_pickle=allow_pickle,
encoding="latin1",
mmap_mode=mmap_mode,
)
logging.info("Successfully loaded without g_pathmgr")
except Exception:
logging.info("Could not mmap without g_pathmgr. Trying without mmap")
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
else:
with g_pathmgr.open(filename, "rb") as fopen:
data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
elif file_ext == ".json":
with g_pathmgr.open(filename, "r") as fopen:
data = json.load(fopen)
elif file_ext == ".yaml":
with g_pathmgr.open(filename, "r") as fopen:
data = yaml.load(fopen, Loader=yaml.FullLoader)
elif file_ext == ".csv":
with g_pathmgr.open(filename, "r") as fopen:
data = pd.read_csv(fopen)
else:
raise Exception(f"Reading from {file_ext} is not supported yet")
return data
def abspath(resource_path: str):
"""
Make a path absolute, but take into account prefixes like
"http://" or "manifold://"
"""
regex = re.compile(r"^\w+://")
if regex.match(resource_path) is None:
return os.path.abspath(resource_path)
else:
return resource_path
def makedir(dir_path):
"""
Create the directory if it does not exist.
"""
is_success = False
try:
if not g_pathmgr.exists(dir_path):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
logging.info(f"Error creating directory: {dir_path}")
return is_success
def is_url(input_url):
"""
    Check if an input string is a URL: look for http(s):// and ignore the case.
"""
is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None
return is_url
def cleanup_dir(dir):
"""
Utility for deleting a directory. Useful for cleaning the storage space
that contains various training artifacts like checkpoints, data etc.
"""
if os.path.exists(dir):
logging.info(f"Deleting directory: {dir}")
shutil.rmtree(dir)
logging.info(f"Deleted contents of directory: {dir}")
def get_file_size(filename):
"""
Given a file, get the size of file in MB
"""
size_in_mb = os.path.getsize(filename) / float(1024**2)
return size_in_mb
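# Illustrative usage sketch (not part of the original module): a minimal
# save_file / load_file round trip through a temporary .json path.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_path = os.path.join(tmp_dir, "demo.json")
        save_file({"split": "val", "accuracy": 71.3}, demo_path, append_to_json=False)
        print(load_file(demo_path))  # -> {'accuracy': 71.3, 'split': 'val'}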
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/utils.py |
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import filters
from skimage import transform as skimage_transform
def getAttMap(img, attMap, blur=True, overlap=True):
attMap -= attMap.min()
if attMap.max() > 0:
attMap /= attMap.max()
attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant")
if blur:
attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
attMap -= attMap.min()
attMap /= attMap.max()
cmap = plt.get_cmap("jet")
attMapV = cmap(attMap)
attMapV = np.delete(attMapV, 3, 2)
if overlap:
attMap = (
1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img
+ (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV
)
return attMap
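# Illustrative usage sketch (not part of the original module): overlaying a
# random low-resolution attention map on a random RGB image, mainly to show
# the expected input/output shapes.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_img = rng.random((224, 224, 3))  # HxWx3 image with values in [0, 1)
    demo_att = rng.random((14, 14))       # coarse attention map to be upsampled
    overlay = getAttMap(demo_img, demo_att, blur=True, overlap=True)
    print(overlay.shape)  # -> (224, 224, 3)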
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/gradcam.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import functools
import os
import torch
import torch.distributed as dist
import timm.models.hub as timm_hub
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(
"| distributed init (rank {}, world {}): {}".format(
args.rank, args.world_size, args.dist_url
),
flush=True,
)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
timeout=datetime.timedelta(
days=365
), # allow auto-downloading and de-compressing
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def get_dist_info():
if torch.__version__ < "1.0":
initialized = dist._initialized
else:
initialized = dist.is_initialized()
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else: # non-distributed training
rank = 0
world_size = 1
return rank, world_size
def main_process(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
def download_cached_file(url, check_hash=True, progress=False):
"""
Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
"""
def get_cached_file_path():
# a hack to sync the file path across processes
parts = torch.hub.urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
return cached_file
if is_main_process():
timm_hub.download_cached_file(url, check_hash, progress)
if is_dist_avail_and_initialized():
dist.barrier()
return get_cached_file_path()
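# Illustrative usage sketch (not part of the original module): in a
# non-distributed run get_rank() falls back to 0, so functions decorated with
# @main_process simply execute.
if __name__ == "__main__":
    @main_process
    def _demo_log(msg):
        print("[rank {}] {}".format(get_rank(), msg))

    print("world size:", get_world_size())  # -> 1 without torch.distributed
    _demo_log("only the main process prints this")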
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/dist_utils.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
__author__ = "aagrawal"
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/vqa_tools/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# coding=utf-8
__author__ = "aagrawal"
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py).
import sys
import re
class VQAEval:
def __init__(self, vqa=None, vqaRes=None, n=2):
self.n = n
self.accuracy = {}
self.evalQA = {}
self.evalQuesType = {}
self.evalAnsType = {}
self.vqa = vqa
self.vqaRes = vqaRes
if vqa is not None:
self.params = {"question_id": vqa.getQuesIds()}
self.contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
self.manualMap = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
self.articles = ["a", "an", "the"]
        self.periodStrip = re.compile(r"(?!<=\d)(\.)(?!\d)")
        self.commaStrip = re.compile(r"(\d)(,)(\d)")
self.punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def evaluate(self, quesIds=None):
if quesIds == None:
quesIds = [quesId for quesId in self.params["question_id"]]
gts = {}
res = {}
for quesId in quesIds:
gts[quesId] = self.vqa.qa[quesId]
res[quesId] = self.vqaRes.qa[quesId]
# =================================================
# Compute accuracy
# =================================================
accQA = []
accQuesType = {}
accAnsType = {}
print("computing accuracy")
step = 0
for quesId in quesIds:
resAns = res[quesId]["answer"]
resAns = resAns.replace("\n", " ")
resAns = resAns.replace("\t", " ")
resAns = resAns.strip()
resAns = self.processPunctuation(resAns)
resAns = self.processDigitArticle(resAns)
gtAcc = []
gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]]
if len(set(gtAnswers)) > 1:
for ansDic in gts[quesId]["answers"]:
ansDic["answer"] = self.processPunctuation(ansDic["answer"])
for gtAnsDatum in gts[quesId]["answers"]:
otherGTAns = [
item for item in gts[quesId]["answers"] if item != gtAnsDatum
]
matchingAns = [item for item in otherGTAns if item["answer"] == resAns]
acc = min(1, float(len(matchingAns)) / 3)
gtAcc.append(acc)
quesType = gts[quesId]["question_type"]
ansType = gts[quesId]["answer_type"]
avgGTAcc = float(sum(gtAcc)) / len(gtAcc)
accQA.append(avgGTAcc)
if quesType not in accQuesType:
accQuesType[quesType] = []
accQuesType[quesType].append(avgGTAcc)
if ansType not in accAnsType:
accAnsType[ansType] = []
accAnsType[ansType].append(avgGTAcc)
self.setEvalQA(quesId, avgGTAcc)
self.setEvalQuesType(quesId, quesType, avgGTAcc)
self.setEvalAnsType(quesId, ansType, avgGTAcc)
if step % 100 == 0:
self.updateProgress(step / float(len(quesIds)))
step = step + 1
self.setAccuracy(accQA, accQuesType, accAnsType)
print("Done computing accuracy")
def processPunctuation(self, inText):
outText = inText
for p in self.punct:
if (p + " " in inText or " " + p in inText) or (
re.search(self.commaStrip, inText) != None
):
outText = outText.replace(p, "")
else:
outText = outText.replace(p, " ")
outText = self.periodStrip.sub("", outText, re.UNICODE)
return outText
def processDigitArticle(self, inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = self.manualMap.setdefault(word, word)
if word not in self.articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in self.contractions:
outText[wordId] = self.contractions[word]
outText = " ".join(outText)
return outText
def setAccuracy(self, accQA, accQuesType, accAnsType):
self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n)
self.accuracy["perQuestionType"] = {
quesType: round(
100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]),
self.n,
)
for quesType in accQuesType
}
self.accuracy["perAnswerType"] = {
ansType: round(
100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n
)
for ansType in accAnsType
}
def setEvalQA(self, quesId, acc):
self.evalQA[quesId] = round(100 * acc, self.n)
def setEvalQuesType(self, quesId, quesType, acc):
if quesType not in self.evalQuesType:
self.evalQuesType[quesType] = {}
self.evalQuesType[quesType][quesId] = round(100 * acc, self.n)
def setEvalAnsType(self, quesId, ansType, acc):
if ansType not in self.evalAnsType:
self.evalAnsType[ansType] = {}
self.evalAnsType[ansType][quesId] = round(100 * acc, self.n)
def updateProgress(self, progress):
barLength = 20
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength * progress))
text = "\rFinshed Percent: [{0}] {1}% {2}".format(
"#" * block + "-" * (barLength - block), int(progress * 100), status
)
sys.stdout.write(text)
sys.stdout.flush()
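# Illustrative usage sketch (not part of the original module): the answer
# normalization applied before accuracy matching. VQAEval can be constructed
# without vqa/vqaRes objects, so no annotation files are needed here.
if __name__ == "__main__":
    demo_eval = VQAEval(n=2)
    raw_answer = "Two dogs!"
    normalized = demo_eval.processDigitArticle(demo_eval.processPunctuation(raw_answer))
    print(normalized)  # -> "2 dogs"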
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/vqa_tools/vqa_eval.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
__author__ = "aagrawal"
__version__ = "0.9"
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(COCO.function)"
import json
import datetime
import copy
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
if not annotation_file == None and not question_file == None:
print("loading VQA annotations and questions into memory...")
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, "r"))
questions = json.load(open(question_file, "r"))
self.dataset = dataset
self.questions = questions
self.createIndex()
def createIndex(self):
# create index
print("creating index...")
imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]}
qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
for ann in self.dataset["annotations"]:
imgToQA[ann["image_id"]] += [ann]
qa[ann["question_id"]] = ann
for ques in self.questions["questions"]:
qqa[ques["question_id"]] = ques
print("index created!")
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
        for key, value in self.dataset["info"].items():
print("%s: %s" % (key, value))
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset["annotations"]
else:
if not len(imgIds) == 0:
anns = sum(
[self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],
[],
)
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(quesTypes) == 0
else [ann for ann in anns if ann["question_type"] in quesTypes]
)
anns = (
anns
if len(ansTypes) == 0
else [ann for ann in anns if ann["answer_type"] in ansTypes]
)
ids = [ann["question_id"] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset["annotations"]
else:
if not len(quesIds) == 0:
anns = sum(
[self.qa[quesId] for quesId in quesIds if quesId in self.qa], []
)
else:
anns = self.dataset["annotations"]
anns = (
anns
if len(quesTypes) == 0
else [ann for ann in anns if ann["question_type"] in quesTypes]
)
anns = (
anns
if len(ansTypes) == 0
else [ann for ann in anns if ann["answer_type"] in ansTypes]
)
ids = [ann["image_id"] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann["question_id"]
print("Question: %s" % (self.qqa[quesId]["question"]))
for ans in ann["answers"]:
print("Answer %d: %s" % (ans["answer_id"], ans["answer"]))
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset["info"] = copy.deepcopy(self.questions["info"])
res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"])
res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"])
res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"])
res.dataset["license"] = copy.deepcopy(self.questions["license"])
print("Loading and preparing results... ")
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, "results is not an array of objects"
annsQuesIds = [ann["question_id"] for ann in anns]
assert set(annsQuesIds) == set(
self.getQuesIds()
), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file."
for ann in anns:
quesId = ann["question_id"]
if res.dataset["task_type"] == "Multiple Choice":
assert (
ann["answer"] in self.qqa[quesId]["multiple_choices"]
), "predicted answer is not one of the multiple choices"
qaAnn = self.qa[quesId]
ann["image_id"] = qaAnn["image_id"]
ann["question_type"] = qaAnn["question_type"]
ann["answer_type"] = qaAnn["answer_type"]
print(
"DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds())
)
res.dataset["annotations"] = anns
res.createIndex()
return res
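# Illustrative usage sketch (not part of the original module). The annotation
# and question paths below are hypothetical placeholders; the real files follow
# the official VQA release layout.
if __name__ == "__main__":
    vqa = VQA(
        annotation_file="annotations/v2_mscoco_val2014_annotations.json",  # hypothetical path
        question_file="annotations/v2_OpenEnded_mscoco_val2014_questions.json",  # hypothetical path
    )
    ques_ids = vqa.getQuesIds(ansTypes=["yes/no"])
    vqa.showQA(vqa.loadQA(ques_ids[:1]))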
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/common/vqa_tools/vqa.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from lavis.runners.runner_base import RunnerBase
from lavis.runners.runner_iter import RunnerIter
__all__ = ["RunnerBase", "RunnerIter"]
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/runners/__init__.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import json
import logging
import os
import time
from pathlib import Path
import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import (
download_cached_file,
get_rank,
get_world_size,
is_main_process,
main_process,
)
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.datasets.datasets.dataloader_utils import (
IterLoader,
MultiIterLoader,
PrefetchLoader,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data.dataset import ChainDataset
@registry.register_runner("runner_base")
class RunnerBase:
"""
A runner class to train and evaluate a model given a task and datasets.
The runner uses pytorch distributed data parallel by default. Future release
will support other distributed frameworks.
"""
def __init__(self, cfg, task, model, datasets, job_id):
self.config = cfg
self.job_id = job_id
self.task = task
self.datasets = datasets
self._model = model
self._wrapped_model = None
self._device = None
self._optimizer = None
self._scaler = None
self._dataloaders = None
self._lr_sched = None
self.start_epoch = 0
# self.setup_seeds()
self.setup_output_dir()
@property
def device(self):
if self._device is None:
self._device = torch.device(self.config.run_cfg.device)
return self._device
@property
def use_distributed(self):
return self.config.run_cfg.distributed
@property
def model(self):
"""
A property to get the DDP-wrapped model on the device.
"""
# move model to device
if self._model.device != self.device:
self._model = self._model.to(self.device)
# distributed training wrapper
if self.use_distributed:
if self._wrapped_model is None:
self._wrapped_model = DDP(
self._model, device_ids=[self.config.run_cfg.gpu]
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
# TODO make optimizer class and configurations
if self._optimizer is None:
self._optimizer = torch.optim.AdamW(
params=self.model.parameters(),
lr=float(self.config.run_cfg.init_lr),
weight_decay=float(self.config.run_cfg.weight_decay),
)
return self._optimizer
@property
def scaler(self):
amp = self.config.run_cfg.get("amp", False)
if amp:
if self._scaler is None:
self._scaler = torch.cuda.amp.GradScaler()
return self._scaler
@property
def lr_scheduler(self):
"""
        A property to get and create the learning rate scheduler lazily, only when first needed.
"""
if self._lr_sched is None:
lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)
# max_epoch = self.config.run_cfg.max_epoch
max_epoch = self.max_epoch
# min_lr = self.config.run_cfg.min_lr
min_lr = self.min_lr
# init_lr = self.config.run_cfg.init_lr
init_lr = self.init_lr
# optional parameters
decay_rate = self.config.run_cfg.get("lr_decay_rate", None)
warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1)
warmup_steps = self.config.run_cfg.get("warmup_steps", 0)
self._lr_sched = lr_sched_cls(
optimizer=self.optimizer,
max_epoch=max_epoch,
min_lr=min_lr,
init_lr=init_lr,
decay_rate=decay_rate,
warmup_start_lr=warmup_start_lr,
warmup_steps=warmup_steps,
)
return self._lr_sched
@property
def dataloaders(self) -> dict:
"""
        A property to get and create dataloaders by split, lazily when first needed.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
Currently do not support multiple datasets for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
            # reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = concat_datasets(datasets)
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
if hasattr(self.datasets[split_name], "__len__"):
# a single map-style dataset
num_records = len(self.datasets[split_name])
else:
# a single wds.DataPipeline
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
dataset_ratios=dataset_ratios,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
@property
def cuda_enabled(self):
return self.device.type == "cuda"
@property
def max_epoch(self):
return int(self.config.run_cfg.max_epoch)
@property
def log_freq(self):
log_freq = self.config.run_cfg.get("log_freq", 50)
return int(log_freq)
@property
def init_lr(self):
return float(self.config.run_cfg.init_lr)
@property
def min_lr(self):
return float(self.config.run_cfg.min_lr)
@property
def accum_grad_iters(self):
return int(self.config.run_cfg.get("accum_grad_iters", 1))
@property
def valid_splits(self):
valid_splits = self.config.run_cfg.get("valid_splits", [])
if len(valid_splits) == 0:
logging.info("No validation splits found.")
return valid_splits
@property
def test_splits(self):
test_splits = self.config.run_cfg.get("test_splits", [])
return test_splits
@property
def train_splits(self):
train_splits = self.config.run_cfg.get("train_splits", [])
if len(train_splits) == 0:
logging.info("Empty train splits.")
return train_splits
@property
def evaluate_only(self):
"""
Set to True to skip training.
"""
return self.config.run_cfg.evaluate
@property
def use_dist_eval_sampler(self):
return self.config.run_cfg.get("use_dist_eval_sampler", True)
@property
def resume_ckpt_path(self):
return self.config.run_cfg.get("resume_ckpt_path", None)
@property
def train_loader(self):
train_dataloader = self.dataloaders["train"]
return train_dataloader
def setup_output_dir(self):
lib_root = Path(registry.get_path("library_root"))
output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id
result_dir = output_dir / "result"
output_dir.mkdir(parents=True, exist_ok=True)
result_dir.mkdir(parents=True, exist_ok=True)
registry.register_path("result_dir", str(result_dir))
registry.register_path("output_dir", str(output_dir))
self.result_dir = result_dir
self.output_dir = output_dir
def train(self):
start_time = time.time()
best_agg_metric = 0
best_epoch = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for cur_epoch in range(self.start_epoch, self.max_epoch):
# training phase
if not self.evaluate_only:
logging.info("Start training")
train_stats = self.train_epoch(cur_epoch)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_epoch, best_agg_metric = cur_epoch, agg_metrics
self._save_checkpoint(cur_epoch, is_best=True)
val_log.update({"best_epoch": best_epoch})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each epoch.
if not self.evaluate_only:
self._save_checkpoint(cur_epoch, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch
self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def evaluate(self, cur_epoch="best", skip_reload=False):
test_logs = dict()
if len(self.test_splits) > 0:
for split_name in self.test_splits:
test_logs[split_name] = self.eval_epoch(
split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload
)
return test_logs
def train_epoch(self, epoch):
# train
self.model.train()
return self.task.train_epoch(
epoch=epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@torch.no_grad()
def eval_epoch(self, split_name, cur_epoch, skip_reload=False):
"""
Evaluate the model on a given split.
Args:
split_name (str): name of the split to evaluate on.
cur_epoch (int): current epoch.
            skip_reload (bool): whether to skip reloading the best checkpoint.
                During training, we will reload the best checkpoint for validation.
                During testing, we will use the provided weights and skip reloading the best checkpoint.
"""
data_loader = self.dataloaders.get(split_name, None)
assert data_loader, "data_loader for split {} is None.".format(split_name)
# TODO In validation, you need to compute loss as well as metrics
# TODO consider moving to model.before_evaluation()
model = self.unwrap_dist_model(self.model)
if not skip_reload and cur_epoch == "best":
model = self._reload_best_model(model)
model.eval()
self.task.before_evaluation(
model=model,
dataset=self.datasets[split_name],
)
results = self.task.evaluation(model, data_loader)
if results is not None:
return self.task.after_evaluation(
val_result=results,
split_name=split_name,
epoch=cur_epoch,
)
def unwrap_dist_model(self, model):
if self.use_distributed:
return model.module
else:
return model
def create_loaders(
self,
datasets,
num_workers,
batch_sizes,
is_trains,
collate_fns,
dataset_ratios=None,
):
"""
Create dataloaders for training and validation.
"""
def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
# create a single dataloader for each split
if isinstance(dataset, ChainDataset) or isinstance(
dataset, wds.DataPipeline
):
# wds.WebdDataset instance are chained together
# webdataset.DataPipeline has its own sampler and collate_fn
loader = iter(
DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
)
)
else:
# map-style dataset are concatenated together
# setup distributed sampler
if self.use_distributed:
sampler = DistributedSampler(
dataset,
shuffle=is_train,
num_replicas=get_world_size(),
rank=get_rank(),
)
if not self.use_dist_eval_sampler:
# e.g. retrieval evaluation
sampler = sampler if is_train else None
else:
sampler = None
loader = DataLoader(
dataset,
batch_size=bsz,
num_workers=num_workers,
pin_memory=True,
sampler=sampler,
shuffle=sampler is None and is_train,
collate_fn=collate_fn,
drop_last=True if is_train else False,
)
loader = PrefetchLoader(loader)
if is_train:
loader = IterLoader(loader, use_distributed=self.use_distributed)
return loader
loaders = []
for dataset, bsz, is_train, collate_fn in zip(
datasets, batch_sizes, is_trains, collate_fns
):
if isinstance(dataset, list) or isinstance(dataset, tuple):
loader = MultiIterLoader(
loaders=[
_create_loader(d, num_workers, bsz, is_train, collate_fn[i])
for i, d in enumerate(dataset)
],
ratios=dataset_ratios,
)
else:
loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
loaders.append(loader)
return loaders
@main_process
def _save_checkpoint(self, cur_epoch, is_best=False):
"""
Save the checkpoint at the current epoch.
"""
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"epoch": cur_epoch,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_epoch),
)
logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to))
torch.save(save_obj, save_to)
def _reload_best_model(self, model):
"""
Load the best checkpoint for evaluation.
"""
checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth")
logging.info("Loading checkpoint from {}.".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location=self.device)
model.load_state_dict(checkpoint["model"])
return model
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_epoch = checkpoint["epoch"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@main_process
def log_stats(self, stats, split_name):
if isinstance(stats, dict):
log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}}
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(log_stats) + "\n")
elif isinstance(stats, list):
pass
@main_process
def log_config(self):
with open(os.path.join(self.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/runners/runner_base.py |
"""
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import datetime
import logging
import os
import time
import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import download_cached_file, is_main_process, main_process
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.runners.runner_base import RunnerBase
from torch.utils.data.dataset import ChainDataset
@registry.register_runner("runner_iter")
class RunnerIter(RunnerBase):
"""
Run training based on the number of iterations. This is common when
    the training dataset size is large. Under the hood, the logic is similar to
epoch-based training by considering every #iters_per_inner_epoch as an
inner epoch.
In iter-based runner, after every #iters_per_inner_epoch steps, we
1) do a validation epoch;
2) schedule the learning rate;
3) save the checkpoint.
    We refer to every #iters_per_inner_epoch steps as an inner epoch.
"""
def __init__(self, cfg, task, model, datasets, job_id):
super().__init__(cfg, task, model, datasets, job_id)
self.start_iters = 0
self.max_iters = int(self.config.run_cfg.get("max_iters", -1))
assert self.max_iters > 0, "max_iters must be greater than 0."
self.iters_per_inner_epoch = int(
self.config.run_cfg.get("iters_per_inner_epoch", -1)
)
assert (
self.iters_per_inner_epoch > 0
), "iters_per_inner_epoch must be greater than 0."
@property
def max_epoch(self):
return int(self.max_iters / self.iters_per_inner_epoch)
@property
def cur_epoch(self):
try:
return self.train_loader.epoch
except AttributeError:
# pipeline data (e.g. LAION) is streaming, have no concept of epoch
return 0
def _progress(self, cur_iters):
return "{}_iters={}".format(self.cur_epoch, cur_iters)
def train(self):
start_time = time.time()
best_agg_metric = 0
best_iters = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for start_iters in range(
self.start_iters, self.max_iters, self.iters_per_inner_epoch
):
end_iters = start_iters + self.iters_per_inner_epoch
# training phase
if not self.evaluate_only:
logging.info(
"Start training, max_iters={}, in total {} inner epochs.".format(
self.max_iters, int(self.max_iters / self.iters_per_inner_epoch)
)
)
train_stats = self.train_iters(self.cur_epoch, start_iters)
self.log_stats(split_name="train", stats=train_stats)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=self._progress(end_iters)
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
        A property to get and create dataloaders by split, lazily when first needed.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
Currently do not support multiple datasets for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
            # reorganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
self.datasets = concat_datasets(datasets)
else:
# create multi-loader with the provided ratios, without concatenating or chaining
missing_keys = [k for k in dataset_ratios if k not in self.datasets]
if len(missing_keys) > 0:
raise ValueError(
"Datasets with the following split names are not found: {}".format(
missing_keys
)
)
unexpected_keys = [k for k in self.datasets if k not in dataset_ratios]
if len(unexpected_keys) > 0:
raise ValueError(
"Datasets with the following split names are not expected: {}".format(
unexpected_keys
)
)
dataset_ratios = [float(dataset_ratios[k]) for k in self.datasets]
self.datasets = reorg_datasets_by_split(self.datasets)
# to keep the same structure as return value of concat_datasets
self.datasets = {
                    k: v[0] if len(v) == 1 else v for k, v in self.datasets.items()
}
# print dataset statistics after concatenation/chaining
for split_name in self.datasets:
if isinstance(self.datasets[split_name], tuple) or isinstance(
self.datasets[split_name], list
):
# mixed wds.DataPipeline and torch.utils.data.Dataset
num_records = sum(
[
len(d)
if not type(d) in [wds.DataPipeline, ChainDataset]
else 0
for d in self.datasets[split_name]
]
)
else:
try:
# a single map-style dataset
num_records = len(self.datasets[split_name])
except TypeError:
# a single wds.DataPipeline or ChainDataset
num_records = -1
logging.info(
"Only a single wds.DataPipeline dataset, no __len__ attribute."
)
if num_records >= 0:
logging.info(
"Loaded {} records for {} split from the dataset.".format(
num_records, split_name
)
)
# create dataloaders
split_names = sorted(self.datasets.keys())
datasets = [self.datasets[split] for split in split_names]
is_trains = [split in self.train_splits for split in split_names]
batch_sizes = [
self.config.run_cfg.batch_size_train
if split == "train"
else self.config.run_cfg.batch_size_eval
for split in split_names
]
collate_fns = []
for dataset in datasets:
if isinstance(dataset, tuple) or isinstance(dataset, list):
collate_fns.append([getattr(d, "collater", None) for d in dataset])
else:
collate_fns.append(getattr(dataset, "collater", None))
dataloaders = self.create_loaders(
datasets=datasets,
num_workers=self.config.run_cfg.num_workers,
batch_sizes=batch_sizes,
is_trains=is_trains,
collate_fns=collate_fns,
dataset_ratios=dataset_ratios,
)
self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}
return self._dataloaders
| 3D-LLM-main | three_steps_3d_feature/second_step/lavis/runners/runner_iter.py |
import numpy as np
from argparse import Namespace
import quaternion
from habitat.utils.geometry_utils import quaternion_to_list
from scipy.spatial.transform import Rotation as R
class Pos2Map:
def __init__(self, x, y, heading) -> None:
self.x = x
self.y = y
self.heading = heading
class Pos2World:
def __init__(self, x, y, z, heading) -> None:
self.x = x
self.y = y
self.z = z
self.heading = heading
class Geometry_Tools:
def __init__(self, image_resolution, fov, camera_height) -> None:
self._camera_matrix = self._parse_camera_matrix(*image_resolution, fov)
self._camera_height = camera_height
def _parse_r_matrix(self, ax_, angle):
ax = ax_ / np.linalg.norm(ax_)
if np.abs(angle) > 0.001:
S_hat = np.array([[0.0, -ax[2], ax[1]], [ax[2], 0.0, -ax[0]], [-ax[1], ax[0], 0.0]], dtype=np.float32)
R = np.eye(3) + np.sin(angle) * S_hat + (1 - np.cos(angle)) * (np.linalg.matrix_power(S_hat, 2))
else:
R = np.eye(3)
return R
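    # Note: _parse_r_matrix implements Rodrigues' rotation formula,
    # R = I + sin(angle) * S_hat + (1 - cos(angle)) * S_hat**2, where S_hat is the
    # skew-symmetric matrix of the normalized rotation axis. Illustrative check
    # (comment only, not part of the original code): _parse_r_matrix([0, 0, 1], np.pi / 2)
    # applied to [1, 0, 0] gives approximately [0, 1, 0], i.e. a 90-degree turn about z.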
def _parse_camera_matrix(self, width, height, fov):
xc = (width - 1.0) / 2.0
zc = (height - 1.0) / 2.0
f = (width / 2.0) / np.tan(np.deg2rad(fov / 2.0))
camera_matrix = {"xc": xc, "zc": zc, "f": f}
camera_matrix = Namespace(**camera_matrix)
return camera_matrix
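    # The intrinsics above follow a standard pinhole model: (xc, zc) is the image center
    # and f = (width / 2) / tan(fov / 2) is the focal length in pixels.
    # Worked example (illustrative only): for width = 512 and fov = 90 degrees,
    # f = 256 / tan(45 degrees) = 256 pixels.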
def transformation_robot2world(self, goal2robot2world: list, pos2world: Pos2World) -> list:
"""transform the point relative to robot to the point relative to th world
Args:
goal2robot2world (list): = [u, v] u is in the right relative to the robot and v is in the forward of th robot
robot first moves according to the v frame and finally u frame
pos2world (Pos2World): _description_
Returns:
list: _description_
"""
u, v = goal2robot2world
        x0, y0, z0 = pos2world.x, pos2world.y, pos2world.z
x1 = x0 + v * np.cos(pos2world.heading + np.pi / 2)
z1 = -(-z0 + v * np.sin(pos2world.heading + np.pi / 2))
x2 = x1 + u * np.cos(pos2world.heading + np.pi / 2 - np.pi / 2)
z2 = -(-z1 + u * np.sin(pos2world.heading + np.pi / 2 - np.pi / 2))
return [x2, y0, z2]
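    # Geometry note (illustrative, assuming the Habitat convention that y is up and the
    # heading is measured about the y-axis): with heading = 0 the robot faces -z, so a
    # purely forward offset [u, v] = [0, 1] maps (x0, y0, z0) to (x0, y0, z0 - 1), while a
    # purely rightward offset [1, 0] maps it to (x0 + 1, y0, z0).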
def transformation_robotbase2map(
self, point_clouds_2robotbase: np.array, pos2map: Pos2Map, resolution_meter2pixel
) -> np.array:
"""Mapping the points with the robot base as the coordinate system to the map coordinate system
Args:
point_clouds_2robotbase (np.array):
pos2map (Pos2Map):
resolution_meter2pixel (_type_):
Returns:
np.array: point_clouds_2map
"""
R = self._parse_r_matrix([0.0, 0.0, 1.0], angle=pos2map.heading - np.pi / 2.0)
point_clouds_2map = np.matmul(point_clouds_2robotbase.reshape(-1, 3), R.T).reshape(
point_clouds_2robotbase.shape
)
point_clouds_2map[:, :, 0] = point_clouds_2map[:, :, 0] + pos2map.x * resolution_meter2pixel
point_clouds_2map[:, :, 1] = point_clouds_2map[:, :, 1] + pos2map.y * resolution_meter2pixel
return point_clouds_2map
def transformation_robotcamera2base(self, point_clouds: np.array) -> np.array:
"""Mapping the points with the robot camera as the coordinate system to the robot base coordinate system
Args:
point_clouds (np.array): In shape (width, height, 3);
                point_clouds[..., 0] is the X coordinate, point_clouds[..., 1] is the Y coordinate, point_clouds[..., 2] is the Z coordinate
Returns:
np.array: Array of point clouds relative to the robot base coordinate system; In shape (width, height, 3)
"""
point_clouds[..., 2] = point_clouds[..., 2] + self._camera_height
return point_clouds
def transformation_camera2robotcamera(self, depth_img: np.array) -> np.array:
"""Mapping the points on the depth map to points with the robot camera as the coordinate system
Args:
            depth_img (np.array): In shape (width, height, 1); depth values are expressed in units of 10 meters
Returns:
np.array: Array of point clouds relative to the robot camera coordinate system; In shape (width, height, 3)
"""
x, z = np.meshgrid(np.arange(depth_img.shape[-2]), np.arange(depth_img.shape[-3] - 1, -1, -1))
for _ in range(depth_img.ndim - 3):
x = np.expand_dims(x, axis=0)
z = np.expand_dims(z, axis=0)
X = (x - self._camera_matrix.xc) * depth_img[:, :, 0] / self._camera_matrix.f
# print(depth_img)
Z = (z - self._camera_matrix.zc) * depth_img[:, :, 0] / self._camera_matrix.f
pc = np.concatenate((X[..., np.newaxis], depth_img, Z[..., np.newaxis]), axis=2)
return pc
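    # Back-projection note: X and Z above invert the pinhole projection,
    # X = (x - xc) * depth / f and Z = (z - zc) * depth / f, with the pixel rows flipped
    # so that Z increases upwards; the raw depth value itself is kept as the forward
    # (second) coordinate of each point.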
def transformation_pointcloud2occupiedmap(
self, point_clouds_2map: np.array, map_size, z_bins: list, resolution_meter2pixel, free_index, occupied_index
) -> np.array:
"""project the point cloud relative to the map coordinate system to the top view
Args:
point_clouds_2map (np.array):
map_size (_type_):
z_bins (list): a list of values utilizing a height parameter to segment the point clouds of occupied and free
resolution_meter2pixel (_type_):
free_index (_type_): representative values of navigable areas on the map
occupied_index (_type_): representative values of obstacle areas on the map
Returns:
np.array: top down map in shape (map_size, map_size)
"""
n_z_bins = len(z_bins) + 1
isnotnan = np.logical_not(np.isnan(point_clouds_2map[:, :, 1]))
# transform points meter to pixel
X_bin = np.round(point_clouds_2map[:, :, 0] / resolution_meter2pixel).astype(np.int32)
Y_bin = np.round(point_clouds_2map[:, :, 1] / resolution_meter2pixel).astype(np.int32)
""" function explaination
np.digitize : split the point according to the z_bins
example:
z_bins = [1] ; points that lower than 1 is 0 else 1
"""
Z_bin = np.digitize(point_clouds_2map[:, :, 2], bins=z_bins).astype(np.int32)
# filter out the points outside the map and nan
isvalid = np.array(
[X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size, Z_bin >= 0, Z_bin < n_z_bins, isnotnan]
)
isvalid = np.all(isvalid, axis=0)
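        # Flatten the (row, column, height-bin) indices into one linear index so that
        # np.bincount can accumulate the full (map_size, map_size, n_z_bins) histogram in a
        # single pass; invalid points are redirected to index 0 with a weight of 0.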
ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
ind[np.logical_not(isvalid)] = 0
indr = ind.ravel()
isvalidr = isvalid.ravel().astype(np.int32)
count = np.bincount(indr, isvalidr, minlength=map_size * map_size * n_z_bins)
count = count[: map_size * map_size * n_z_bins]
count = np.reshape(count, [map_size, map_size, n_z_bins])
map = np.zeros((count.shape[0], count.shape[1]))
free_mask = count[:, :, 0] > 0
map[free_mask] = free_index
occupied_mask = count[:, :, 1] > 0
map[occupied_mask] = occupied_index
return map
def transformation_quatrtnion2heading(self, rotation: quaternion):
quat = quaternion_to_list(rotation)
q = R.from_quat(quat)
heading = q.as_rotvec()[1]
return heading
def transformation_pointcloud2semanticmap(
self, point_clouds_2map: np.array, map_size, z_bins: list, resolution_meter2pixel, free_index, semantic_obs
) -> np.array:
"""project the point cloud relative to the map coordinate system to the top view
Args:
point_clouds_2map (np.array):
map_size (_type_):
z_bins (list): a list of values utilizing a height parameter to segment the point clouds of occupied and free
resolution_meter2pixel (_type_):
free_index (_type_): representative values of navigable areas on the map
            semantic_obs (_type_): per-pixel semantic labels of the observation, with shape (depth_img.shape[0], depth_img.shape[1])
Returns:
np.array: top down map in shape (map_size, map_size)
"""
n_z_bins = len(z_bins) + 1
isnotnan = np.logical_not(np.isnan(point_clouds_2map[:, :, 1]))
# transform points meter to pixel
X_bin = np.round(point_clouds_2map[:, :, 0] / resolution_meter2pixel).astype(np.int32)
Y_bin = np.round(point_clouds_2map[:, :, 1] / resolution_meter2pixel).astype(np.int32)
""" function explaination
np.digitize : split the point according to the z_bins
example:
z_bins = [1] ; points that lower than 1 is 0 else 1
"""
Z_bin = np.digitize(point_clouds_2map[:, :, 2], bins=z_bins).astype(np.int32)
# filter out the points outside the map and nan
isvalid = np.array(
[X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size, Z_bin >= 0, Z_bin < n_z_bins, isnotnan]
)
isvalid = np.all(isvalid, axis=0)
ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
ind[np.logical_not(isvalid)] = 0
indr = ind.ravel()
isvalidr = isvalid.ravel().astype(np.int32)
count = np.bincount(indr, isvalidr, minlength=map_size * map_size * n_z_bins)
count = count[: map_size * map_size * n_z_bins]
count = np.reshape(count, [map_size, map_size, n_z_bins])
map = np.zeros((count.shape[0], count.shape[1]))
free_mask = count[:, :, 0] > 0
occupied_mask = count[:, :, 1] > 0
for y in range(X_bin.shape[0]):
for x in range(X_bin.shape[1]):
if Y_bin[y, x] >= 0 and Y_bin[y, x] < map_size and X_bin[y, x] >= 0 and X_bin[y, x] < map_size:
if occupied_mask[Y_bin[y, x], X_bin[y, x]]:
map[Y_bin[y, x], X_bin[y, x]] = semantic_obs[y, x]
elif free_mask[Y_bin[y, x], X_bin[y, x]]:
map[Y_bin[y, x], X_bin[y, x]] = free_index
return map
class Mode_Selector:
def __init__(self) -> None:
pass
class Action_Space:
move_forward = 1
turn_left = 2
turn_right = 3
class Application(Geometry_Tools):
def __init__(
self,
image_resolution,
fov,
depth_threshold,
resolution_meter2pixel,
map_size,
camera_height,
free_index,
occupied_index,
) -> None:
super().__init__(image_resolution, fov, camera_height)
self._resolution_meter2pixel = resolution_meter2pixel
self._depth_threshold = depth_threshold
self._map_size = map_size
self.pos2map = Pos2Map(self._map_size / 2 + 1, self._map_size / 2 + 1, 0)
self.pos2world = Pos2World(None, None, None, None)
self._free_index = free_index
self._occupied_index = occupied_index
def parse_semantic_pointclouds(self, depth_img: np.array, semantic_obs: np.array, semantic_anno):
"""Parse the point cloud dictionary with semantic annotation and the average coordinate dictionary of each
semantically annotated object in the robot camera coordinate system
Args:
            depth_img (np.array): In shape (width, height, 1)
            semantic_obs (np.array): In shape (width, height)
semantic_anno (_type_): _description_
Returns:
mapping_semantic: dictionary of all points corresponding to each label in the semantic_obs
occupied_pc: dictionary of average points corresponding to each label in the semantic_obs
"""
# filter out points that exceed a certain distance
depth_img[depth_img > self._depth_threshold] = np.NaN
# parse point clouds relative to the robot camera coordinate system
point_clouds_2robotcamera = self.transformation_camera2robotcamera(depth_img)
# label each pixel in semantic_obs
        ## TODO: handle the case where the same object has different indices but the same label
mapping_semantic = {}
for row in range(semantic_obs.shape[0]):
for col in range(semantic_obs.shape[1]):
label = semantic_anno[semantic_obs[row, col]]
if not label in mapping_semantic.keys():
mapping_semantic[label] = [point_clouds_2robotcamera[row, col]]
else:
mapping_semantic[label].append(point_clouds_2robotcamera[row, col])
# remove the label that less than 50 pixels and unusual label
occupied_pc = {}
for k, v in mapping_semantic.items():
if len(v) < 50:
continue
elif k in ["floor", "ceiling", "misc", "wall", "objects", "void"]:
continue
else:
occupied_pc[k] = (sum(v) / len(v)).tolist()
return mapping_semantic, occupied_pc
def parse_depth_topdownmap(self, depth_img: np.array) -> np.array:
"""project depth image into the top down map
Args:
depth_img (np.array): in shape (width, height, 1)
Returns:
            np.array: map in shape (map_size, map_size), where the value 0 stands for unknown space,
                self._free_index stands for free space, and self._occupied_index stands for occupied space
"""
# filter out points that exceed a certain distance
depth_img[depth_img > self._depth_threshold] = np.NaN
# parse point clouds relative to the robot camera coordinate system
point_clouds_2robotcamera = self.transformation_camera2robotcamera(depth_img)
# parse point clouds relative to the robot base coordinate system
point_clouds_2robotbase = self.transformation_robotcamera2base(point_clouds_2robotcamera)
# parse point clouds relative to the map coordinate system
point_clouds_2map = self.transformation_robotbase2map(
point_clouds_2robotbase, self.pos2map, self._resolution_meter2pixel
)
# project the point clouds relative to the map coordinate system to top down map
occupied_map = self.transformation_pointcloud2occupiedmap(
point_clouds_2map,
self._map_size,
[self._camera_height],
self._resolution_meter2pixel,
self._free_index,
self._occupied_index,
)
return occupied_map
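    # Minimal usage sketch (illustrative only; the positional arguments follow
    # Application.__init__ above and mirror the values used in direct_3d.py):
    #   >>> api = Application((512, 512), 90, 1, 0.005, 600, 1.5, 1, 2)
    #   >>> occ_map = api.parse_depth_topdownmap(depth_img)  # depth_img: (width, height, 1)
    #   >>> occ_map.shape
    #   (600, 600)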
def parse_semantic_topdownmap(self, depth_img: np.array, semantic_img: np.array) -> np.array:
# filter out points that exceed a certain distance
depth_img[depth_img > self._depth_threshold] = np.NaN
# parse point clouds relative to the robot camera coordinate system
point_clouds_2robotcamera = self.transformation_camera2robotcamera(depth_img)
# parse point clouds relative to the robot base coordinate system
point_clouds_2robotbase = self.transformation_robotcamera2base(point_clouds_2robotcamera)
# parse point clouds relative to the map coordinate system
point_clouds_2map = self.transformation_robotbase2map(
point_clouds_2robotbase, self.pos2map, self._resolution_meter2pixel
)
# project the point clouds relative to the map coordinate system to top down map
semantic_map = self.transformation_pointcloud2semanticmap(
point_clouds_2map,
self._map_size,
[self._camera_height],
self._resolution_meter2pixel,
self._free_index,
semantic_img,
)
return semantic_map
def update_pos2map_by_action(self, forward_step2tenmeter, turn_angle2degree, action) -> None:
if action == Action_Space.move_forward:
self.pos2map.x = (
self.pos2map.x + forward_step2tenmeter * np.cos(self.pos2map.heading) / self._resolution_meter2pixel
)
self.pos2map.y = (
self.pos2map.y + forward_step2tenmeter * np.sin(self.pos2map.heading) / self._resolution_meter2pixel
)
elif action == Action_Space.turn_left:
self.pos2map.heading = self.pos2map.heading + turn_angle2degree * np.pi / 180.0
elif action == Action_Space.turn_right:
self.pos2map.heading = self.pos2map.heading - turn_angle2degree * np.pi / 180.0
if self.pos2map.heading > np.pi * 2:
self.pos2map.heading -= np.pi * 2
elif self.pos2map.heading < 0:
self.pos2map.heading += np.pi * 2
def update_pos2map_by_cooardinate(self, tgt_pos2world: list = None, tgt_rot2world: quaternion = None) -> None:
"""_summary_
Args:
tgt_pos2world (list, optional): _description_. Defaults to None.
tgt_rot2world (quaternion)
tgt_heading2world (_type_, optional): in radius. Defaults to None.
"""
if not tgt_rot2world is None:
tgt_heading2world = self.transformation_quatrtnion2heading(tgt_rot2world)
if tgt_heading2world > np.pi * 2:
tgt_heading2world -= np.pi * 2
elif tgt_heading2world < 0:
tgt_heading2world += np.pi * 2
if self.pos2world.x is None:
self.pos2world.x = tgt_pos2world[0]
self.pos2world.y = tgt_pos2world[1]
self.pos2world.z = tgt_pos2world[2]
self.pos2world.heading = tgt_heading2world
else:
if not tgt_pos2world is None and not (
abs(tgt_pos2world[0] - self.pos2world.x) + abs(tgt_pos2world[2] - self.pos2world.z) < 0.001
):
xt, yt, zt = tgt_pos2world
delta_heading2world = np.arctan((xt - self.pos2world.x) / (zt - self.pos2world.z))
delta_heading2world = (
delta_heading2world
if (self.pos2world.heading < np.pi / 2 or self.pos2world.heading > np.pi * 3 / 2)
else delta_heading2world + np.pi
)
delta_distance2map = (
np.linalg.norm([(xt - self.pos2world.x) / 10, (zt - self.pos2world.z) / 10])
/ self._resolution_meter2pixel
)
delta_heading2curheading = delta_heading2world - self.pos2world.heading
delta_heading2map = delta_heading2curheading + self.pos2map.heading
self.pos2map.x = self.pos2map.x + np.cos(delta_heading2map) * delta_distance2map
self.pos2map.y = self.pos2map.y + np.sin(delta_heading2map) * delta_distance2map
self.pos2world.x = xt
self.pos2world.y = yt
self.pos2world.z = zt
if not tgt_heading2world is None:
delta_heading2world = tgt_heading2world - self.pos2world.heading
self.pos2world.heading = tgt_heading2world
if self.pos2world.heading > np.pi * 2:
self.pos2world.heading -= np.pi * 2
elif self.pos2world.heading < 0:
self.pos2world.heading += np.pi * 2
self.pos2map.heading += delta_heading2world
if self.pos2map.heading > np.pi * 2:
self.pos2map.heading -= np.pi * 2
elif self.pos2map.heading < 0:
self.pos2map.heading += np.pi * 2
def update_occupied_map(self, new_occupied_map, old_occupied_map):
mask_free_reigon = new_occupied_map == self._free_index
old_occupied_map[mask_free_reigon] = self._free_index
mask_occupied_reigon = new_occupied_map == self._occupied_index
old_occupied_map[mask_occupied_reigon] = self._occupied_index
return old_occupied_map
def update_semantic_map(self, new_semantic_map, old_semantic_map):
mask = new_semantic_map > 0
for y in range(old_semantic_map.shape[0]):
for x in range(old_semantic_map.shape[1]):
if mask[y, x]:
old_semantic_map[y, x] = new_semantic_map[y, x]
return old_semantic_map
| 3D-LLM-main | three_steps_3d_feature/third_step/tools.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import argparse
import numpy as np
import copy
import cv2
from habitat.utils.geometry_utils import quaternion_to_list
import torch
import quaternion
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
import sys
from tools import Application
def robot2world(position, u, v, heading):
x0, y0, z0 = position
x1 = x0 + v * np.cos(heading + np.pi / 2)
z1 = -(-z0 + v * np.sin(heading + np.pi / 2))
x2 = x1 + u * np.cos(heading + np.pi / 2 - np.pi / 2)
z2 = -(-z1 + u * np.sin(heading + np.pi / 2 - np.pi / 2))
return [x2, y0, z2]
def transformation_quatrtnion2heading(rotation: quaternion):
quat = quaternion_to_list(rotation)
q = R.from_quat(quat)
heading = q.as_rotvec()[1]
return heading
def main(room_name, data_root_dir, depth_dir, feat_dir, sample_num):
feature_dir = os.path.join(feat_dir, room_name)
data_dir = os.path.join(data_root_dir, room_name)
    # skip rooms whose aggregated features have already been extracted
    try:
        if (
            os.path.exists(os.path.join(data_dir, "pcd_feat.pt"))
            and torch.load(os.path.join(data_dir, "pcd_feat.pt")).shape[0] > 0
        ):
            return
    except Exception:
        pass
depth_dir = os.path.join(depth_dir, room_name)
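    # The positional arguments below follow Application.__init__ in tools.py:
    # image_resolution=(512, 512), fov=90, depth_threshold=1, resolution_meter2pixel=0.005,
    # map_size=600, camera_height=1.5, free_index=1, occupied_index=2.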
api = Application((512, 512), 90, 1, 0.005, 600, 1.5, 1, 2)
pc_pos = []
pc_feat = []
from tqdm import tqdm
for file in tqdm(os.listdir(feature_dir)):
        try:
            feature_map = torch.load(os.path.join(feature_dir, file)).detach().cpu().numpy()
        except Exception:
            # skip feature files that are missing or cannot be loaded
            continue
pose_file = json.load(open(os.path.join(data_dir, file.replace(".pt", ".json"))))
house_name = room_name.split("_")[0]
ky = room_name.split("_")[1]
bbox = json.load(open(os.path.join("room_bboxes_with_walls_revised_axis", house_name + ".json")))[ky]
min_x = bbox[0][0]
min_y = bbox[0][1]
min_z = bbox[0][2]
max_x = bbox[1][0]
max_y = bbox[1][1]
max_z = bbox[1][2]
rotation_0 = pose_file["rotation"][0]
rotation_1 = pose_file["rotation"][1]
rotation_2 = pose_file["rotation"][2]
rotation_3 = pose_file["rotation"][3]
position = pose_file["translation"]
heading = transformation_quatrtnion2heading(np.quaternion(rotation_0, rotation_1, rotation_2, rotation_3))
if heading > np.pi * 2:
heading -= np.pi * 2
elif heading < 0:
heading += np.pi * 2
depth_map = np.load(os.path.join(depth_dir, file.replace(".pt", "_depth.npy")))
point_clouds_2current = api.transformation_camera2robotcamera(np.expand_dims(depth_map / 10.0, axis=2))
color_map = cv2.imread(os.path.join(data_dir, file.replace(".pt", ".png")))
for w in range(point_clouds_2current.shape[0]):
for h in range(point_clouds_2current.shape[1]):
if np.count_nonzero(feature_map[w, h]) == 0:
continue
if color_map[w, h, 0] == 0 and color_map[w, h, 1] == 0 and color_map[w, h, 2] == 0:
continue
pc2r = [point_clouds_2current[w, h, j] for j in range(point_clouds_2current.shape[-1])]
pc2w = robot2world(position, pc2r[0] * 10, pc2r[1] * 10, heading)
pc2w[1] = pc2r[2] * 10 + pc2w[1]
if not (
(min_x - 0 < pc2w[0] < max_x + 0)
and (min_y - 0 < pc2w[1] < max_y + 0)
and (min_z - 0 < pc2w[2] < max_z + 0)
):
continue
else:
pc_pos.append(pc2w)
pc_feat.append(feature_map[w, h])
pc_pos = np.array(pc_pos)
pc_feat = np.array(pc_feat)
if len(pc_pos) > sample_num:
N = len(pc_pos)
indices = np.random.choice(N, sample_num, replace=False)
final_points = pc_pos[indices]
final_features = pc_feat[indices]
else:
final_points = pc_pos
final_features = pc_feat
print(final_points.shape)
torch.save(final_points, os.path.join(data_dir, "pcd_pos.pt"))
torch.save(final_features, os.path.join(data_dir, "pcd_feat.pt"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Specify dirs")
parser.add_argument("--data_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--depth_dir_path", default="./masked_rdp_data/", type=str)
parser.add_argument("--feat_dir_path", default="./maskformer_masks/", type=str)
parser.add_argument("--sample_num", default=300000, type=int)
args = parser.parse_args()
room_list = os.listdir(args.data_dir_path)
for room_name in room_list:
main(room_name, args.data_dir_path, args.depth_dir_path, args.feat_dir_path, args.sample_num)
| 3D-LLM-main | three_steps_3d_feature/third_step/direct_3d.py |
#liquid attention
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import torch.nn.functional as F
import math
class LeakyIntegrator(nn.Module):
def __init__(self, tau=0.5):
super(LeakyIntegrator, self).__init__()
self.tau = tau
def forward(self, x):
dxdt = -x / self.tau
return x + dxdt
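# Note: LeakyIntegrator.forward is a single explicit-Euler step of the leaky ODE
# dx/dt = -x / tau, i.e. it returns x * (1 - 1 / tau); with the default tau = 0.5 this
# flips the sign of the input, so tau effectively acts as a tunable gain here.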
class ConductanceBasedSynapse(nn.Module):
def __init__(self):
super(ConductanceBasedSynapse, self).__init__()
def forward(self, x):
        return torch.sigmoid(x)
class LeakyIntegrationAttention(nn.Module):
def __init__(self, d_model, nhead):
super(LeakyIntegrationAttention, self).__init__()
self.d_model = d_model
self.nhead = nhead
self.query_proj = nn.Linear(d_model, d_model)
self.key_proj = nn.Linear(d_model, d_model)
self.value_proj = nn.Linear(d_model, d_model)
        self.leaky_integrator = LeakyIntegrator()
        self.conductance_based_synapse = ConductanceBasedSynapse()
        self.out_proj = nn.Linear(d_model, d_model)
def forward(self, query, key, value, attn_mask=None, key_padding_mask=None):
q = self.query_proj(query)
        k = self.key_proj(key)
v = self.value_proj(value)
q = q.view(query.shape[0], query.shape[1], self.nhead, -1).transpose(1, 2)
        k = k.view(key.shape[0], key.shape[1], self.nhead, -1).transpose(1, 2)
v = v.view(value.shape[0], value.shape[1], self.nhead, -1).transpose(1, 2)
q = self.leaky_integrator(q)
k = self.leaky_integrator(k)
#attention weights
        attn_weights = torch.matmul(q, k.transpose(-2, -1))
attn_weights = attn_weights / math.sqrt(self.d_model)
if attn_mask is not None:
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2), float("-inf"))
        attn_weights = F.softmax(attn_weights, dim=-1)  # normalize over the key dimension
attn_weights = self.conductance_based_synapse(attn_weights)
attn_output = torch.matmul(attn_weights, v)
attn_output = attn_output.transpose(1, 2).contiguous().view(query.shape[0], query.shape[1], self.d_model)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
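# Illustrative shape check (comment only, assuming batch-first inputs of shape
# (batch, seq_len, d_model)):
#   >>> attn = LeakyIntegrationAttention(d_model=512, nhead=8)
#   >>> x = torch.randn(2, 16, 512)
#   >>> out, weights = attn(x, x, x)
#   >>> out.shape, weights.shape
#   (torch.Size([2, 16, 512]), torch.Size([2, 8, 16, 16]))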
class CustomTransformerEncoderLayer(TransformerEncoderLayer):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='gelu'):
super(CustomTransformerEncoderLayer, self).__init__(d_model, nhead, dim_feedforward, dropout, activation)
        self.self_attn = LeakyIntegrationAttention(d_model, nhead)
class CustomTransformerEncoder(TransformerEncoder):
    def __init__(self, encoder_layer, num_layers, norm=None):
        super(CustomTransformerEncoder, self).__init__(encoder_layer, num_layers, norm)
d_model = 512
nhead = 8
num_layers = 6
encoder_layer = CustomTransformerEncoderLayer(d_model, nhead)
custom_transformer_encoder = CustomTransformerEncoder(encoder_layer, num_layers)
| Liquid-main | LeakyAttention.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from pathlib import Path
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, We cross-compile for Volta (compute capability 7.0), "
"Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("flash_attn")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) < 11:
raise RuntimeError("FlashAttention is only supported on CUDA 11")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_75,code=sm_75")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
subprocess.run(["git", "submodule", "update", "--init", "csrc/flash_attn/cutlass"])
ext_modules.append(
CUDAExtension(
name="flash_attn_cuda",
sources=[
"csrc/flash_attn/fmha_api.cpp",
"csrc/flash_attn/src/fmha_fwd_hdim32.cu",
"csrc/flash_attn/src/fmha_fwd_hdim64.cu",
"csrc/flash_attn/src/fmha_fwd_hdim128.cu",
"csrc/flash_attn/src/fmha_bwd_hdim32.cu",
"csrc/flash_attn/src/fmha_bwd_hdim64.cu",
"csrc/flash_attn/src/fmha_bwd_hdim128.cu",
"csrc/flash_attn/src/fmha_block_fprop_fp16_kernel.sm80.cu",
"csrc/flash_attn/src/fmha_block_dgrad_fp16_kernel_loop.sm80.cu",
],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
"-lineinfo"
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[
Path(this_dir) / 'csrc' / 'flash_attn',
Path(this_dir) / 'csrc' / 'flash_attn' / 'src',
Path(this_dir) / 'csrc' / 'flash_attn' / 'cutlass' / 'include',
],
)
)
setup(
name="flash_attn",
version="0.2.8",
packages=find_packages(
exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
),
author="Tri Dao",
author_email="[email protected]",
description="Flash Attention: Fast and Memory-Efficient Exact Attention",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/HazyResearch/flash-attention",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: Unix",
],
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
python_requires=">=3.7",
install_requires=[
"torch",
"einops",
],
)
| FLASHATTENION-LION-OPTIMIZE-main | setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--ft_attention")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
name="ft_attention",
sources=[
"ft_attention.cpp",
"decoder_masked_multihead_attention.cu",
],
extra_compile_args={
"cxx": ["-O3", "-DENABLE_BF16"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-DENABLE_BF16", # TODO
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="ft_attention",
version="0.1",
description="Attention for single query from FasterTransformer",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| FLASHATTENION-LION-OPTIMIZE-main | csrc/ft_attention/setup.py |
# Copied from https://github.com/NVIDIA/apex/tree/master/csrc/megatron
# We add the case where seqlen = 4k and seqlen = 8k
import os
import subprocess
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
setup(
name='fused_softmax_lib',
ext_modules=[
CUDAExtension(
name='fused_softmax_lib',
sources=['fused_softmax.cpp', 'scaled_masked_softmax_cuda.cu', 'scaled_upper_triang_masked_softmax_cuda.cu'],
extra_compile_args={
'cxx': ['-O3',],
'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + cc_flag)
}
)
],
cmdclass={
'build_ext': BuildExtension
})
| FLASHATTENION-LION-OPTIMIZE-main | csrc/fused_softmax/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--xentropy")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
name="xentropy_cuda_lib",
sources=[
"interface.cpp",
"xentropy_kernel.cu"
],
extra_compile_args={
"cxx": ["-O3"] + generator_flag,
"nvcc": append_nvcc_threads(
["-O3"]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="xentropy_cuda_lib",
version="0.1",
description="Cross-entropy loss",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| FLASHATTENION-LION-OPTIMIZE-main | csrc/xentropy/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
raise_if_cuda_home_none("--fast_layer_norm")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
name="dropout_layer_norm",
sources=[
"ln_api.cpp",
"ln_fwd_256.cu",
"ln_bwd_256.cu",
"ln_fwd_512.cu",
"ln_bwd_512.cu",
"ln_fwd_768.cu",
"ln_bwd_768.cu",
"ln_fwd_1024.cu",
"ln_bwd_1024.cu",
"ln_fwd_1280.cu",
"ln_bwd_1280.cu",
"ln_fwd_1536.cu",
"ln_bwd_1536.cu",
"ln_fwd_2048.cu",
"ln_bwd_2048.cu",
"ln_fwd_2560.cu",
"ln_bwd_2560.cu",
"ln_fwd_3072.cu",
"ln_bwd_3072.cu",
"ln_fwd_4096.cu",
"ln_bwd_4096.cu",
"ln_fwd_5120.cu",
"ln_bwd_5120.cu",
"ln_fwd_6144.cu",
"ln_bwd_6144.cu",
],
extra_compile_args={
"cxx": ["-O3"] + generator_flag,
"nvcc": append_nvcc_threads(
[
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ generator_flag
+ cc_flag
),
},
include_dirs=[this_dir],
)
)
setup(
name="dropout_layer_norm",
version="0.1",
description="Fused dropout + add + layer norm",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| FLASHATTENION-LION-OPTIMIZE-main | csrc/layer_norm/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("rotary_emb")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
'rotary_emb', [
'rotary.cpp',
'rotary_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': append_nvcc_threads([
'-O3', '--use_fast_math', '--expt-extended-lambda'
] + cc_flag)
}
)
)
setup(
name="rotary_emb",
version="0.1",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
| FLASHATTENION-LION-OPTIMIZE-main | csrc/rotary/setup.py |
import os
import subprocess
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
setup(
name='fused_dense_lib',
ext_modules=[
CUDAExtension(
name='fused_dense_lib',
sources=['fused_dense.cpp', 'fused_dense_cuda.cu'],
extra_compile_args={
'cxx': ['-O3',],
'nvcc': append_nvcc_threads(['-O3'])
}
)
],
cmdclass={
'build_ext': BuildExtension
})
| FLASHATTENION-LION-OPTIMIZE-main | csrc/fused_dense_lib/setup.py |
from typing import Callable
import dotenv
import hydra
from omegaconf import OmegaConf, DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
OmegaConf.register_new_resolver('eval', eval)
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
# Delay the evaluation until we have the datamodule
# So we want the resolver to yield the same string.
OmegaConf.register_new_resolver('datamodule', lambda attr: '${datamodule:' + str(attr) + '}')
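# Illustrative resolver usage (comment only): in a config, "${div_up:1000,128}" resolves
# to 8 and "${eval:2 ** 10}" resolves to 1024, while "${datamodule:some_attr}" (some_attr
# is a placeholder name) is kept as the same string here and resolved later, once the
# datamodule is available.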
# Turn on TensorFloat32
import torch.backends
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def dictconfig_filter_key(d: DictConfig, fn: Callable) -> DictConfig:
"""Only keep keys where fn(key) is True. Support nested DictConfig.
"""
# Using d.items_ex(resolve=False) instead of d.items() since we want to keep the
# ${datamodule:foo} unresolved for now.
return DictConfig({k: dictconfig_filter_key(v, fn) if isinstance(v, DictConfig) else v
# for k, v in d.items_ex(resolve=False) if fn(k)})
for k, v in d.items() if fn(k)})
@hydra.main(config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
# Remove config keys that start with '__'. These are meant to be used only in computing
# other entries in the config.
config = dictconfig_filter_key(config, lambda k: not k.startswith('__'))
# Imports should be nested inside @hydra.main to optimize tab completion
# Read more here: https://github.com/facebookresearch/hydra/issues/934
from src.train import train
from src.eval import evaluate
from src.utils import utils
# A couple of optional utilities:
# - disabling python warnings
# - forcing debug-friendly configuration
# - verifying experiment name is set when running in experiment mode
# You can safely get rid of this line if you don't want those
utils.extras(config)
# Pretty print config using Rich library
if config.get("print_config"):
utils.print_config(config, resolve=True)
# Train model
mode = config.get('mode', 'train')
if mode not in ['train', 'eval']:
raise NotImplementedError(f'mode {mode} not supported')
if mode == 'train':
return train(config)
elif mode == 'eval':
return evaluate(config)
if __name__ == "__main__":
main()
| FLASHATTENION-LION-OPTIMIZE-main | training/run.py |
import os
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import pytest
import torch
import dotenv
from src.datamodules.language_modeling_hf import LMDataModule
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
def div_up(x: int, y: int) -> int:
return (x + y - 1) // y
# https://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-using-python/55423170#55423170
def num_cpu_cores():
try:
import psutil
return psutil.cpu_count(logical=False)
except ImportError:
return len(os.sched_getaffinity(0))
class TestLMDataModule:
def test_wikitext2(self):
batch_size = 7
dataset_name = 'wikitext'
dataset_config_name = 'wikitext-2-raw-v1'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'wikitext-2' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=False, batch_size=batch_size, num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 2391884
val_len = 247289
test_len = 283287
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_wikitext103(self):
batch_size = 7
dataset_name = 'wikitext'
dataset_config_name = 'wikitext-103-raw-v1'
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'wikitext-103' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=False, batch_size=batch_size, num_workers=4)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 117920140
val_len = 247289
test_len = 283287
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_openwebtext(self):
batch_size = 8
dataset_name = 'openwebtext'
dataset_config_name = None
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'openwebtext' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=True, batch_size=batch_size,
num_workers=num_cpu_cores() // 2)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 9035582198
val_len = 4434897
test_len = 4434897
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_lambada(self):
batch_size = 8
dataset_name = 'lambada'
dataset_config_name = None
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'lambada' / 'cache'
max_length = 1024
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=True, batch_size=batch_size,
num_workers=64)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 9035582198
val_len = 4434897
test_len = 4434897
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_the_pile(self):
batch_size = 8
dataset_name = 'the_pile'
dataset_config_name = None
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'the_pile' / 'cache'
max_length = 2048
# Dataset is too large to fit into memory, need to use disk for concatenation
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=True, batch_size=batch_size,
num_workers=num_cpu_cores() // 2, use_shmem=False)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 374337375694
val_len = 383326395
test_len = 373297018
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
def test_pg19(self):
batch_size = 8
dataset_name = 'pg19'
dataset_config_name = None
data_dir = Path(os.getenv('DATA_DIR', current_dir.parent.parent / 'data'))
cache_dir = data_dir / 'pg19' / 'cache'
max_length = 2048
# Dataset is too large to fit into memory, need to use disk for concatenation
datamodule = LMDataModule(dataset_name, tokenizer_name='gpt2',
dataset_config_name=dataset_config_name,
max_length=max_length, cache_dir=cache_dir,
add_eos=True, batch_size=batch_size,
num_workers=num_cpu_cores() // 2)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_loader = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
datamodule.setup(stage='test')
test_loader = datamodule.test_dataloader()
train_len = 3066544128
val_len = 4653056
test_len = 10584064
assert len(train_loader) == div_up((train_len - 1) // max_length, batch_size)
assert len(val_loader) == div_up((val_len - 1) // max_length, batch_size)
assert len(test_loader) == div_up((test_len - 1) // max_length, batch_size)
for loader in [train_loader, val_loader, test_loader]:
x, y = next(iter(loader))
assert x.dim() == 2
assert x.shape == (batch_size, max_length)
assert x.dtype == torch.long
assert torch.allclose(x[:, 1:], y[:, :-1])
| FLASHATTENION-LION-OPTIMIZE-main | training/tests/datamodules/test_language_modeling_hf.py |
from typing import List, Optional, Sequence
from pathlib import Path
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils import utils
log = utils.get_logger(__name__)
def last_modification_time(path):
"""Including files / directory 1-level below the path
"""
path = Path(path)
if path.is_file():
return path.stat().st_mtime
elif path.is_dir():
return max(child.stat().st_mtime for child in path.iterdir())
else:
return None
def train(config: DictConfig) -> Optional[float]:
"""Contains training pipeline.
Instantiates all PyTorch Lightning objects from config.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# We want to add fields to config so need to call OmegaConf.set_struct
OmegaConf.set_struct(config, False)
# Init lightning model
model: LightningModule = hydra.utils.instantiate(config.task, cfg=config, _recursive_=False)
datamodule: LightningDataModule = model._datamodule
# Init lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config.callbacks.items():
if cb_conf is not None and "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if lg_conf is not None and "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
ckpt_cfg = {}
if config.get('resume'):
try:
checkpoint_path = Path(config.callbacks.model_checkpoint.dirpath)
if checkpoint_path.is_dir():
last_ckpt = checkpoint_path / 'last.ckpt'
autosave_ckpt = checkpoint_path / '.pl_auto_save.ckpt'
if not (last_ckpt.exists() or autosave_ckpt.exists()):
                    raise FileNotFoundError("Resume requires either last.ckpt or .pl_auto_save.ckpt")
if ((not last_ckpt.exists())
or (autosave_ckpt.exists()
and last_modification_time(autosave_ckpt) > last_modification_time(last_ckpt))):
# autosave_ckpt = autosave_ckpt.replace(autosave_ckpt.with_name('.pl_auto_save_loaded.ckpt'))
checkpoint_path = autosave_ckpt
else:
checkpoint_path = last_ckpt
# DeepSpeed's checkpoint is a directory, not a file
if checkpoint_path.is_file() or checkpoint_path.is_dir():
ckpt_cfg = {'ckpt_path': str(checkpoint_path)}
else:
log.info(f'Checkpoint file {str(checkpoint_path)} not found. Will start training from scratch')
        except (KeyError, FileNotFoundError):
            # No usable checkpoint (or no checkpoint callback configured): start from scratch.
            pass
# Configure ddp automatically
n_devices = config.trainer.get('devices', 1)
if isinstance(n_devices, Sequence): # trainer.devices could be [1, 3] for example
n_devices = len(n_devices)
if n_devices > 1 and config.trainer.get('strategy', None) is None:
config.trainer.strategy = dict(
_target_='pytorch_lightning.strategies.DDPStrategy',
find_unused_parameters=False,
gradient_as_bucket_view=True, # https://pytorch-lightning.readthedocs.io/en/stable/advanced/advanced_gpu.html#ddp-optimizations
)
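    # Illustrative only (names taken from the config accesses above): a launch such as
    # `python run.py trainer.devices=8` with no trainer.strategy override falls into the
    # branch above and picks up the DDPStrategy settings configured there.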
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(
config.trainer, callbacks=callbacks, logger=logger)
# Train the model
log.info("Starting training!")
trainer.fit(model=model, datamodule=datamodule, **ckpt_cfg)
# Evaluate model on test set, using the best model achieved during training
if config.get("test_after_training") and not config.trainer.get("fast_dev_run"):
log.info("Starting testing!")
trainer.test(model=model, datamodule=datamodule)
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Print path to best checkpoint
if not config.trainer.get("fast_dev_run"):
log.info(f"Best model ckpt: {trainer.checkpoint_callback.best_model_path}")
# Return metric score for hyperparameter optimization
optimized_metric = config.get("optimized_metric")
if optimized_metric:
return trainer.callback_metrics[optimized_metric]
| FLASHATTENION-LION-OPTIMIZE-main | training/src/train.py |
from typing import List, Optional
from pathlib import Path
import torch
import hydra
from omegaconf import OmegaConf, DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils import utils
log = utils.get_logger(__name__)
def remove_prefix(text: str, prefix: str):
if text.startswith(prefix):
return text[len(prefix) :]
    return text  # prefix not present; return the text unchanged
def load_checkpoint(path, device='cpu'):
path = Path(path).expanduser()
if path.is_dir():
path /= 'last.ckpt'
# dst = f'cuda:{torch.cuda.current_device()}'
log.info(f'Loading checkpoint from {str(path)}')
state_dict = torch.load(path, map_location=device)
# T2T-ViT checkpoint is nested in the key 'state_dict_ema'
if state_dict.keys() == {'state_dict_ema'}:
state_dict = state_dict['state_dict_ema']
# Swin checkpoint is nested in the key 'model'
if state_dict.keys() == {'model'}:
state_dict = state_dict['model']
# Lightning checkpoint contains extra stuff, we only want the model state dict
if 'pytorch-lightning_version' in state_dict:
state_dict = {remove_prefix(k, 'model.'): v for k, v in state_dict['state_dict'].items()}
return state_dict
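# Illustrative note: a Lightning checkpoint stores the weights under 'state_dict' with a
# 'model.' prefix added by the task's LightningModule wrapper, so keys are renamed back to
# the bare model's names before load_state_dict, e.g. (hypothetical key)
#   'model.backbone.layer0.weight' -> 'backbone.layer0.weight'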
def evaluate(config: DictConfig) -> None:
"""Example of inference with trained model.
It loads trained image classification model from checkpoint.
Then it loads example image and predicts its label.
"""
# load model from checkpoint
# model __init__ parameters will be loaded from ckpt automatically
# you can also pass some parameter explicitly to override it
# We want to add fields to config so need to call OmegaConf.set_struct
OmegaConf.set_struct(config, False)
# load model
checkpoint_type = config.eval.get('checkpoint_type', 'pytorch')
if checkpoint_type not in ['lightning', 'pytorch']:
        raise NotImplementedError(f'checkpoint_type {checkpoint_type} not supported')
if checkpoint_type == 'lightning':
cls = hydra.utils.get_class(config.task._target_)
model = cls.load_from_checkpoint(checkpoint_path=config.eval.ckpt)
elif checkpoint_type == 'pytorch':
model_cfg = config.model_pretrained if 'model_pretrained' in config else None
trained_model: LightningModule = hydra.utils.instantiate(config.task, cfg=config,
model_cfg=model_cfg,
_recursive_=False)
if 'ckpt' in config.eval:
load_return = trained_model.model.load_state_dict(
load_checkpoint(config.eval.ckpt, device=trained_model.device), strict=False
)
log.info(load_return)
if 'model_pretrained' in config:
...
else:
model = trained_model
datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
# datamodule: LightningDataModule = model._datamodule
datamodule.prepare_data()
datamodule.setup()
# print model hyperparameters
log.info(f'Model hyperparameters: {model.hparams}')
# Init Lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config["callbacks"].items():
if cb_conf is not None and "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init Lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config["logger"].items():
if lg_conf is not None and "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init Lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(
config.trainer, callbacks=callbacks, logger=logger, _convert_="partial"
)
# Evaluate the model
log.info("Starting evaluation!")
if config.eval.get('run_val', True):
trainer.validate(model=model, datamodule=datamodule)
if config.eval.get('run_test', True):
trainer.test(model=model, datamodule=datamodule)
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
| FLASHATTENION-LION-OPTIMIZE-main | training/src/eval.py |
from typing import Any, Dict, Optional
import torch
from torch import Tensor
from torchmetrics import Metric
class NumTokens(Metric):
"""Keep track of how many tokens we've seen.
"""
    # TODO: how do we prevent the reset between the epochs? The reset happens on the 1st batch
    # of the next epoch.
    # Right now the hack is that we override reset() to preserve the count. Since Metric.forward
    # relies on reset() internally, we also override _forward_reduce_state_update so that
    # forward still does the right thing.
is_differentiable = False
higher_is_better = False
full_state_update = False
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum",
persistent=True) # We want the count to be saved to state-dict
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
self.count += target.numel()
def compute(self) -> Tensor:
return self.count
def reset(self):
count = self.count
super().reset()
self.count = count
# Adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/metric.py
def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:
"""forward computation using single call to `update` to calculate the metric value on the current batch and
accumulate global state.
        This can be done when the global metric state is a simple reduction of batch states.
"""
self.update(*args, **kwargs)
return self.compute()
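# A small sketch (added for illustration, not used anywhere) of the behaviour the overridden
# reset() buys us: the token count keeps accumulating across epoch boundaries instead of
# being zeroed by the automatic reset.
def _example_num_tokens_survives_reset():
    metric = NumTokens()
    metric.update(preds=None, target=torch.zeros(2, 8))  # preds is ignored by update()
    metric.reset()                                        # would normally zero the state
    metric.update(preds=None, target=torch.zeros(2, 8))
    assert metric.compute().item() == 32                  # 16 tokens per update, preserved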
| FLASHATTENION-LION-OPTIMIZE-main | training/src/metrics/num_tokens.py |
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/metrics/perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
# Also adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/text/perplexity.py
# But we pass in the loss to avoid recomputation
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torchmetrics import Metric
try:
from flash_attn.losses.cross_entropy import CrossEntropyLoss
except ImportError:
CrossEntropyLoss = torch.nn.CrossEntropyLoss
__all__ = ['Perplexity']
class Perplexity(Metric):
r"""
    Perplexity measures how well a language model predicts a text sample. It is computed
    here as the exponential of the average negative log-likelihood per token,
    exp(mean(nll)), rather than an average of per-batch perplexities.
    Args:
        kwargs:
            Additional keyword arguments, see :ref:`Metric kwargs` for more info.
    Examples:
        >>> import torch
        >>> logits = torch.randn(16, 5)        # flattened (batch * seq_len, vocab_size) logits
        >>> target = torch.randint(5, (16,))
        >>> metric = Perplexity()
        >>> ppl = metric(logits, target)       # exp of the mean token-level NLL
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
total_log_probs: Tensor
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("total_log_probs", default=torch.tensor(0.0, dtype=torch.float64),
dist_reduce_fx="sum")
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum")
self.loss_fn = CrossEntropyLoss()
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
"""Compute and store intermediate statistics for Perplexity.
Args:
            preds:
                Logits for each token, shaped so that ``CrossEntropyLoss(preds, target)`` is
                valid, e.g. the flattened [batch_size * seq_len, vocab_size] logits used in
                this repo.
            target:
                Ground-truth token ids with a matching shape, e.g. [batch_size * seq_len].
"""
count = target.numel()
if loss is None:
loss = self.loss_fn(preds, target)
self.total_log_probs += loss.double() * count
self.count += count
def compute(self) -> Tensor:
"""Compute the Perplexity.
Returns:
Perplexity
"""
return torch.exp(self.total_log_probs / self.count)
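# Worked illustration (comment only) of why the token-weighted NLL sum is aggregated before
# exponentiating: exp(mean(nll)) and mean(exp(nll)) disagree as soon as batches have
# different losses.
#   nll = torch.tensor([1.0, 3.0])
#   torch.exp(nll.mean())    # tensor(7.3891)  <- what this metric reports
#   torch.exp(nll).mean()    # tensor(11.4019) <- the biased per-batch average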
| FLASHATTENION-LION-OPTIMIZE-main | training/src/metrics/perplexity.py |
import torch
from torch import Tensor
from torchmetrics import Metric, Accuracy
class AccuracyMine(Accuracy):
"""Wrap torchmetrics.Accuracy to take argmax of y in case of Mixup.
"""
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
super().update(preds, target.argmax(dim=-1) if target.is_floating_point() else target)
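# A small usage sketch (added for illustration; assumes the older torchmetrics API used
# elsewhere in this repo, where Accuracy() needs no extra arguments): soft Mixup targets
# are collapsed with argmax, while integer targets pass through unchanged.
def _example_accuracy_with_mixup_targets():
    metric = AccuracyMine()
    preds = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    soft_targets = torch.tensor([[0.3, 0.7], [0.6, 0.4]])  # Mixup-style float targets
    metric.update(preds, soft_targets)                     # argmax -> tensor([1, 0])
    assert metric.compute().item() == 1.0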
| FLASHATTENION-LION-OPTIMIZE-main | training/src/metrics/accuracy.py |
from typing import Any, List
import inspect
import torch
import hydra
from pytorch_lightning import LightningModule, LightningDataModule
from torchmetrics import MetricCollection
from einops import rearrange
from omegaconf import OmegaConf
from src.utils.utils import get_logger
from src.optim.param_grouping import group_parameters_for_optimizer
from src.utils.checkpoint import load_checkpoint
logger = get_logger(__name__)
class SequenceModel(LightningModule):
def __init__(self, cfg, model_cfg=None):
"""If model_cfg is passed, it will take precedence over cfg.model
"""
super().__init__()
# this line ensures params passed to LightningModule will be saved to ckpt
# it also allows to access params with 'self.hparams' attribute
self.save_hyperparameters(cfg)
self.cfg = cfg
self.model_cfg = model_cfg or self.cfg.model
self.instantiate_datamodule()
self.instantiate_model()
self.warmstart()
self.instantiate_loss()
self.instantiate_metrics()
def instantiate_datamodule(self):
logger.info(f"Instantiating datamodule <{self.cfg.datamodule._target_}>")
        # Naming this attribute self.datamodule would clash with PyTorch Lightning, which
        # assigns self.datamodule itself.
self._datamodule: LightningDataModule = hydra.utils.instantiate(self.cfg.datamodule)
self._datamodule.prepare_data()
self._datamodule.setup()
OmegaConf.clear_resolver('datamodule')
OmegaConf.register_new_resolver('datamodule', lambda attr: getattr(self._datamodule, attr))
def instantiate_model(self):
# if hasattr(self._datamodule, 'num_classes'):
# self.model_cfg.num_classes = self._datamodule.num_classes
# if (hasattr(self._datamodule, 'vocab_size')
# and self.model_cfg.get('embedding_cfg', None) is not None
# and self.model_cfg.embedding_cfg._target_ == "torch.nn.Embedding"):
# self.model_cfg.embedding_cfg.num_embeddings = self._datamodule.vocab_size
logger.info(f"Instantiating model <{self.model_cfg._target_}>")
recursive = getattr(self.model_cfg, '_recursive_', False)
self.model = hydra.utils.instantiate(self.model_cfg, _recursive_=recursive)
def instantiate_loss(self):
loss_fn_cfg = self.cfg.train.get('loss_fn')
if loss_fn_cfg is None:
loss_fn_cfg = {'_target_': 'torch.nn.CrossEntropyLoss'}
self.loss_fn = hydra.utils.instantiate(loss_fn_cfg)
loss_fn_val_cfg = self.cfg.train.get('loss_fn_val', loss_fn_cfg)
self.loss_fn_val = hydra.utils.instantiate(loss_fn_val_cfg)
def instantiate_metrics(self):
# use separate metric instance for train, val and test step
# to ensure a proper reduction over the epoch
if 'eval' in self.cfg and 'metrics' in self.cfg.eval:
metrics_cfg = self.cfg.eval.metrics
else:
metrics_cfg = {'acc': {'_target_': 'torchmetrics.Accuracy'}}
metrics = MetricCollection({name: hydra.utils.instantiate(cfg)
for name, cfg in metrics_cfg.items()})
self.train_metrics = metrics.clone(prefix='train/')
self.val_metrics = metrics.clone(prefix='val/')
self.test_metrics = metrics.clone(prefix='test/')
def warmstart(self):
if self.cfg.train.get('warmstart', None) is not None:
logger.info(f"Warm-starting with weights from {self.cfg.train.warmstart.path}")
strict = self.cfg.train.warmstart.get('strict', True)
state_dict = load_checkpoint(self.cfg.train.warmstart.path)
if self.cfg.train.warmstart.get('post_process', None) is not None:
state_dict = hydra.utils.instantiate(self.cfg.train.warmstart.post_process,
state_dict)
load_return = self.model.load_state_dict(state_dict, strict=False)
logger.info(load_return)
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def step(self, batch: Any, is_train=True):
try:
x, y, lengths = batch
except ValueError:
x, y = batch
lengths = None
output = self.forward(x) if lengths is None else self.forward(x, lengths=lengths)
loss = self.loss_fn(output, y) if is_train else self.loss_fn_val(output, y)
return loss, output, y
def shared_step(self, batch: Any, batch_idx: int, phase='train'):
loss, output, targets = self.step(batch, is_train=(phase == 'train'))
metrics = getattr(self, f'{phase}_metrics')
metrics(output, targets)
log_on_step = 'eval' in self.cfg and self.cfg.eval.get('log_on_step', False) and phase == 'train'
self.log(f"{phase}/loss", loss, on_step=log_on_step, on_epoch=True,
prog_bar=False, sync_dist=True)
# https://pytorch-lightning.readthedocs.io/en/stable/visualize/logging_advanced.html#enable-metrics-for-distributed-training
# We need to log the Metrics object, not the metric result, since otherwise
# pytorch-lightning will use torch.mean to reduce it.
# This would be wrong for perplexity, for example.
self.log_dict(metrics, on_step=log_on_step, on_epoch=True, prog_bar=True, sync_dist=True)
return {"loss": loss, "output": output, "targets": targets}
def training_step(self, batch: Any, batch_idx: int):
return self.shared_step(batch, batch_idx, phase='train')
def validation_step(self, batch: Any, batch_idx: int):
return self.shared_step(batch, batch_idx, phase='val')
def test_step(self, batch: Any, batch_idx: int):
return self.shared_step(batch, batch_idx, phase='test')
def configure_optimizers(self):
if 'optimizer_param_grouping' in self.cfg.train: # Set zero weight decay for some params
parameters = group_parameters_for_optimizer(self.model, self.cfg.train.optimizer,
**self.cfg.train.optimizer_param_grouping)
else:
# parameters = self.model.parameters()
parameters = self.parameters() # [21-09-08] AG: this will train task specific parameters such as Retrieval head for AAN
optimizer = hydra.utils.instantiate(self.cfg.train.optimizer, parameters)
# Log optimizer info
for i, g in enumerate(optimizer.param_groups):
ntensors = len(g['params'])
nparams = sum(p.numel() for p in g['params'])
hparams = {k: v for k, v in g.items() if k != 'params'}
logger.info(f'Optimizer group {i}: {ntensors} tensors, {nparams} parameters, {hparams}')
if 'scheduler' not in self.cfg.train:
return optimizer
else:
# lr_scheduler should be called either every step (default) or every epoch
lr_scheduler = hydra.utils.instantiate(self.cfg.train.scheduler, optimizer)
return [optimizer], {'scheduler': lr_scheduler,
'interval': self.cfg.train.get('scheduler_interval', 'step'),
'monitor': self.cfg.train.get('scheduler_monitor', 'val/loss')}
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
# https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html#set-grads-to-none
# TD [2022-04-30]: DeepSpeed optimizer uses the kwarg set_grad_to_none instead of set_to_none
if 'set_to_none' in inspect.signature(optimizer.zero_grad).parameters:
optimizer.zero_grad(set_to_none=True)
else:
optimizer.zero_grad()
def on_save_checkpoint(self, checkpoint):
# TD [2022-08-07] ['epoch_loop.batch_progress']['total']['completed'] is 1 iteration
# behind, so we're using the optimizer's progress.
checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['total']['completed'] = checkpoint['loops']['fit_loop']['epoch_loop.batch_loop.optimizer_loop.optim_progress']['optimizer']['step']['total']['completed'] * self.trainer.accumulate_grad_batches
checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['current']['completed'] = checkpoint['loops']['fit_loop']['epoch_loop.batch_loop.optimizer_loop.optim_progress']['optimizer']['step']['current']['completed'] * self.trainer.accumulate_grad_batches
# _batches_that_stepped tracks the number of global steps, not the number
# of local steps, so we don't multiply with self.trainer.accumulate_grad_batches here.
checkpoint['loops']['fit_loop']['epoch_loop.state_dict']['_batches_that_stepped'] = checkpoint['loops']['fit_loop']['epoch_loop.batch_loop.optimizer_loop.optim_progress']['optimizer']['step']['total']['completed']
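# A rough sketch (inferred from the attribute accesses above, not an authoritative schema)
# of the config shape SequenceModel expects:
#   cfg.datamodule        - Hydra config with _target_ for the LightningDataModule
#   cfg.model             - Hydra config with _target_ for the nn.Module (unless model_cfg is passed)
#   cfg.train.optimizer   - optimizer _target_ + kwargs
#   cfg.train.scheduler   - optional LR scheduler config ('scheduler_interval' defaults to 'step')
#   cfg.train.loss_fn     - optional; defaults to torch.nn.CrossEntropyLoss
#   cfg.eval.metrics      - optional dict of torchmetrics configs (defaults to accuracy)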
class SequenceLMModel(SequenceModel):
def step(self, batch: Any, is_train=True):
x, y = batch
output = self.forward(x).logits
output = rearrange(output, '... C -> (...) C')
y = rearrange(y, '... -> (...)')
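        # Shapes: logits (batch, seq_len, vocab) -> (batch * seq_len, vocab) and labels
        # (batch, seq_len) -> (batch * seq_len,), so a standard token-level cross-entropy
        # can be applied below.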
loss = self.loss_fn(output, y) if is_train else self.loss_fn_val(output, y)
return loss, output, y
def shared_step(self, batch: Any, batch_idx: int, phase='train'):
loss, output, targets = self.step(batch, is_train=(phase == 'train'))
# Passing the loss to the perplexity metrics to avoid recomputation
metrics = getattr(self, f'{phase}_metrics')
metrics(output, targets, loss=loss)
log_on_step = 'eval' in self.cfg and self.cfg.eval.get('log_on_step', False) and phase == 'train'
self.log(f"{phase}/loss", loss, on_step=log_on_step, on_epoch=True,
prog_bar=False, sync_dist=True)
# https://pytorch-lightning.readthedocs.io/en/stable/visualize/logging_advanced.html#enable-metrics-for-distributed-training
# We need to log the Metrics object, not the metric result, since otherwise
# pytorch-lightning will use torch.mean to reduce it.
# This would be wrong for perplexity, for example.
self.log_dict(metrics, on_step=log_on_step, on_epoch=True, prog_bar=True, sync_dist=True)
return {"loss": loss, "output": output, "targets": targets}
| FLASHATTENION-LION-OPTIMIZE-main | training/src/tasks/seq.py |
# Adapted from https://pytorch.org/docs/stable/_modules/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.html
# We divide by world_size before converting to fp16, so the fp16 all-reduce is less likely to overflow.
from typing import Any, Callable
import torch
import torch.distributed as dist
def fp16_compress_hook(
process_group: dist.ProcessGroup, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
"""
This DDP communication hook implements a simple gradient compression
approach that casts ``GradBucket`` tensor to half-precision floating-point format (``torch.float16``)
and then divides it by the process group size.
It allreduces those ``float16`` gradient tensors. Once compressed gradient
tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``).
Example::
>>> ddp_model.register_comm_hook(process_group, fp16_compress_hook)
"""
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
# Divide first before converting to fp16
# Use out argument to fuse the division and the conversion.
compressed_tensor = torch.div(bucket.buffer(), world_size,
out=torch.empty_like(bucket.buffer(), dtype=torch.float16))
fut = dist.all_reduce(
compressed_tensor, group=group_to_use, async_op=True
).get_future()
def decompress(fut):
decompressed_tensor = bucket.buffer()
# Decompress in place to reduce the peak memory.
# See: https://github.com/pytorch/pytorch/issues/45968
decompressed_tensor.copy_(fut.value()[0])
return decompressed_tensor
# TODO: maybe have a backoff strategy: check if the buffer has inf / NaN, in that case
# resend with fp32?
return fut.then(decompress)
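# A hedged usage sketch (not part of the original file): with PyTorch Lightning the hook is
# usually attached through the DDP strategy rather than by calling register_comm_hook by
# hand; recent Lightning releases expose this as a DDPStrategy argument, roughly:
#
#   from pytorch_lightning.strategies import DDPStrategy
#   strategy = DDPStrategy(ddp_comm_hook=fp16_compress_hook)
#   # trainer = Trainer(strategy=strategy, devices=8)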
| FLASHATTENION-LION-OPTIMIZE-main | training/src/distributed/ddp_comm_hooks.py |
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py
from typing import Any, List, Sequence
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from src.utils.flops import has_deepspeed_profiling, has_fvcore_profiling
from src.utils.flops import profile_deepspeed, profile_fvcore
class FlopCount(Callback):
"""Counter the number of FLOPs used by the model
"""
def __init__(self, profilers: List[str] = ['fvcore', 'deepspeed'],
input_size: tuple = (3, 224, 224), input_dtype=torch.float32, device=None):
if not isinstance(profilers, Sequence):
profilers = [profilers]
if any(p not in ['fvcore', 'deepspeed'] for p in profilers):
raise NotImplementedError('Only support fvcore and deepspeed profilers')
if 'fvcore' in profilers and not has_fvcore_profiling:
raise ImportError('fvcore is not installed. Install it by running `pip install fvcore`')
elif 'deepspeed' in profilers and not has_deepspeed_profiling:
raise ImportError('deepspeed is not installed')
super().__init__()
self.profilers = profilers
self.input_size = tuple(input_size)
self.input_dtype = input_dtype
self.device = device
@rank_zero_only
def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
if 'fvcore' in self.profilers:
_, macs, _, acts = profile_fvcore(pl_module.to(self.device), input_size=self.input_size,
input_dtype=self.input_dtype, detailed=True)
trainer.logger.log_hyperparams({'GMACs': macs * 1e-9, 'MActs': acts * 1e-6})
if 'deepspeed' in self.profilers:
            macs, _ = profile_deepspeed(pl_module.to(self.device), input_size=self.input_size,
                                        input_dtype=self.input_dtype, detailed=True)
if 'fvcore' not in self.profilers: # fvcore's MACs seem more accurate
trainer.logger.log_hyperparams({'GMACs': macs * 1e-9})
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/flop_count.py |
import subprocess
from pathlib import Path
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if trainer.fast_dev_run:
raise Exception(
"Cannot use wandb callbacks since pytorch lightning disables loggers in `fast_dev_run=true` mode."
)
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeAsArtifact(Callback):
"""Upload all code files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str, use_git: bool = True):
"""
Args:
code_dir: the code directory
use_git: if using git, then upload all files that are not ignored by git.
                if not using git, then upload all '*.py' files.
"""
self.code_dir = code_dir
self.use_git = use_git
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
if self.use_git:
# get .git folder
# https://alexwlchan.net/2020/11/a-python-function-to-ignore-a-path-with-git-info-exclude/
git_dir_path = Path(
subprocess.check_output(["git", "rev-parse", "--git-dir"]).strip().decode("utf8")
).resolve()
for path in Path(self.code_dir).resolve().rglob("*"):
if (
path.is_file()
# ignore files in .git
and not str(path).startswith(str(git_dir_path)) # noqa: W503
# ignore files ignored by git
and ( # noqa: W503
subprocess.run(["git", "check-ignore", "-q", str(path)]).returncode == 1
)
):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
else:
for path in Path(self.code_dir).resolve().rglob("*.py"):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_keyboard_interrupt(self, trainer, pl_module):
self.on_train_end(trainer, pl_module)
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in Path(self.ckpt_dir).rglob("*.ckpt"):
ckpts.add_file(str(path))
experiment.log_artifact(ckpts)
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(targets, preds, average=None)
r = recall_score(targets, preds, average=None)
p = precision_score(targets, preds, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, dim=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/wandb_callbacks.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def set_affinity(trainer):
try:
from src.utils.gpu_affinity import set_affinity
nproc_per_node = torch.cuda.device_count()
affinity = set_affinity(trainer.local_rank, nproc_per_node, 'socket_unique_continuous')
log.info(f'{trainer.local_rank}: thread affinity: {affinity}')
# TD [2022-05-07] Somehow calling this causes GPU 0 to allocate extra ~800MB of memory per
# number of GPUs (e.g., 6.4GB of extra memory in a 8-GPU setup). H/t Dan.
# l2_promote()
    except Exception:
        # Setting the affinity is best-effort; ignore failures (e.g. unsupported platform).
        pass
class GpuAffinity(Callback):
"""Set GPU affinity and increase the L2 fetch granularity.
Adapted from https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL
"""
def setup(self, trainer: Trainer, pl_module: LightningModule, stage=None) -> None:
set_affinity(trainer)
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/gpu_affinity.py |
# Inspired by https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/utilities/grads.py
# However, they compute grad at every iteration (I think), and the .item() calls incur a lot of overhead
# (6-7% slow down on GPT-2 small). Instead we only compute for iterations where we need to log, and don't
# call .item() explicitly.
from typing import Any
from collections import OrderedDict
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.strategies import DeepSpeedStrategy
import torch
import torch.nn as nn
try:
from apex.contrib.layer_norm import FastLayerNorm
except ImportError:
FastLayerNorm = None
class NormMonitor(Callback):
"""Monitor the scales of weights and gradients.
"""
def __init__(self, layer_norm_only: bool = False):
super().__init__()
self.layer_norm_only = layer_norm_only
# Use on_before_optimizer_step instead of on_train_batch_start since there might be
# gradient accumulation and we only care about scale when it could change (i.e., optimizer.step).
@rank_zero_only
def on_before_optimizer_step(self, trainer: Trainer, pl_module, *args: Any, **kwargs: Any) -> None:
if not trainer._logger_connector.should_update_logs:
return
model = pl_module.model
named_parameters = {}
if self.layer_norm_only:
ln_modules = (nn.LayerNorm, nn.Embedding)
if FastLayerNorm is not None:
ln_modules += (FastLayerNorm,)
for mn, m in model.named_modules():
if isinstance(m, ln_modules):
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
named_parameters[fpn] = p
else:
named_parameters = dict(model.named_parameters())
if isinstance(trainer.strategy, DeepSpeedStrategy):
loss_scale = trainer.model.optimizer.loss_scale
else:
loss_scale = 1.0
stats = {}
param_l1_norm, grad_l1_norm = [], []
for param_name, param in named_parameters.items():
param_abs = param.abs()
param_abs_mean = param_abs.mean(dtype=torch.float32)
stats[f'stats/{param_name}_max'] = param_abs.max()
stats[f'stats/{param_name}_mean'] = param_abs_mean
param_l1_norm.append(param_abs_mean * param.numel())
if param.grad is not None:
# If using AMP, gradient is already unscaled by the AMP loss scaler at this point
# https://github.com/Lightning-AI/lightning/pull/9606
# However, if using DeepSpeed, we need to scale it ourselves
param_grad_abs = param.grad.abs()
param_grad_abs_mean = param_grad_abs.mean(dtype=torch.float32) / loss_scale
stats[f'stats/{param_name}_grad_max'] = param_grad_abs.max() / loss_scale
stats[f'stats/{param_name}_grad_mean'] = param_grad_abs_mean
grad_l1_norm.append(param_grad_abs_mean * param.grad.numel())
stats['total_param_l1_norm'] = torch.stack(param_l1_norm).sum()
if grad_l1_norm:
stats['total_grad_l1_norm'] = torch.stack(grad_l1_norm).sum()
# Sort by params name
stats = OrderedDict(sorted(stats.items()))
if trainer.loggers is not None:
for logger in trainer.loggers:
logger.log_metrics(stats, step=trainer.fit_loop.epoch_loop._batches_that_stepped)
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/norm_monitor.py |
import pytorch_lightning as pl
from pytorch_lightning import Callback
from pytorch_lightning.utilities import rank_zero_only
import torch
from torch.autograd import grad
class CausalityMonitor(Callback):
r"""Monitor causality of a model by tracking gradient leakage forward in time.
    In a fully causal model, dy[k]/du[s] ~= 0 for all k < s.
Args:
seq_len (int): Length of the sequence to monitor.
input_dim (int): Dimension of the input to monitor. If 0, the callback assumes
the task to be language modeling, and skips the embedding layer. If > 0,
input_dim is interpreted as the input channel dimension, i.e. D with
dummy input of dimension [B, L, D].
Notes:
This callback assumes that `pl_module.model` has a `net` or `s4seq` attribute,
indicating the primary model to monitor. For LMs, `net` or `s4seq` should
be after the embedding layer.
"""
def __init__(self, seq_len: int = 10, input_dim: int = 0):
super().__init__()
self.seq_len = seq_len
self.input_dim = input_dim
@rank_zero_only
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
model = pl_module.model
with torch.enable_grad():
if self.input_dim == 0:
# [MP] LongTensors cannot have gradients - we start from post
# embedding in the LM case
input_dim = model.d_model
x = torch.randn((2, self.seq_len, input_dim), \
requires_grad=True).to(pl_module.device)
# [DF] HACK: we need to get the layer that comes after the embedding
if hasattr(model, 'net'):
y = model.net(x)
else:
y = model.s4seq(x)
else:
x = torch.randn(1, self.seq_len, self.input_dim, \
requires_grad=True).to(pl_module.device)
y = model(x)
stats = {}
for i in range(self.seq_len):
# total gradients flowing from y_i to x
g = grad(y[0,0,i].mean(), x, retain_graph=True, allow_unused=True)[0]
g = g[0,i+1:,:].abs().mean()
stats[f'stats/causality_{i}'] = g.item()
if trainer.loggers is not None:
for logger in trainer.loggers:
logger.log_metrics(stats, step=trainer.global_step)
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/causality_monitor.py |
# Inspired by https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/stochastic_weight_avg.py
# https://github.com/PyTorchLightning/Lightning-Bolts/blob/master/pl_bolts/callbacks/byol_updates.py
# https://forums.pytorchlightning.ai/t/adopting-exponential-moving-average-ema-for-pl-pipeline/488/2
# https://github.com/PyTorchLightning/pytorch-lightning/issues/8100
from typing import Dict, Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
from src.utils.ema import ExponentialMovingAverage
class EMACallback(Callback):
"""TD [2021-08-31]: saving and loading from checkpoint should work.
"""
def __init__(self, decay: float, use_num_updates: bool = True):
"""
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
super().__init__()
self.decay = decay
self.use_num_updates = use_num_updates
self.ema = None
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule"):
# It's possible that we already loaded EMA from the checkpoint
if self.ema is None:
self.ema = ExponentialMovingAverage([p for p in pl_module.parameters() if p.requires_grad],
decay=self.decay, use_num_updates=self.use_num_updates)
# Ideally we want on_after_optimizer_step but pytorch-lightning doesn't have it
# We only want to update when parameters are changing.
# Because of gradient accumulation, this doesn't happen every training step.
# https://github.com/PyTorchLightning/pytorch-lightning/issues/11688
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
if (batch_idx + 1) % trainer.accumulate_grad_batches == 0:
self.ema.update()
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
# During the initial validation we don't have self.ema yet
if self.ema is not None:
self.ema.store()
self.ema.copy_to()
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.ema is not None:
self.ema.restore()
def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.ema is not None:
self.ema.store()
self.ema.copy_to()
def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if self.ema is not None:
self.ema.restore()
def on_save_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
) -> Dict[str, Any]:
return self.ema.state_dict()
def on_load_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule",
checkpoint: Dict[str, Any]
) -> None:
if self.ema is None:
self.ema = ExponentialMovingAverage([p for p in pl_module.parameters() if p.requires_grad],
decay=self.decay, use_num_updates=self.use_num_updates)
self.ema.load_state_dict(checkpoint)
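# A hedged example (config names here are illustrative, not taken from this repo's actual
# configs) of how this callback would typically be enabled through the Hydra `callbacks`
# group that train.py iterates over:
#
#   callbacks:
#     ema:
#       _target_: src.callbacks.ema.EMACallback
#       decay: 0.9999   # example value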
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/ema.py |
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
class SpeedMonitor(Callback):
"""Monitor the speed of each step and each epoch.
"""
def __init__(self, intra_step_time: bool = True, inter_step_time: bool = True,
epoch_time: bool = True, verbose=False):
super().__init__()
self._log_stats = AttributeDict(
{
'intra_step_time': intra_step_time,
'inter_step_time': inter_step_time,
'epoch_time': epoch_time,
}
)
self.verbose = verbose
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_epoch_time = None
def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_intra_step_time = None
self._snap_inter_step_time = None
self._snap_epoch_time = time.time()
def on_validation_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_inter_step_time = None
def on_test_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._snap_inter_step_time = None
@rank_zero_only
def on_train_batch_start(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.intra_step_time:
self._snap_intra_step_time = time.time()
if not trainer._logger_connector.should_update_logs:
return
logs = {}
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs["time/inter_step (ms)"] = (time.time() - self._snap_inter_step_time) * 1000
if trainer.logger is not None:
trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if self.verbose and self._log_stats.intra_step_time and self._snap_intra_step_time:
pl_module.print(f"time/intra_step (ms): {(time.time() - self._snap_intra_step_time) * 1000}")
if not trainer._logger_connector.should_update_logs:
return
logs = {}
if self._log_stats.intra_step_time and self._snap_intra_step_time:
logs["time/intra_step (ms)"] = (time.time() - self._snap_intra_step_time) * 1000
if trainer.logger is not None:
trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule",) -> None:
logs = {}
if self._log_stats.epoch_time and self._snap_epoch_time:
logs["time/epoch (s)"] = time.time() - self._snap_epoch_time
if trainer.logger is not None:
trainer.logger.log_metrics(logs, step=trainer.global_step)
| FLASHATTENION-LION-OPTIMIZE-main | training/src/callbacks/speed_monitor.py |