python_code | repo_name | file_path
---|---|---|
from glob import glob
from .base_dataset import BaseDataset
import random
import os
import pandas as pd
import io
from PIL import Image
from CoTrain.datasets import client
class CC3MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
self.metadata = None
self._load_metadata()
if split == "train":
names = ["cc3m_train"]
elif split == "val":
names = ["cc3m_val"]
elif split == "test":
names = ["cc3m_val"]
print(names, ": ", len(self.metadata), "samples in total.")
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
self.data_dir = "s3://GCC/"
def _load_metadata(self):
# download specific
metadata_dir = './meta_data/cc3m'
split_files = {
'train': 'cc3m_training_success_full.tsv',
'val': 'cc3m_validation_success_full.tsv', # there is no test
'test': 'cc3m_validation_success_full.tsv'
}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
# elif self.split == 'val':
# metadata = metadata.sample(1000, random_state=0) # 15k val is unnecessarily large, downsample.
self.metadata = metadata
def _get_image_path(self, sample):
# conceptual captions uses this hashing to create the filename
rel_dir = 'training'
if self.split != 'train':
rel_dir = 'validation'
rel_fp = os.path.join(rel_dir, sample[1])
#rel_fp = os.path.join(rel_dir, str(zlib.crc32(sample['thumbnailUrl'].encode('utf-8')) & 0xffffffff))
# print(rel_fp)
return os.path.join(self.data_dir, rel_fp), rel_fp
def _get_caption(self, sample):
return sample[0]
#return sample['caption']
def get_raw_image(self, sample):
# print(sample)
abs_fp, rel_fp = self._get_image_path(sample)
if "s3://" in abs_fp:
img_bytes = client.get(abs_fp)
assert img_bytes is not None, "Get image failed from {}".format(abs_fp)
img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
else:
img = Image.open(abs_fp).convert("RGB")
if img is None:
raise Exception("Invalid img!", rel_fp)
else:
return img
def get_image(self, index, sample, image_key="image"):
image = self.get_raw_image(sample)
# image_tensor = [tr(image).unsqueeze(0) for tr in global_transforms]
image_tensor = self.image_aug(image, self.transforms)
# image_tensor[0] = image_tensor[0].unsqueeze(0)
# print(len(image_tensor))
# print(image_tensor[0].size())
return {
"video": image_tensor,
"vid_index": sample[1],
"cap_index": index,
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
image = self.get_raw_image(sample)
# image_tensor = [tr(image).unsqueeze(0) for tr in global_transforms]
image_tensor = self.image_aug(image, self.transforms)
return {f"false_video_{rep}": image_tensor}
def get_text(self, raw_index, sample):
text = sample[0]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
# print(encoding.size())
return {
"text": (text, encoding),
"vid_index": sample[1],
"cap_index": raw_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.metadata) - 1)
sample = self.metadata.iloc[random_index]
text = sample[0]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
max_try = 10
try_time = 0
while result is None:
try_time += 1
sample = self.metadata.iloc[index]
try:
ret = dict()
ret.update(self.get_image(index, sample))
if not self.image_only:
txt = self.get_text(index, sample)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
index = random.randint(0, len(self.metadata) - 1)
exc = e
if try_time > max_try:
print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}")
try_time = 0
return ret
def __len__(self):
return len(self.metadata)
def __getitem__(self, index):
return self.get_suite(index)
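# --- Illustrative sketch below; not part of the original file. ---
# It shows how a metadata row is read by _get_caption / _get_image_path above:
# column 0 is assumed to hold the caption, column 1 the relative image file name,
# and the split picks the "training"/"validation" subdirectory. The row is hypothetical.
if __name__ == "__main__":
    example_row = pd.Series(["a dog running on the beach", "0000123.jpg"])
    rel_dir = "training"  # "validation" for the val/test splits
    caption = example_row[0]
    abs_path = os.path.join("s3://GCC/", rel_dir, example_row[1])
    print(caption, abs_path)  # -> "a dog running on the beach" s3://GCC/training/0000123.jpg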
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc3m.py |
# pretrain dataset
## video
from CoTrain.datamodules.video.webvid_datamodule import WEBVIDDataModule
from CoTrain.datamodules.video.webvid10m_datamodule import WEBVID10MDataModule
from CoTrain.datamodules.video.howto100m_datamodule import HT100MDataModule
from CoTrain.datamodules.video.youtube_datamodule import YOUTUBEDataModule
from CoTrain.datamodules.video.yttemporal_datamodule import YTTemporalMDataModule
## image
from CoTrain.datamodules.image.cc3m_datamodule import CC3MDataModule
from CoTrain.datamodules.image.cc12m_datamodule import CC12MDataModule
from CoTrain.datamodules.image.yfcc15m_datamodule import YFCC15MDataModule
from CoTrain.datamodules.image.laion400m_datamodule import LAION400MDataModule
from CoTrain.datamodules.image.vg_caption_datamodule import VisualGenomeCaptionDataModule
from CoTrain.datamodules.image.coco_caption_karpathy_datamodule import CocoCaptionKarpathyDataModule
from CoTrain.datamodules.image.conceptual_caption_datamodule import ConceptualCaptionDataModule
from CoTrain.datamodules.image.sbu_datamodule import SBUCaptionDataModule
from CoTrain.datamodules.image.mix100m_datamodule import MIX100MDataModule
# finetune dataset
## image
from CoTrain.datamodules.image.f30k_caption_karpathy_datamodule import F30KCaptionKarpathyDataModule
from CoTrain.datamodules.image.vqav2_datamodule import VQAv2DataModule
from CoTrain.datamodules.image.nlvr2_datamodule import NLVR2DataModule
from CoTrain.datamodules.image.vcr_datamodule import VCRDataModule
## video
from CoTrain.datamodules.video.msrvtt_datamodule import MSRVTTDataModule
from CoTrain.datamodules.video.msrvttqa_datamodule import MSRVTTQADataModule
from CoTrain.datamodules.video.msrvtt_choice_datamodule import MSRVTTChoiceDataModule
from CoTrain.datamodules.video.msvd_datamodule import MSVDDataModule
from CoTrain.datamodules.video.msvdqa_datamodule import MSVDQADataModule
from CoTrain.datamodules.video.ego4d_datamodule import Ego4DDataModule
from CoTrain.datamodules.video.tvqa_datamodule import TVQADataModule
from CoTrain.datamodules.video.lsmdc_choice_datamodule import LSMDCChoiceDataModule
from CoTrain.datamodules.video.ego4d_choice_datamodule import EGO4DChoiceDataModule
from CoTrain.datamodules.video.tgif_datamodule import TGIFDataModule
from CoTrain.datamodules.video.tgifqa_datamodule import TGIFQADataModule
from CoTrain.datamodules.video.didemo_datamodule import DIDEMODataModule
from CoTrain.datamodules.video.hmdb51_datamodule import HMDB51DataModule
from CoTrain.datamodules.video.ucf101_datamodule import UCF101DataModule
from CoTrain.datamodules.video.k400_datamodule import K400DataModule
from CoTrain.datamodules.video.lsmdc_datamodule import LSMDCDataModule
from CoTrain.datamodules.video.k400_video_datamodule import K400VideoDataModule
from CoTrain.datamodules.image.activitynet_datamodule import ActivityNetDataModule
_datamodules = {
# image
"vg": VisualGenomeCaptionDataModule,
"f30k": F30KCaptionKarpathyDataModule,
"coco": CocoCaptionKarpathyDataModule,
"gcc": ConceptualCaptionDataModule,
"sbu": SBUCaptionDataModule,
"vqa": VQAv2DataModule,
"nlvr2": NLVR2DataModule,
"cc3m": CC3MDataModule,
"cc12m": CC12MDataModule,
'yfcc15m': YFCC15MDataModule,
'laion400m': LAION400MDataModule,
'vcr': VCRDataModule,
'mix100m': MIX100MDataModule,
# video
'howto100m': HT100MDataModule,
'youtube': YOUTUBEDataModule,
'webvid': WEBVIDDataModule,
'webvid10m': WEBVID10MDataModule,
'msrvtt': MSRVTTDataModule,
'msrvttqa': MSRVTTQADataModule,
'msrvtt_choice': MSRVTTChoiceDataModule,
'msvd': MSVDDataModule,
'msvdqa': MSVDQADataModule,
'ego4d': Ego4DDataModule,
'tvqa': TVQADataModule,
'lsmdc_choice': LSMDCChoiceDataModule,
'ego4d_choice': EGO4DChoiceDataModule,
'yttemporal': YTTemporalMDataModule,
'tgif': TGIFDataModule,
"tgifqa": TGIFQADataModule,
'didemo': DIDEMODataModule,
'hmdb51': HMDB51DataModule,
'ucf101': UCF101DataModule,
'k400': K400DataModule,
'lsmdc': LSMDCDataModule,
'activitynet': ActivityNetDataModule,
'k400_video': K400VideoDataModule,
}
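# --- Illustrative sketch below; not part of the original file. ---
# The dict above is a plain registry keyed by dataset name, so datamodules can be
# resolved from config strings at runtime (MTDataModule uses exactly this pattern).
# `_config` stands for the usual experiment config dict.
def build_datamodules(keys, _config):
    # Look each requested key up in the registry and instantiate it with the config.
    return {key: _datamodules[key](_config) for key in keys}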
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/__init__.py |
from CoTrain.datasets import MSRVTTChoiceDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class MSRVTTChoiceDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSRVTTChoiceDataset
@property
def dataset_cls_no_false(self):
return MSRVTTChoiceDataset
@property
def dataset_name(self):
return "msrvtt_choice"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_choice_datamodule.py |
from CoTrain.datasets import TGIFQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class TGIFQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return TGIFQADataset
@property
def dataset_cls_no_false(self):
return TGIFQADataset
@property
def dataset_name(self):
return "tgifqa"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgifqa_datamodule.py |
from CoTrain.datasets import WEBVID10MDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class WEBVID10MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return WEBVID10MDataset
@property
def dataset_cls_no_false(self):
return WEBVID10MDataset
@property
def dataset_name(self):
return "webvid10m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid10m_datamodule.py |
import functools
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.trainer.supporters import CombinedLoader
from CoTrain.datamodules import _datamodules
class MTDataModule(LightningDataModule):
def __init__(self, _config, dist=False):
video_datamodule_keys = _config["video_datasets"]
image_datamodule_keys = _config["image_datasets"]
self.num_video_datasets = len(video_datamodule_keys)
self.num_image_datasets = len(image_datamodule_keys)
assert self.num_video_datasets > 0 or self.num_image_datasets > 0
super().__init__()
if self.num_video_datasets > 0:
self.video_dm_keys = video_datamodule_keys
self.video_dm_dicts = {key: _datamodules[key](_config) for key in video_datamodule_keys}
self.video_dms = [v for k, v in self.video_dm_dicts.items()]
self.video_batch_size = self.video_dms[0].batch_size
self.video_vocab_size = self.video_dms[0].vocab_size
self.video_num_workers = self.video_dms[0].num_workers
if self.num_image_datasets:
self.image_dm_keys = image_datamodule_keys
self.image_dm_dicts = {key: _datamodules[key](_config) for key in image_datamodule_keys}
self.image_dms = [v for k, v in self.image_dm_dicts.items()]
self.image_batch_size = self.image_dms[0].batch_size * _config["image_data_mult"]
self.image_vocab_size = self.image_dms[0].vocab_size
self.image_num_workers = self.image_dms[0].num_workers
self.dist = dist
# We add extra val datamodules so that we can use different dataset in train and val
# We assume all val datasets are video datasets
self.val_dm_keys = _config["val_datasets"]
self.val_dm_dicts = {
key: _datamodules[key](_config) for key in self.val_dm_keys
}
self.val_dms = [v for k, v in self.val_dm_dicts.items()]
self.pin_memory = False
def prepare_data(self):
if self.num_video_datasets:
for dm in self.video_dms:
dm.prepare_data()
if self.num_image_datasets:
for dm in self.image_dms:
dm.prepare_data()
for dm in self.val_dms:
dm.prepare_data()
def setup(self, stage):
if self.num_video_datasets:
for dm in self.video_dms:
dm.setup(stage)
if self.num_image_datasets:
for dm in self.image_dms:
dm.setup(stage)
for dm in self.val_dms:
dm.setup(stage)
if self.num_video_datasets:
self.video_train_dataset = ConcatDataset([dm.train_dataset for dm in self.video_dms])
self.video_val_dataset = ConcatDataset([dm.val_dataset for dm in self.video_dms])
self.video_test_dataset = ConcatDataset([dm.test_dataset for dm in self.video_dms])
if self.num_image_datasets:
self.image_train_dataset = ConcatDataset([dm.train_dataset for dm in self.image_dms])
self.image_val_dataset = ConcatDataset([dm.val_dataset for dm in self.image_dms])
self.image_test_dataset = ConcatDataset([dm.test_dataset for dm in self.image_dms])
if len(self.val_dms) == 0:
self.val_dataset = None
else:
self.val_dataset = ConcatDataset([dm.val_dataset for dm in self.val_dms])
if len(self.video_dms) > 0:
self.tokenizer = self.video_dms[0].tokenizer
else:
self.tokenizer = self.image_dms[0].tokenizer
if self.num_video_datasets:
self.video_collate = functools.partial(
self.video_dms[0].train_dataset.collate, mlm_collator=self.video_dms[0].mlm_collator,
)
if self.num_image_datasets:
self.image_collate = functools.partial(
self.image_dms[0].train_dataset.collate, mlm_collator=self.image_dms[0].mlm_collator,
)
if self.dist:
if self.num_video_datasets:
self.video_train_sampler = DistributedSampler(self.video_train_dataset, shuffle=False)
self.video_val_sampler = DistributedSampler(self.video_val_dataset, shuffle=False)
self.video_test_sampler = DistributedSampler(self.video_test_dataset, shuffle=False)
if self.num_image_datasets:
self.image_train_sampler = DistributedSampler(self.image_train_dataset, shuffle=False)
self.image_val_sampler = DistributedSampler(self.image_val_dataset, shuffle=False)
self.image_test_sampler = DistributedSampler(self.image_test_dataset, shuffle=False)
if self.val_dataset is not None:
self.val_sampler = DistributedSampler(self.val_dataset, shuffle=False)
else:
self.video_train_sampler = None
self.video_val_sampler = None
self.video_test_sampler = None
self.image_train_sampler = None
self.image_val_sampler = None
self.image_test_sampler = None
def train_dataloader(self):
if self.num_video_datasets:
video_loader = DataLoader(
self.video_train_dataset,
batch_size=self.video_batch_size,
sampler=self.video_train_sampler,
num_workers=self.video_num_workers,
pin_memory=self.pin_memory,
collate_fn=self.video_collate,
)
if self.num_image_datasets:
image_loader = DataLoader(
self.image_train_dataset,
batch_size=self.image_batch_size,
sampler=self.image_train_sampler,
num_workers=self.image_num_workers,
pin_memory=self.pin_memory,
collate_fn=self.image_collate,
)
if self.num_video_datasets and self.num_image_datasets:
loaders = {"v": video_loader, "i": image_loader}
combined_loader = CombinedLoader(loaders, mode="min_size") # "min_size" / "max_size_cycle",
else:
if self.num_video_datasets:
combined_loader = video_loader
else:
combined_loader = image_loader
return combined_loader
def val_dataloader(self, batch_size=None):
# Skip all other datasets if we have different val datasets
if self.val_dataset is not None:
return DataLoader(
self.val_dataset,
# batch_size=batch_size if batch_size is not None else self.video_batch_size * 4,
batch_size=batch_size if batch_size is not None else self.video_batch_size,
sampler=self.val_sampler,
num_workers=self.video_num_workers // 2,
pin_memory=self.pin_memory,
collate_fn=self.video_collate,
)
if self.num_video_datasets:
video_loader = DataLoader(
self.video_val_dataset,
batch_size=batch_size if batch_size is not None else self.video_batch_size,
sampler=self.video_val_sampler,
num_workers=self.video_num_workers,
pin_memory=self.pin_memory,
collate_fn=self.video_collate,
)
if self.num_image_datasets:
image_loader = DataLoader(
self.image_val_dataset,
batch_size=batch_size if batch_size is not None else self.image_batch_size,
sampler=self.image_val_sampler,
num_workers=self.image_num_workers,
pin_memory=self.pin_memory,
collate_fn=self.image_collate,
)
if self.num_video_datasets and self.num_image_datasets:
loaders = {"v": video_loader, "i": image_loader}
combined_loader = CombinedLoader(loaders, mode="min_size") # min_size / max_size_cycle
else:
if self.num_video_datasets:
combined_loader = video_loader
else:
combined_loader = image_loader
return combined_loader
def test_dataloader(self):
if self.num_video_datasets:
video_loader = DataLoader(
self.video_test_dataset,
batch_size=self.video_batch_size,
sampler=self.video_test_sampler,
num_workers=self.video_num_workers,
pin_memory=self.pin_memory,
collate_fn=self.video_collate,
)
if self.num_image_datasets:
image_loader = DataLoader(
self.image_test_dataset,
batch_size=self.image_batch_size,
sampler=self.image_test_sampler,
num_workers=self.image_num_workers,
pin_memory=self.pin_memory,
collate_fn=self.image_collate,
)
if self.num_video_datasets and self.num_image_datasets:
loaders = {"v": video_loader, "i": image_loader}
combined_loader = CombinedLoader(loaders, mode="min_size") # min_size / max_size_cycle
else:
if self.num_video_datasets:
combined_loader = video_loader
else:
combined_loader = image_loader
return combined_loader
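# --- Illustrative helper below; not part of the original file. ---
# When both modalities are configured, the loaders above are CombinedLoader
# objects and each batch is a dict keyed by modality ({"v": ..., "i": ...});
# with a single modality the batch is the collated dict itself.
def unpack_multitask_batch(batch):
    if isinstance(batch, dict) and "v" in batch and "i" in batch:
        return batch["v"], batch["i"]
    return batch, None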
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/multitask_datamodule.py |
from CoTrain.datasets import MSRVTTDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class MSRVTTDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSRVTTDataset
@property
def dataset_cls_no_false(self):
return MSRVTTDataset
@property
def dataset_name(self):
return "msrvtt"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_datamodule.py |
from CoTrain.datasets import LSMDCChoiceDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class LSMDCChoiceDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return LSMDCChoiceDataset
@property
def dataset_cls_no_false(self):
return LSMDCChoiceDataset
@property
def dataset_name(self):
return "lsmdc_choice"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_choice_datamodule.py |
from CoTrain.datasets import HMDB51Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class HMDB51DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return HMDB51Dataset
@property
def dataset_cls_no_false(self):
return HMDB51Dataset
@property
def dataset_name(self):
return "hmdb51"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/hmdb51_datamodule.py |
from CoTrain.datasets import Ego4DDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class Ego4DDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return Ego4DDataset
@property
def dataset_cls_no_false(self):
return Ego4DDataset
@property
def dataset_name(self):
return "ego4d"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_datamodule.py |
from CoTrain.datasets import TGIFDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class TGIFDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return TGIFDataset
@property
def dataset_cls_no_false(self):
return TGIFDataset
@property
def dataset_name(self):
return "tgif"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgif_datamodule.py |
from CoTrain.datasets import TVQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class TVQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return TVQADataset
@property
def dataset_cls_no_false(self):
return TVQADataset
@property
def dataset_name(self):
return "tvqa"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tvqa_datamodule.py |
from CoTrain.datasets import HT100MDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class HT100MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return HT100MDataset
@property
def dataset_cls_no_false(self):
return HT100MDataset
@property
def dataset_name(self):
return "howto100m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/howto100m_datamodule.py |
from CoTrain.datasets import MSRVTTQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
from collections import defaultdict
class MSRVTTQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSRVTTQADataset
@property
def dataset_name(self):
return "msrvttqa"
def setup(self, stage):
super().setup(stage)
self.answer2id = self.train_dataset.ans_lab_dict
sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1])
self.num_class = max(self.answer2id.values()) + 1
self.id2answer = defaultdict(lambda: "unknown")
for k, v in sorted_a2i:
self.id2answer[v] = k
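# --- Illustrative sketch below; not part of the original file. ---
# How setup() above inverts the answer vocabulary, using a tiny hypothetical dict:
if __name__ == "__main__":
    toy_answer2id = {"yes": 0, "no": 1, "dog": 2}
    toy_id2answer = defaultdict(lambda: "unknown")
    for ans, idx in sorted(toy_answer2id.items(), key=lambda x: x[1]):
        toy_id2answer[idx] = ans
    print(toy_id2answer[2], toy_id2answer[99])  # -> dog unknown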
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvttqa_datamodule.py |
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/__init__.py |
from CoTrain.datasets import MSVDQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
from collections import defaultdict
class MSVDQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSVDQADataset
@property
def dataset_name(self):
return "msvdqa"
def setup(self, stage):
super().setup(stage)
self.answer2id = self.train_dataset.ans_lab_dict
sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1])
self.num_class = max(self.answer2id.values()) + 1
self.id2answer = defaultdict(lambda: "unknown")
for k, v in sorted_a2i:
self.id2answer[v] = k
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvdqa_datamodule.py |
from CoTrain.datasets import K400VideoDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class K400VideoDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return K400VideoDataset
@property
def dataset_cls_no_false(self):
return K400VideoDataset
@property
def dataset_name(self):
return "k400_video" | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_video_datamodule.py |
from CoTrain.datasets import YOUTUBEDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class YOUTUBEDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return YOUTUBEDataset
@property
def dataset_cls_no_false(self):
return YOUTUBEDataset
@property
def dataset_name(self):
return "youtube"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/youtube_datamodule.py |
from CoTrain.datasets import UCF101Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class UCF101DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return UCF101Dataset
@property
def dataset_cls_no_false(self):
return UCF101Dataset
@property
def dataset_name(self):
return "ucf101"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ucf101_datamodule.py |
from CoTrain.datasets import DIDEMODataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class DIDEMODataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return DIDEMODataset
@property
def dataset_cls_no_false(self):
return DIDEMODataset
@property
def dataset_name(self):
return "didemo"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/didemo_datamodule.py |
from CoTrain.datasets import YTTemporalDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class YTTemporalMDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return YTTemporalDataset
@property
def dataset_cls_no_false(self):
return YTTemporalDataset
@property
def dataset_name(self):
return "yttemporal"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/yttemporal_datamodule.py |
from CoTrain.datasets import WEBVIDDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class WEBVIDDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return WEBVIDDataset
@property
def dataset_cls_no_false(self):
return WEBVIDDataset
@property
def dataset_name(self):
return "webvid"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid_datamodule.py |
from CoTrain.datasets import K400Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class K400DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return K400Dataset
@property
def dataset_cls_no_false(self):
return K400Dataset
@property
def dataset_name(self):
return "k400"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_datamodule.py |
from CoTrain.datasets import EGO4DChoiceDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class EGO4DChoiceDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return EGO4DChoiceDataset
@property
def dataset_cls_no_false(self):
return EGO4DChoiceDataset
@property
def dataset_name(self):
return "ego4d_choice"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_choice_datamodule.py |
from CoTrain.datasets import LSMDCDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class LSMDCDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return LSMDCDataset
@property
def dataset_cls_no_false(self):
return LSMDCDataset
@property
def dataset_name(self):
return "lsmdc"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_datamodule.py |
from CoTrain.datasets import MSVDDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class MSVDDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSVDDataset
@property
def dataset_cls_no_false(self):
return MSVDDataset
@property
def dataset_name(self):
return "msvd"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvd_datamodule.py |
from CoTrain.datasets import NLVR2Dataset
from .datamodule_base import BaseDataModule
class NLVR2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return NLVR2Dataset
@property
def dataset_name(self):
return "nlvr2"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/nlvr2_datamodule.py |
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
BertTokenizer,
)
def get_pretrained_tokenizer(from_pretrained):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
BertTokenizer.from_pretrained(
from_pretrained, do_lower_case="uncased" in from_pretrained
)
torch.distributed.barrier()
return BertTokenizer.from_pretrained(
from_pretrained, do_lower_case="uncased" in from_pretrained
)
class BaseDataModule(LightningDataModule):
def __init__(self, _config):
super().__init__()
self.data_dir = _config["data_root"]
self.num_workers = _config["num_workers"]
self.batch_size = _config["per_gpu_batchsize"]
self.eval_batch_size = self.batch_size
self.image_size = _config["image_size"]
self.max_text_len = _config["max_text_len"]
self.draw_false_video = _config["draw_false_video"]
self.draw_false_image = _config["draw_false_image"]
self.draw_false_text = _config["draw_false_text"]
self.image_only = _config["video_only"]
self.num_frames = _config["num_frames"]
self.draw_options_text = _config["draw_options_text"]
self.backend = _config["backend"]
self.train_transform_keys = (
["default_train"]
if len(_config["train_transform_keys"]) == 0
else _config["train_transform_keys"]
)
self.val_transform_keys = (
["default_val"]
if len(_config["val_transform_keys"]) == 0
else _config["val_transform_keys"]
)
tokenizer = _config["tokenizer"]
self.tokenizer = get_pretrained_tokenizer(tokenizer)
self.vocab_size = self.tokenizer.vocab_size
collator = (
DataCollatorForWholeWordMask
if _config["whole_word_masking"]
else DataCollatorForLanguageModeling
)
self.mlm_collator = collator(
tokenizer=self.tokenizer, mlm=True, mlm_probability=_config["mlm_prob"]
)
self.setup_flag = False
@property
def dataset_cls(self):
raise NotImplementedError("return tuple of dataset class")
@property
def dataset_name(self):
raise NotImplementedError("return name of dataset")
def set_train_dataset(self):
self.train_dataset = self.dataset_cls(
self.data_dir,
self.train_transform_keys,
split="train",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=self.draw_false_image,
draw_false_video=self.draw_false_video,
draw_false_text=self.draw_false_text,
image_only=self.image_only,
num_frames=self.num_frames,
draw_options_text=self.draw_options_text,
backend=self.backend
)
def set_val_dataset(self):
self.val_dataset = self.dataset_cls(
self.data_dir,
self.val_transform_keys,
split="val",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=self.draw_false_image,
draw_false_video=self.draw_false_video,
draw_false_text=self.draw_false_text,
image_only=self.image_only,
num_frames=self.num_frames,
draw_options_text=self.draw_options_text,
backend=self.backend
)
if hasattr(self, "dataset_cls_no_false"):
self.val_dataset_no_false = self.dataset_cls_no_false(
self.data_dir,
self.val_transform_keys,
split="val",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=0,
draw_false_video=0,
draw_false_text=0,
image_only=self.image_only,
num_frames=self.num_frames,
draw_options_text=self.draw_options_text,
backend=self.backend
)
def make_no_false_val_dset(self, image_only=False):
return self.dataset_cls_no_false(
self.data_dir,
self.val_transform_keys,
split="val",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=0,
draw_false_video=0,
draw_false_text=0,
image_only=image_only,
num_frames=self.num_frames,
draw_options_text=self.draw_options_text,
backend=self.backend
)
def set_test_dataset(self):
self.test_dataset = self.dataset_cls(
self.data_dir,
self.val_transform_keys,
split="test",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=self.draw_false_image,
draw_false_video=self.draw_false_video,
draw_false_text=self.draw_false_text,
image_only=self.image_only,
num_frames=self.num_frames,
draw_options_text=self.draw_options_text,
backend=self.backend
)
def setup(self, stage):
if not self.setup_flag:
self.set_train_dataset()
self.set_val_dataset()
self.set_test_dataset()
self.train_dataset.tokenizer = self.tokenizer
self.val_dataset.tokenizer = self.tokenizer
self.test_dataset.tokenizer = self.tokenizer
self.setup_flag = True
def train_dataloader(self):
loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.train_dataset.collate,
)
return loader
def val_dataloader(self):
loader = DataLoader(
self.val_dataset,
batch_size=self.eval_batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.val_dataset.collate,
)
return loader
def test_dataloader(self):
loader = DataLoader(
self.test_dataset,
batch_size=self.eval_batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.test_dataset.collate,
)
return loader
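# --- Illustrative sketch below; not part of the original file. ---
# The config keys read by BaseDataModule.__init__ above; the values shown here
# are hypothetical placeholders, not defaults taken from the repository.
_example_config = {
    "data_root": "./data",
    "num_workers": 8,
    "per_gpu_batchsize": 32,
    "image_size": 224,
    "max_text_len": 40,
    "draw_false_video": 1,
    "draw_false_image": 1,
    "draw_false_text": 0,
    "video_only": False,            # stored on the module as self.image_only
    "num_frames": 8,
    "draw_options_text": 0,
    "backend": "v100",
    "train_transform_keys": ["pixelbert"],
    "val_transform_keys": ["pixelbert"],
    "tokenizer": "bert-base-uncased",
    "whole_word_masking": True,
    "mlm_prob": 0.15,
}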
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/datamodule_base.py |
from CoTrain.datasets import ConceptualCaptionDataset
from .datamodule_base import BaseDataModule
class ConceptualCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return ConceptualCaptionDataset
@property
def dataset_name(self):
return "gcc"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/conceptual_caption_datamodule.py |
from CoTrain.datasets import SBUCaptionDataset
from .datamodule_base import BaseDataModule
class SBUCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return SBUCaptionDataset
@property
def dataset_name(self):
return "sbu"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/sbu_datamodule.py |
from CoTrain.datasets import MIX100MDataset
from .datamodule_base import BaseDataModule
class MIX100MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MIX100MDataset
@property
def dataset_name(self):
return "mix100m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/mix100m_datamodule.py |
from CoTrain.datasets import CC3MDataset
from .datamodule_base import BaseDataModule
class CC3MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CC3MDataset
@property
def dataset_name(self):
return "cc3m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc3m_datamodule.py |
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/__init__.py |
from CoTrain.datasets import VisualGenomeCaptionDataset
from .datamodule_base import BaseDataModule
class VisualGenomeCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VisualGenomeCaptionDataset
@property
def dataset_name(self):
return "vg"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vg_caption_datamodule.py |
from CoTrain.datasets import VQAv2Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
from collections import defaultdict
class VQAv2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VQAv2Dataset
@property
def dataset_name(self):
return "vqa"
def setup(self, stage):
super().setup(stage)
train_answers = self.train_dataset.table["answers"].to_pandas().tolist()
val_answers = self.val_dataset.table["answers"].to_pandas().tolist()
train_labels = self.train_dataset.table["answer_labels"].to_pandas().tolist()
val_labels = self.val_dataset.table["answer_labels"].to_pandas().tolist()
all_answers = [c for c in train_answers + val_answers if c is not None]
all_answers = [l for lll in all_answers for ll in lll for l in ll]
all_labels = [c for c in train_labels + val_labels if c is not None]
all_labels = [l for lll in all_labels for ll in lll for l in ll]
self.answer2id = {k: v for k, v in zip(all_answers, all_labels)}
sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1])
self.num_class = max(self.answer2id.values()) + 1
self.id2answer = defaultdict(lambda: "unknown")
for k, v in sorted_a2i:
self.id2answer[v] = k
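# --- Illustrative sketch below; not part of the original file. ---
# The double flatten used in setup() above: every sample carries a list of
# answer lists, so two nested loops flatten it. Data below is hypothetical.
if __name__ == "__main__":
    toy_answers = [[["yes"], ["yes", "maybe"]], [["two"]]]
    flat = [l for lll in toy_answers for ll in lll for l in ll]
    print(flat)  # -> ['yes', 'yes', 'maybe', 'two']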
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vqav2_datamodule.py |
from CoTrain.datasets import YFCC15MDataset
from .datamodule_base import BaseDataModule
class YFCC15MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return YFCC15MDataset
@property
def dataset_name(self):
return "yfcc15m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/yfcc15m_datamodule.py |
from CoTrain.datasets import LAION400MDataset
from .datamodule_base import BaseDataModule
class LAION400MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return LAION400MDataset
@property
def dataset_name(self):
return "laion400m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/laion400m_datamodule.py |
from CoTrain.datasets import CocoCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class CocoCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CocoCaptionKarpathyDataset
@property
def dataset_cls_no_false(self):
return CocoCaptionKarpathyDataset
@property
def dataset_name(self):
return "coco"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/coco_caption_karpathy_datamodule.py |
from CoTrain.datasets import ActivityNetDataset
from .datamodule_base import BaseDataModule
class ActivityNetDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return ActivityNetDataset
@property
def dataset_name(self):
return "activitynet"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/activitynet_datamodule.py |
from CoTrain.datasets import VCRDataset
from .datamodule_base import BaseDataModule
class VCRDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VCRDataset
@property
def dataset_name(self):
return "vcr"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vcr_datamodule.py |
from CoTrain.datasets import F30KCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class F30KCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return F30KCaptionKarpathyDataset
@property
def dataset_cls_no_false(self):
return F30KCaptionKarpathyDataset
@property
def dataset_name(self):
return "f30k"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/f30k_caption_karpathy_datamodule.py |
from CoTrain.datasets import CC12MDataset
from .datamodule_base import BaseDataModule
class CC12MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CC12MDataset
@property
def dataset_name(self):
return "cc3m"
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc12m_datamodule.py |
from CoTrain.transforms.image.pixelbert import (
pixelbert_transform,
pixelbert_transform_randaug,
open_clip_transform,
)
_transforms = {
"pixelbert": pixelbert_transform,
"pixelbert_randaug": pixelbert_transform_randaug,
"open_clip": open_clip_transform,
}
def keys_to_transforms(keys: list, size=224, mode="train"):
return [_transforms[key](size=size, mode=mode) for key in keys]
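# --- Illustrative usage below; not part of the original file. ---
# Resolve transform callables from config keys; each element of the returned
# list transforms a single image for the corresponding key.
if __name__ == "__main__":
    train_tfms = keys_to_transforms(["pixelbert_randaug"], size=224, mode="train")
    val_tfms = keys_to_transforms(["pixelbert"], size=224, mode="val")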
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/__init__.py |
# input: (C, T, H, W) output: (C, T, H, W)
def VideoTransform(mode='train', crop_size=224, backend='v100'):
if backend == 'a100':
print("initalize data augmentation for a100 gpus")
import CoTrain.transforms.video.video_transform as video_transform
from torchvision import transforms
# https://github.com/FingerRec/BE/blob/main/src/Contrastive/augment/video_transformations/volume_transforms.py
input_mean = [0.48145466, 0.4578275, 0.40821073]
input_std = [0.26862954, 0.26130258, 0.27577711]
scale_size = crop_size * 256 // 224
if mode == 'train':
global_transforms = transforms.Compose([
video_transform.TensorToNumpy(),
# video_transform.Resize(int(crop_size * 1.2)), # 256/224 = 1.14
video_transform.Resize(scale_size),
video_transform.RandomCrop(crop_size),
# video_transform.ColorJitter(0.5, 0.5, 0.25, 0.5),  # color jitter omitted: it damages color attributes
video_transform.ClipToTensor(channel_nb=3),
# video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
video_transform.Normalize(mean=input_mean, std=input_std)
])
local_transforms = transforms.Compose([
video_transform.TensorToNumpy(),
video_transform.Resize(crop_size), # 256/224 = 1.14
video_transform.RandomCrop(96),
# video_transform.ColorJitter(0.5, 0.5, 0.25, 0.5),  # color jitter omitted: it damages color attributes
video_transform.ClipToTensor(channel_nb=3),
# video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
video_transform.Normalize(mean=input_mean, std=input_std)
])
else:
global_transforms = transforms.Compose([
video_transform.TensorToNumpy(),
# video_transform.Resize(int(crop_size * 1.2)), # 256
video_transform.Resize(scale_size),
video_transform.CenterCrop(crop_size), # 224
video_transform.ClipToTensor(channel_nb=3),
# video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
video_transform.Normalize(mean=input_mean, std=input_std)
])
local_transforms = transforms.Compose([
video_transform.TensorToNumpy(),
video_transform.Resize(crop_size), # 256
video_transform.CenterCrop(96), # 224
video_transform.ClipToTensor(channel_nb=3),
# video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
video_transform.Normalize(mean=input_mean, std=input_std)
])
return [global_transforms, local_transforms]
else:
# for pytorch > 1.9.0, V100
import pytorchvideo.transforms as video_transforms
# https://pytorchvideo.readthedocs.io/en/latest/api/transforms/transforms.html
global_transform = video_transforms.create_video_transform(mode=mode, min_size=int(crop_size*1.2),
max_size=int(crop_size*1.5),
crop_size=crop_size,
aug_type='randaug', # randaug/augmix
num_samples=None) # not use temporal sub sampling
local_transform = video_transforms.create_video_transform(mode=mode, min_size=crop_size,
max_size=int(crop_size*1.5),
crop_size=96,
aug_type='randaug', # randaug/augmix
num_samples=None) # not use temporal sub sampling
return [global_transform, local_transform]
def video_aug(videos, video_transform, byte=False):
if byte:
videos = videos.permute(1, 0, 2, 3).byte() # tchw -> cthw
else:
videos = videos.permute(1, 0, 2, 3)
# normal
# videos_tensor = [video_transform(videos).permute(1, 0, 2, 3)] # -> tchw
# dino
global_videos_tensor = []
global_transform, local_transform = video_transform
# print(videos.type())
# 2 GLOBAL views
for i in range(1):
global_videos_tensor.append(global_transform(videos).permute(1, 0, 2, 3))
# 3 LOCAL VIEWS
# local_videos_tensor = []
# for i in range(0):
# local_videos_tensor.append(local_transform(videos).permute(1, 0, 2, 3))
return global_videos_tensor
# return [global_videos_tensor, local_videos_tensor]
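# --- Illustrative usage below; not part of the original file. ---
# A rough sketch, assuming the pytorchvideo ("v100") backend above is installed:
# video_aug takes a (T, C, H, W) clip and returns a list holding one augmented
# global view of roughly shape (T, C, crop_size, crop_size).
if __name__ == "__main__":
    import torch
    frames = torch.randint(0, 256, (8, 3, 256, 256), dtype=torch.uint8)  # T, C, H, W
    transform = VideoTransform(mode="train", crop_size=224, backend="v100")
    views = video_aug(frames, transform, byte=True)
    print(len(views), views[0].shape)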
# # dino
# def video_aug(videos, video_transform, byte=False):
# if byte:
# videos = videos.permute(1, 0, 2, 3) # tchw -> cthw
# else:
# videos = videos.permute(1, 0, 2, 3).byte()
# # two global view
# videos_tensor = [video_transform(videos).permute(1, 0, 2, 3), video_transform(videos).permute(1, 0, 2, 3)] # -> tchw
# # local views
# # return videos_tensor
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/video/videoaug.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/video/__init__.py |
|
import numbers
import random
import numpy as np
import PIL
import skimage
import skimage.transform
import torchvision
import torch
from CoTrain.transforms.image import functional as F
from torchvision import transforms
from PIL import Image
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray or PIL.Image): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = tensor_clip.div(255)
return tensor_clip
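# --- Illustrative usage below; not part of the original file. ---
# ClipToTensor converts a list of T uint8 HWC frames into a (C, T, H, W) float
# tensor scaled to [0, 1]; the frames here are hypothetical.
if __name__ == "__main__":
    frames = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]
    clip_tensor = ClipToTensor(channel_nb=3)(frames)
    print(clip_tensor.shape)  # -> torch.Size([3, 8, 224, 224])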
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
class ColorDistortion(object):
def __init__(self, s=1.0):
self.s = s
self.color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s)
self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8)
self.rnd_gray = transforms.RandomGrayscale(p=0.2)
def __call__(self, video):
color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray])
return color_distort(video)
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be flipped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the longer interpolation takes.
Args:
interpolation (str): Can be one of 'nearest', 'bilinear',
defaults to 'nearest'
size (tuple): (width, height)
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = F.resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the longer interpolation takes.
Args:
interpolation (str): Can be one of 'nearest', 'bilinear',
defaults to 'nearest'
size (tuple): (width, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = F.resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = F.crop_clip(clip, y1, x1, h, w)
return cropped
class CornerCrop(object):
def __init__(self, size, crop_position=None):
self.size = size
if crop_position is None:
self.randomize = True
else:
self.randomize = False
self.crop_position = crop_position
self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']
def __call__(self, imgs):
t, h, w, c = imgs.shape
corner_imgs = list()
for n in self.crop_positions:
#print(n)
if n == 'c':
th, tw = (self.size, self.size)
x1 = int(round((w- tw) / 2.))
y1 = int(round((h - th) / 2.))
x2 = x1 + tw
y2 = y1 + th
elif n == 'tl':
x1 = 0
y1 = 0
x2 = self.size
y2 = self.size
elif n == 'tr':
x1 = w - self.size
y1 = 0
x2 = w
y2 = self.size
elif n == 'bl':
x1 = 0
y1 = h - self.size
x2 = self.size
y2 = h
elif n == 'br':
x1 = w - self.size
y1 = h - self.size
x2 = w
y2 = h
corner_imgs.append(imgs[:, y1:y2, x1:x2, :])
return corner_imgs
def randomize_parameters(self):
if self.randomize:
self.crop_position = self.crop_positions[random.randint(
0,
len(self.crop_positions) - 1)]
class RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be rotated
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Rotated list of images
"""
angle = random.uniform(self.degrees[0], self.degrees[1])
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angle) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angle) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class STA_RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be rotated
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Rotated list of images
"""
bsz = len(clip)
angle = random.uniform(self.degrees[0], self.degrees[1])
angles = [(i+1)/(bsz+1) * angle for i in range(bsz)]
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class Each_RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be rotated
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Rotated list of images
"""
bsz = len(clip)
angles = [random.uniform(self.degrees[0], self.degrees[1]) for i in range(bsz)]
# print(angles)
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = F.crop_clip(clip, y1, x1, h, w)
return cropped
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip_test
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
            clip (list): list of PIL.Image
        Returns:
            list of PIL.Image: list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
            # Create a sequence of image transform functions
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
            # Chain the sampled ops so every one of them is applied to each frame
            jittered_clip = []
            for img in clip:
                jittered_img = img
                for func in img_transforms:
                    jittered_img = func(jittered_img)
                jittered_clip.append(jittered_img)
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class EachColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip_test
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
            clip (list): list of PIL.Image
        Returns:
            list of PIL.Image: list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
            # Create a sequence of image transform functions
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
            # Chain the sampled ops so every one of them is applied to each frame
            jittered_clip = []
            for img in clip:
                jittered_img = img
                for func in img_transforms:
                    jittered_img = func(jittered_img)
                jittered_clip.append(jittered_img)
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                            'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class Normalize(object):
"""Normalize a clip_test with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this sync_dir
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This sync_dir acts out of place, i.e., it does not mutates the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, clip):
"""
Args:
            clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized.
        Returns:
            Tensor: Normalized Tensor clip.
"""
return F.normalize(clip, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
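# A minimal usage sketch (illustrative, not part of the original file): the clip
# transforms above operate on a whole clip, i.e. a list of PIL.Image or
# numpy.ndarray frames. The frame count, image size and jitter values below are
# assumptions for demonstration only.
#
#   from PIL import Image
#   frames = [Image.new("RGB", (320, 240)) for _ in range(8)]          # dummy clip
#   jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
#   crop = CenterCrop(224)
#   frames = crop(jitter(frames))   # still a list of 8 PIL images, now 224x224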
class TensorToNumpy(object):
def __init__(self):
print("convert to numpy")
def __call__(self, clip):
np_clip = clip.permute(1, 2, 3, 0).cpu().detach().numpy()
pil_clip = [Image.fromarray(np.uint8(numpy_image)).convert('RGB') for numpy_image in np_clip]
return pil_clip
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/video/video_transform.py |
from .utils import (
inception_normalize,
MinMaxResize,
)
from torchvision import transforms
from .randaug import RandAugment
def pixelbert_transform(size=800, mode="train"):
longer = int((1333 / 800) * size)
return transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
def pixelbert_transform_randaug(size=800, mode="train"):
longer = int((1333 / 800) * size)
trs = transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
def open_clip_transform(size=224, mode="train"):
input_mean = [0.48145466, 0.4578275, 0.40821073]
input_std = [0.26862954, 0.26130258, 0.27577711]
if mode == "train":
return transforms.Compose(
[
transforms.RandomResizedCrop(
size,
scale=(0.9, 1.0),
interpolation=transforms.InterpolationMode.BICUBIC,
),
transforms.ToTensor(),
transforms.Normalize(mean=input_mean, std=input_std),
]
)
else:
return transforms.Compose(
[
transforms.Resize(
size,
interpolation=transforms.InterpolationMode.BICUBIC,
),
transforms.CenterCrop(size),
transforms.ToTensor(),
transforms.Normalize(mean=input_mean, std=input_std),
]
)
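# Minimal usage sketch (illustrative only; the image size and `size` argument are
# assumptions): pixelbert_transform resizes so the shorter side approaches `size`
# (the longer side capped at 1333/800 * size), converts to a tensor and applies
# inception normalization; pixelbert_transform_randaug prepends RandAugment(2, 9).
#
#   from PIL import Image
#   img = Image.new("RGB", (640, 480))
#   t = pixelbert_transform(size=384)
#   x = t(img)   # float tensor of shape (3, H, W), with H and W multiples of 32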
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/pixelbert.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/__init__.py |
|
# code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    img_np = np.array(img).astype(np.int32)
img_np = img_np + addition
img_np = np.clip(img_np, 0, 255)
img_np = img_np.astype(np.uint8)
img = Image.fromarray(img_np)
return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v): # [4, 8]
v = int(v)
v = max(1, v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.0:
return img
v = v * img.size[0]
return CutoutAbs(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.0))
y0 = int(max(0, y0 - v / 2.0))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
def Identity(img, v):
return img
def augment_list():  # augmentation operations and their magnitude ranges
# https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
# l = [
# (Identity, 0., 1.0),
# (ShearX, 0., 0.3), # 0
# (ShearY, 0., 0.3), # 1
# (TranslateX, 0., 0.33), # 2
# (TranslateY, 0., 0.33), # 3
# (Rotate, 0, 30), # 4
# (AutoContrast, 0, 1), # 5
# (Invert, 0, 1), # 6
# (Equalize, 0, 1), # 7
# (Solarize, 0, 110), # 8
# (Posterize, 4, 8), # 9
# # (Contrast, 0.1, 1.9), # 10
# (Color, 0.1, 1.9), # 11
# (Brightness, 0.1, 1.9), # 12
# (Sharpness, 0.1, 1.9), # 13
# # (Cutout, 0, 0.2), # 14
# # (SamplePairing(imgs), 0, 0.4), # 15
# ]
# https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
l = [
(AutoContrast, 0, 1),
(Equalize, 0, 1),
# (Invert, 0, 1),
(Rotate, 0, 30),
(Posterize, 0, 4),
(Solarize, 0, 256),
(SolarizeAdd, 0, 110),
(Color, 0.1, 1.9),
(Contrast, 0.1, 1.9),
(Brightness, 0.1, 1.9),
(Sharpness, 0.1, 1.9),
(ShearX, 0.0, 0.3),
(ShearY, 0.0, 0.3),
# (CutoutAbs, 0, 40),
(TranslateXabs, 0.0, 100),
(TranslateYabs, 0.0, 100),
]
return l
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class RandAugment:
def __init__(self, n, m):
self.n = n
self.m = m # [0, 30]
self.augment_list = augment_list()
def __call__(self, img):
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
img = op(img, val)
return img
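# Minimal usage sketch (illustrative only; the n/m values and the dummy image are
# assumptions): RandAugment samples `n` ops per call and scales each op's
# magnitude by m/30 within that op's (minval, maxval) range.
#
#   from PIL import Image
#   img = Image.new("RGB", (224, 224))
#   augment = RandAugment(2, 9)
#   out = augment(img)   # an augmented PIL.Image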
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/randaug.py |
import numbers
import torch
import cv2
import numpy as np
import PIL
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
        raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                        'but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
if isinstance(size, numbers.Number):
im_h, im_w, im_c = clip[0].shape
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
np_inter = cv2.INTER_LINEAR
else:
np_inter = cv2.INTER_NEAREST
scaled = [
cv2.resize(img, size, interpolation=np_inter) for img in clip
]
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
        if interpolation == 'bilinear':
            pil_inter = PIL.Image.BILINEAR
        else:
            pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
        raise TypeError('Expected numpy.ndarray or PIL.Image ' +
                        'but got list of {0}'.format(type(clip[0])))
return scaled
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
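# Worked example for get_resize_sizes (shorter side scaled to `size`, aspect
# ratio preserved, result returned as (oh, ow)):
#   get_resize_sizes(im_h=480, im_w=640, size=256) -> (256, 341)
#   get_resize_sizes(im_h=640, im_w=480, size=256) -> (341, 256)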
def normalize(clip, mean, std, inplace=False):
if not _is_tensor_clip(clip):
        raise TypeError('tensor is not a torch clip.')
if not inplace:
clip = clip.clone()
dtype = clip.dtype
dim = len(mean)
mean = torch.as_tensor(mean, dtype=dtype, device=clip.device)
std = torch.as_tensor(std, dtype=dtype, device=clip.device)
# print(clip_test.size())
# if dim == 3:
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
# else:
# clip_test.sub_(mean[:, None, None]).div_(std[:, None, None])
return clip | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/functional.py |
from torchvision import transforms
from PIL import Image
class MinMaxResize:
def __init__(self, shorter=800, longer=1333):
self.min = shorter
self.max = longer
def __call__(self, x):
w, h = x.size
scale = self.min / min(w, h)
if h < w:
newh, neww = self.min, scale * w
else:
newh, neww = scale * h, self.min
if max(newh, neww) > self.max:
scale = self.max / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
newh, neww = newh // 32 * 32, neww // 32 * 32
return x.resize((neww, newh), resample=Image.BICUBIC)
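# Minimal usage sketch (illustrative only; the input size is an assumption):
#
#   from PIL import Image
#   resize = MinMaxResize(shorter=800, longer=1333)
#   out = resize(Image.new("RGB", (640, 480)))
#   # shorter side scaled towards 800 (capped so the longer side stays <= 1333),
#   # then both sides rounded down to multiples of 32 -> here a 1056x800 image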
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
# This is the simple maximum entropy normalization performed in the Inception paper
inception_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
inception_unnormalize = transforms.Compose(
[UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/utils.py |
def image_aug(images, image_transform):
    # Produce two "global" views of the input using the global transform
    # (the commented-out code below additionally built local views).
# TODO: Maybe remove the second view?
global_transform = image_transform[0]
# local_transform = image_transform[0][1]
global_images_tensor = []
# 2 GLOBAL views
for i in range(2):
global_images_tensor.append(global_transform(images).unsqueeze(0))
return global_images_tensor
# # 3 LOCAL VIEWS
# local_images_tensor = []
# for i in range(3):
# local_images_tensor.append(local_transform(images).unsqueeze(0))
# return [global_images_tensor, local_images_tensor] | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/imageaug.py |
import torch
import torch.nn as nn
import random
class TemporalRoll(nn.Module):
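    # Shifts small groups of patch tokens one step forward / backward along the
    # frame (time) dimension between transformer blocks, so per-frame attention
    # can mix information from neighbouring frames. With v=0 the class token is
    # kept fixed; other v values use alternative roll strategies.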
def __init__(self, n_segment=3, n_div=8, v=0):
super(TemporalRoll, self).__init__()
self.n_segment = n_segment
self.fold_div = n_div
self.v = v
def forward(self, x, layer=1):
# return x
nt, l, c = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, l, c)
if self.v == 0:
# 16, 3, 197, 768
fold = l // self.fold_div
out = torch.zeros_like(x)
# keep cls token
out[:, :, 0] = x[:, :, 0]
# roll left step 1 along time dimension (1)
out[:, :, 1:fold+1] = torch.roll(x[:, :, 1:fold+1], 1, 1)
# roll right step 1 along time dimension (1)
out[:, :, -fold:] = torch.roll(x[:, :, -fold:], -1, 1)
# not roll
out[:, :, 1+fold:-fold] = x[:, :, 1+fold: -fold]
# # 16, 3, 197, 768
# fold = l // self.fold_div
# out = torch.zeros_like(x)
# # roll left step 1 along time dimension (1)
# out[:, :, :fold] = torch.roll(x[:, :, :fold], 1, 1)
# # roll right step 1 along time dimension (1)
# out[:, :, -fold:] = torch.roll(x[:, :, -fold:], -1, 1)
# # not roll
# out[:, :, fold:-fold] = x[:, :, fold: -fold]
# random sampling
elif self.v == 1:
out = torch.zeros_like(x)
roll_token_idexs = random.sample(range(1, l), l//2)
# print(roll_token_idexs)
out = x
out[:, :, roll_token_idexs] = torch.roll(x[:, :, roll_token_idexs], 1, 1)
# roll different tokens for different blocks
elif self.v == 2:
rolled_token_len = l // self.fold_div
fold = rolled_token_len * (layer % self.fold_div)
begin_index = 1 + fold
end_index = min(1 + fold + rolled_token_len, l)
out = torch.zeros_like(x)
out[:, :, 0] = x[:, :, 0] # cls token unchanged
out[:, :, begin_index:] = x[:, :, begin_index:]
out[:, :, begin_index:end_index] = torch.roll(x[:, :, begin_index:end_index], 1, 1)
out[:, :, end_index:] = x[:, :, end_index:]
else: # not roll
fold = c // self.fold_div
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold] # shift left tokens
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right tokens
out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
return out.view(nt, l, c) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/temporal_roll.py |
import torch
import torch.nn as nn
import pytorch_lightning as pl
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from CoTrain.modules import heads, cotrain_utils
from CoTrain.modules import objectives as objectives
from CoTrain.modules import base_vision_transformer as vit
from CoTrain.modules.temporal_roll import TemporalRoll
import torch.nn.functional as F
import math
from CoTrain.modules.cotrain_utils import state_dict_dino_fix
class VCOPHeader(torch.nn.Module):
def __init__(self, tuple_len=3, feature_size=768):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
"""
super(VCOPHeader, self).__init__()
self.feature_size = feature_size
self.fc7 = nn.Linear(self.feature_size * 2, 512)
self.tuple_len = tuple_len
pair_num = int(tuple_len * (tuple_len - 1) / 2)
self.class_num = math.factorial(tuple_len)
self.fc8 = nn.Linear(512 * pair_num, self.class_num)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""
"""
pf = [] # pairwise concat
for i in range(self.tuple_len):
for j in range(i + 1, self.tuple_len):
pf.append(torch.cat([x[:, i], x[:, j]], dim=1))
pf = [self.fc7(i) for i in pf]
pf = [self.relu(i) for i in pf]
h = torch.cat(pf, dim=1)
h = self.dropout(h)
h = self.fc8(h) # logits
return h
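# Minimal usage sketch for VCOPHeader (illustrative only; the batch size is an
# assumption): the input is a tuple of per-clip features and the output scores
# every possible ordering of that tuple.
#
#   head = VCOPHeader(tuple_len=3, feature_size=768)
#   feats = torch.randn(8, 3, 768)   # (batch, tuple_len, feature_size)
#   logits = head(feats)             # shape (8, 6) = (batch, 3!)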
class CoTrainTransformerSS(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.save_hyperparameters()
bert_config = BertConfig(
vocab_size=config["vocab_size"],
hidden_size=config["hidden_size"],
num_hidden_layers=config["num_layers"],
num_attention_heads=config["num_heads"],
intermediate_size=config["hidden_size"] * config["mlp_ratio"],
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_rate"],
attention_probs_dropout_prob=config["drop_rate"],
)
self.text_embeddings = BertEmbeddings(bert_config)
self.text_embeddings.apply(objectives.init_weights)
self.token_type_embeddings = nn.Embedding(2, config["hidden_size"])
self.token_type_embeddings.apply(objectives.init_weights)
flag = 0
if self.hparams.config["load_path"] == "":
while not flag == 1:
try:
self.transformer = getattr(vit, self.hparams.config["vit"])(
pretrained=True, config=self.hparams.config
)
flag = 1
except:
print("load pretrained failed, try again")
flag = 0
else:
self.transformer = getattr(vit, self.hparams.config["vit"])(
pretrained=False, config=self.hparams.config
)
self.pooler = heads.Pooler(config["hidden_size"])
self.pooler.apply(objectives.init_weights)
# num frames
self.num_frames = config["num_frames"] # a global variable to identify if image/video
if config["loss_names"]["mlm"] > 0:
self.mlm_score = heads.MLMHead(bert_config)
self.mlm_score.apply(objectives.init_weights)
if config["loss_names"]["vtm"] > 0:
self.vtm_score = heads.vtmHead(config["hidden_size"])
self.vtm_score.apply(objectives.init_weights)
if config["loss_names"]["mpp"] > 0:
self.mpp_score = heads.MPPHead(bert_config)
self.mpp_score.apply(objectives.init_weights)
# vtc may also used for pretrain
# == for video text contrastive learning
if config["loss_names"]["vtc"] > 0:
print("initalize video project and txt projection")
# v1
self.txt_proj = nn.Linear(config["hidden_size"], config["shared_embedding_dim"])
self.vid_proj = nn.Linear(config["hidden_size"], config["shared_embedding_dim"])
# v2
# self.vid_proj = nn.Sequential(
# nn.Dropout(0.5),
# nn.Linear(config["hidden_size"], config["hidden_size"] // 2),
# nn.LayerNorm(config["hidden_size"] // 2),
# nn.GELU(),
# nn.Linear(config["hidden_size"] // 2, config["shared_embedding_dim"]),
# )
# self.txt_proj = nn.Sequential(
# nn.Dropout(0.5),
# nn.Linear(config["hidden_size"], config["hidden_size"] // 2),
# nn.LayerNorm(config["hidden_size"] // 2),
# nn.GELU(),
# nn.Linear(config["hidden_size"] // 2, config["shared_embedding_dim"]),
# )
self.txt_proj.apply(objectives.init_weights)
self.vid_proj.apply(objectives.init_weights)
# == end
if (
self.hparams.config["load_path"] != ""
and not self.hparams.config["test_only"]
):
print("0" * 200)
ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu")
state_dict = ckpt["state_dict"]
# if downstream max text token length not consistent with pretrain
if self.text_embeddings.position_embeddings.weight.size() != state_dict['text_embeddings.position_embeddings.weight'].size():
state_dict.pop('text_embeddings.position_embeddings.weight', None)
state_dict.pop('text_embeddings.position_ids', None)
new_state_dict = state_dict_dino_fix(state_dict, self.state_dict())
# new_state_dict = self._inflate_positional_embeds(state_dict)
self.load_state_dict(new_state_dict, strict=False)
# self.load_state_dict(state_dict, strict=False)
if self.hparams.config["linear_evaluation"]:
for name, param in self.named_parameters():
# only train project layer
if 'mlm_score' in name or 'vtm_score' in name or 'mpp_score' in name:
param.requires_grad = True
elif 'txt_proj' in name or 'vid_proj' in name:
param.requires_grad = True
elif 'pooler' in name:
param.requires_grad = True
else:
param.requires_grad = False
# flag = False
# for name, param in self.named_parameters():
# if '20' in name:
# flag = True
# param.requires_grad = flag
# trainable_params = filter(lambda p: p.requires_grad, self.parameters())
# ===================== Downstream ===================== #
hs = self.hparams.config["hidden_size"]
# print(config["loss_names"])
if config["loss_names"]["multiple_choice"] > 0:
self.vtm_score = heads.vtmHead(config["hidden_size"])
self.vtm_score.apply(objectives.init_weights)
# alex: vcr q2a task
if config["loss_names"]["vcr_q2a"] > 0:
self.vtm_score = heads.vtmHead(config["hidden_size"])
self.vtm_score.apply(objectives.init_weights)
# alex: tvqa
if config["loss_names"]["mc_vqa"] > 0:
self.vtm_score = heads.vtmHead(config["hidden_size"])
self.vtm_score.apply(objectives.init_weights)
if self.hparams.config["loss_names"]["vqa"] > 0:
vs = self.hparams.config["vqav2_label_size"]
self.vqa_classifier = nn.Sequential(
nn.Linear(hs, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
# alex: add for vcr: q2a
if self.hparams.config["loss_names"]["vcr_q2a"] > 0:
# for q2a
self.rank_output = nn.Linear(hs, 1)
self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :]
self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:]
# for qa2r
self.rank_output_2 = nn.Linear(hs, 1)
self.rank_output_2.weight.data = self.vtm_score.fc.weight.data[1:, :]
self.rank_output_2.bias.data = self.vtm_score.fc.bias.data[1:]
self.margin = 0.2
# add for vcop prediction
if self.hparams.config["loss_names"]["vcop"] > 0:
self.vcop_classifier = VCOPHeader(tuple_len=self.num_frames, feature_size=hs)
# add for tvqa
if self.hparams.config["loss_names"]["mc_vqa"] > 0:
# # v1: for q2a with vtm_score
# self.rank_output = nn.Linear(hs, 1)
# self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :]
# self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:]
# self.dropout = nn.Dropout(0.1)
self.mc_vqa_classifier = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(hs, 256),
nn.LayerNorm(256),
nn.GELU(),
nn.Linear(256, 1),
)
self.mc_vqa_classifier.apply(objectives.init_weights)
# alex: add for openend_vqa
if self.hparams.config["loss_names"]["openend_vqa"] > 0:
vs = self.hparams.config["msrvttqa_label_size"]
# self.vqa_classifier = nn.Sequential(
# nn.Dropout(0.5),
# nn.Linear(hs, hs * 2),
# nn.LayerNorm(hs * 2),
# nn.GELU(),
# nn.Linear(hs * 2, vs),
# )
# small dataset
self.vqa_classifier = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(hs, hs//2),
nn.LayerNorm(hs//2),
nn.GELU(),
nn.Linear(hs//2, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
if self.hparams.config["loss_names"]["nlvr2"] > 0:
self.nlvr2_classifier = nn.Sequential(
nn.Linear(hs * 2, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Linear(hs * 2, 2),
)
self.nlvr2_classifier.apply(objectives.init_weights)
emb_data = self.token_type_embeddings.weight.data
self.token_type_embeddings = nn.Embedding(3, hs)
self.token_type_embeddings.apply(objectives.init_weights)
self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]
if self.hparams.config["loss_names"]["irtr"] > 0:
self.rank_output = nn.Linear(hs, 1)
self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :]
self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:]
self.margin = 0.2
# for p in self.vtm_score.parameters(): # alex: requires_grad = true?
# p.requires_grad = False
# test msrvtt multiple choice without finetune
if self.hparams.config["loss_names"]["multiple_choice"] > 0:
self.rank_output = nn.Linear(hs, 1)
self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :]
self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:]
self.margin = 0.2
cotrain_utils.set_metrics(self)
self.current_tasks = list()
self.temporal_roll_module = TemporalRoll(n_segment=self.num_frames, v=0)
# ===================== load downstream (test_only) ======================
if self.hparams.config["load_path"] != "" and self.hparams.config["test_only"]:
print("====load checkpoint=====")
ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu")
state_dict = ckpt["state_dict"]
print('*' * 30)
print("current state dict")
print('*' * 30)
for k, v in self.state_dict().items():
print(k)
# temporal embed and fix model ?
# new_state_dict = state_dict_data_parallel_fix(state_dict, self.state_dict())
new_state_dict = state_dict_dino_fix(state_dict, self.state_dict())
# new_state_dict = self._inflate_positional_embeds(state_dict)
self.load_state_dict(new_state_dict, strict=False)
# self.load_state_dict(state_dict, strict=False)
# # # print learnable param
# for name, param in self.named_parameters():
# if param.requires_grad:
# print("learned param: ", name)
def infer(
self,
batch,
mask_text=False,
mask_video=False,
video_token_type_idx=1,
video_embeds=None,
video_masks=None,
input_video_only=False,
input_text_only=False,
mode="video"
):
# if text: process in normal video
# if video: repeat the text tensor for K times
if f"video_{video_token_type_idx - 1}" in batch:
imgkey = f"video_{video_token_type_idx - 1}"
else:
imgkey = "video"
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
# print(batch[imgkey])
self.num_frames = batch[imgkey][0].size(1)
if not input_video_only:
text_embeds = self.text_embeddings(text_ids)
video_labels = None
patch_index = None
if not input_text_only:
if video_embeds is None and video_masks is None:
img = batch[imgkey][0]
img = img.contiguous().view(-1, img.size()[2], img.size()[3], img.size()[4]) # btchw to [bt]chw
(
video_embeds,
video_masks,
patch_index,
video_labels,
) = self.transformer.visual_embed(
img,
max_image_len=self.hparams.config["max_image_len"],
mask_it=mask_video,
mode=mode
)
else:
patch_index, video_labels = (
None,
None,
)
if not input_video_only:
text_embeds = text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks))
text_embeds = torch.repeat_interleave(text_embeds, self.num_frames, dim=0)
text_masks = torch.repeat_interleave(text_masks, self.num_frames, dim=0)
if not input_text_only:
video_embeds = video_embeds + self.token_type_embeddings(torch.full_like(video_masks, video_token_type_idx))
# print(text_embeds.size(), video_embeds.size())
if not input_text_only and not input_video_only:
co_embeds = torch.cat([text_embeds, video_embeds], dim=1)
co_masks = torch.cat([text_masks, video_masks], dim=1)
x = co_embeds
if input_text_only:
x = text_embeds
co_masks = text_masks
if input_video_only:
x = video_embeds
co_masks = video_masks
for i, blk in enumerate(self.transformer.blocks):
# perform temporal roll operation for temporal modeling [video only]
if self.num_frames > 1 and not input_video_only and not input_text_only:
text_feats, image_feats = (
x[:, : text_embeds.shape[1]],
x[:, text_embeds.shape[1]:],
)
image_feats = self.temporal_roll_module(image_feats, i)
x = torch.cat((text_feats, image_feats), dim=1)
x, _attn = blk(x, mask=co_masks)
x = self.transformer.norm(x)
# reshape to video tensor
x = x.view(x.size(0) // self.num_frames, -1, x.size(-2),
x.size(-1))
# add vcop here
h = None
if self.hparams.config["loss_names"]["vcop"] > 0 and mode == "video":
h = x
x = torch.mean(x, dim=1)
if input_text_only:
text_feats = x
if "vtc" in self.current_tasks:
text_feats = self.txt_proj(text_feats)
video_feats = None
if input_video_only:
video_feats = x
if "vtc" in self.current_tasks:
video_feats = self.vid_proj(video_feats)
text_feats = None
if not input_text_only and not input_video_only:
text_feats, video_feats = (
x[:, : text_embeds.shape[1]],
x[:, text_embeds.shape[1]:],
)
cls_feats = self.pooler(x)
if not input_video_only:
text_masks = text_masks[::self.num_frames].contiguous()
if not input_text_only:
video_masks = video_masks[::self.num_frames].contiguous()
ret = {
"text_feats": text_feats,
"video_feats": video_feats,
"cls_feats": cls_feats,
"raw_cls_feats": x[:, 0],
"video_labels": video_labels,
"video_masks": video_masks,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
"patch_index": patch_index,
"vcop_features": h
}
return ret
def forward(self, batch, mode="video"):
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch, mode=mode))
return ret
# Masked Language Modeling
if "mlm" in self.current_tasks:
ret.update(objectives.compute_mlm(self, batch, mode=mode))
# video Text Matching
if "vtm" in self.current_tasks:
ret.update(objectives.compute_vtm_wpa(self, batch, mode=mode))
# ret.update(objectives.compute_vtm_wpa_dino(self, batch, mode=mode))
# video Text Contrastive
if "vtc" in self.current_tasks:
ret.update(objectives.compute_vtc(self, batch, mode=mode))
# Visual Question Answering
if "vqa" in self.current_tasks:
ret.update(objectives.compute_vqa(self, batch))
# alex: msrvtt Visual Question Answering
if "openend_vqa" in self.current_tasks:
ret.update(objectives.compute_openend_vqa(self, batch))
# alex: vcop only for video
if "vcop" in self.current_tasks and mode == "video":
ret.update(objectives.compute_vcop(self, batch))
# alex: vcr qa
if "vcr_q2a" in self.current_tasks:
ret.update(objectives.compute_vcr_q2a(self, batch))
# alex: mc_vqa
if "mc_vqa" in self.current_tasks:
ret.update(objectives.compute_mc_vqa_q2a(self, batch))
# alex: msrvtt multiple choice setting
if "multiple_choice" in self.current_tasks:
ret.update(objectives.compute_multiple_choice(self, batch))
# video Retrieval and Text Retrieval
if "irtr" in self.current_tasks:
ret.update(objectives.compute_irtr(self, batch))
return ret
def training_step(self, batch, batch_idx):
cotrain_utils.set_task(self)
# co-training
if "v" in batch and "i" in batch:
video_output = self(batch["v"], mode="video")
image_output = self(batch["i"], mode="image")
total_loss = sum([v for k, v in video_output.items() if "loss" in k]) + sum([v for k, v in image_output.items() if "loss" in k])
else:
output = self(batch, mode="video")
total_loss = sum([v for k, v in output.items() if "loss" in k])
return total_loss
def training_epoch_end(self, outs):
cotrain_utils.epoch_wrapup(self)
def validation_step(self, batch, batch_idx):
cotrain_utils.set_task(self)
if "v" in batch and "i" in batch:
video_output = self(batch["v"], mode="video")
image_output = self(batch["i"], mode="image")
else:
output = self(batch, mode="video")
def validation_epoch_end(self, outs):
cotrain_utils.epoch_wrapup(self)
def test_step(self, batch, batch_idx):
cotrain_utils.set_task(self)
if "v" in batch and "i" in batch:
video_output = self(batch["v"], mode="video")
image_output = self(batch["i"], mode="image")
else:
output = self(batch, mode="video")
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, image_output))
return ret
def test_epoch_end(self, outs):
model_name = self.hparams.config["load_path"].split("/")[-1][:-5]
if self.hparams.config["loss_names"]["vqa"] > 0:
objectives.vqa_test_wrapup(outs, model_name)
cotrain_utils.epoch_wrapup(self)
def configure_optimizers(self):
return cotrain_utils.set_schedule(self)
def _inflate_positional_embeds(self, new_state_dict, load_temporal_fix='zeros'):
# allow loading of timesformer with fewer num_frames
curr_keys = list(self.state_dict().keys())
if 'transformer.temporal_embed' in new_state_dict and 'transformer.temporal_embed' in curr_keys:
load_temporal_embed = new_state_dict['transformer.temporal_embed']
load_num_frames = load_temporal_embed.shape[1]
curr_num_frames = self.hparams.config['num_frames']
embed_dim = load_temporal_embed.shape[2]
if load_num_frames != curr_num_frames:
if load_num_frames > curr_num_frames:
print(f'### loaded {self.hparams.config["vit"]} model has MORE frames than current...'
f'### loading weights, filling in the extras via {load_temporal_fix}')
new_temporal_embed = load_temporal_embed[:, :curr_num_frames, :]
else:
print(f'### loaded {self.hparams.config["vit"]} model has FEWER frames than current...'
f'### loading weights, filling in the extras via {load_temporal_fix}')
if load_temporal_fix == 'zeros':
new_temporal_embed = torch.zeros([load_temporal_embed.shape[0], curr_num_frames, embed_dim])
new_temporal_embed[:, :load_num_frames] = load_temporal_embed
elif load_temporal_fix in ['interp', 'bilinear']:
# interpolate
# unsqueeze so pytorch thinks its an image
mode = 'nearest'
if load_temporal_fix == 'bilinear':
mode = 'bilinear'
load_temporal_embed = load_temporal_embed.unsqueeze(0)
new_temporal_embed = F.interpolate(load_temporal_embed,
(curr_num_frames, embed_dim), mode=mode).squeeze(0)
else:
raise NotImplementedError
new_state_dict['transformer.temporal_embed'] = new_temporal_embed
# allow loading with smaller spatial patches. assumes custom border crop, to append the
# border patches to the input sequence
if 'transformer.pos_embed' in new_state_dict and 'transformer.pos_embed' in curr_keys:
load_pos_embed = new_state_dict['transformer.pos_embed']
load_num_patches = load_pos_embed.shape[1]
curr_pos_embed = self.state_dict()['transformer.pos_embed']
if load_num_patches != curr_pos_embed.shape[1]:
raise NotImplementedError(
'Loading models with different spatial resolution / patch number not yet implemented, sorry.')
return new_state_dict | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_module.py |
import torch
import io
import random
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from CoTrain.modules.dist_utils import all_gather
from CoTrain.modules.objectives import compute_irtr_recall, compute_decouple_irtr_recall, compute_ind_irtr_recall
# from CoTrain.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from CoTrain.gadgets.my_metrics import VQAScore
from torchmetrics import Accuracy
from torchmetrics import MeanMetric as Scalar
from CoTrain.datasets import client
from .clip_param_keys import clip_param_keys
def set_split_metrics(pl_module, split, loss_names):
for k, v in loss_names.items():
if v < 1:
continue
if k == "vqa":
setattr(pl_module, f"{split}_vqa_score", VQAScore())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
# vcr
elif k == "vcr_q2a":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_vcr_qar_loss", Scalar())
setattr(pl_module, f"{split}_vcr_qar_accuracy", Accuracy())
elif k == "mc_vqa":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
elif k == "openend_vqa":
setattr(pl_module, f"{split}_vqa_score", VQAScore())
setattr(pl_module, f"{split}_vqa_loss", Scalar())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy(ignore_index=-100))
elif k == "vcop":
setattr(pl_module, f"{split}_vcop_score", VQAScore())
setattr(pl_module, f"{split}_vcop_loss", Scalar())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
elif k == "multiple_choice":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "nlvr2":
if split == "train":
setattr(pl_module, f"train_{k}_accuracy", Accuracy())
setattr(pl_module, f"train_{k}_loss", Scalar())
else:
setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
setattr(pl_module, f"dev_{k}_loss", Scalar())
setattr(pl_module, f"test_{k}_accuracy", Accuracy())
setattr(pl_module, f"test_{k}_loss", Scalar())
elif k == "irtr":
setattr(pl_module, f"{split}_irtr_loss", Scalar())
elif k == "mppd" or k == "mpfr":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "vtm":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_dino_loss", Scalar())
setattr(pl_module, f"{split}_{k}_wpa_loss", Scalar())
# add for image text contrastive learning
elif k == "itc":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "dino":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "contrastive":
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_image_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_text_accuracy", Accuracy())
elif k == "zs_classify":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
elif k == "mlm":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy(ignore_index=-100))
setattr(pl_module, f"{split}_{k}_loss", Scalar())
else:
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
def set_metrics(pl_module):
set_split_metrics(pl_module, "train", pl_module.hparams.config["loss_names"])
if len(pl_module.hparams.config["val_datasets"]) == 0:
set_split_metrics(pl_module, "val", pl_module.hparams.config["loss_names"])
else:
set_split_metrics(pl_module, "val", pl_module.hparams.config["val_loss_names"])
def epoch_wrapup(pl_module):
phase = "train" if pl_module.training else "val"
the_metric = 0
the_metric_qar = 0
if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
if torch.distributed.get_rank() == 0:
print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r1", ir_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r5", ir_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r10", ir_r10, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r1", tr_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r5", tr_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r10", tr_r10, pl_module.global_step
)
the_metric += ir_r1.item() + tr_r1.item()
# add for ind irtr
if pl_module.hparams.config["get_ind_recall_metric"] and not pl_module.training:
(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_ind_irtr_recall(pl_module)
print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r1", ir_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r5", ir_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r10", ir_r10, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r1", tr_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r5", tr_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r10", tr_r10, pl_module.global_step
)
# the_metric += ir_r1.item() + tr_r1.item()
the_metric += ir_r1 + tr_r1
# == end
if phase == "val" and len(pl_module.hparams.config["val_datasets"]) > 0:
# We are using a special dataset for val
loss_names = pl_module.hparams.config["val_loss_names"]
else:
loss_names = pl_module.hparams.config["loss_names"]
for loss_name, v in loss_names.items():
if v < 1:
continue
if loss_name == "mim" and not hasattr(pl_module, "visual_decoder"):
# We may choose not to decode
continue
value = 0
qar_value = 0
if loss_name == "vqa":
value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
pl_module.log(f"{loss_name}/{phase}/score_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_score").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), sync_dist=True
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "vcr_q2a":
# q2a
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), sync_dist=True
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
# qar
pl_module.log(
f"vcr_qar/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_vcr_qar_loss").compute(), sync_dist=True
)
getattr(pl_module, f"{phase}_vcr_qar_loss").reset()
qar_value = getattr(pl_module, f"{phase}_vcr_qar_accuracy").compute()
pl_module.log(f"vcr_qar/{phase}/accuracy_epoch", qar_value, sync_dist=True)
getattr(pl_module, f"{phase}_vcr_qar_accuracy").reset()
# mc_vqa
elif loss_name == "mc_vqa":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
elif loss_name == "openend_vqa":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_vqa_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_vqa_loss").reset()
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
# multiple_choice
elif loss_name == "multiple_choice":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
# vcop
elif loss_name == "vcop":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
elif loss_name == "nlvr2":
if phase == "train":
value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/train/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"train_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/train/loss_epoch",
getattr(pl_module, f"train_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"train_{loss_name}_loss").reset()
else:
value = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/dev/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/dev/loss_epoch",
getattr(pl_module, f"dev_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"dev_{loss_name}_loss").reset()
value = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/test/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"test_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/test/loss_epoch",
getattr(pl_module, f"test_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"test_{loss_name}_loss").reset()
elif loss_name == "irtr":
pl_module.log(
f"{loss_name}/{phase}/irtr_loss_epoch",
getattr(pl_module, f"{phase}_irtr_loss").compute(), sync_dist=True
)
getattr(pl_module, f"{phase}_irtr_loss").reset()
elif loss_name == "mppd" or loss_name == "mpfr":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "vtm":
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
pl_module.log(
f"{loss_name}/{phase}/wpa_loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").reset()
# add dino loss
pl_module.log(
f"{loss_name}/{phase}/dino_loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_dino_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_dino_loss").reset()
elif loss_name == "dino":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
# value = f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "vtc":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
# value = f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "contrastive":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
pl_module.log(
f"{loss_name}/{phase}/image_accuracy_epoch",
getattr(pl_module, f"{phase}_{loss_name}_image_accuracy").compute(),
sync_dist=True,
)
pl_module.log(
f"{loss_name}/{phase}/text_accuracy_epoch",
getattr(pl_module, f"{phase}_{loss_name}_text_accuracy").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
getattr(pl_module, f"{phase}_{loss_name}_image_accuracy").reset()
getattr(pl_module, f"{phase}_{loss_name}_text_accuracy").reset()
elif loss_name == "zs_classify":
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
elif loss_name == "mim":
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
else:
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
sync_dist=True,
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
if loss_name == "vcr_q2a":
# print(value, qar_value)
the_metric += qar_value/2 + value/2
else:
the_metric += value
pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
if pl_module.token_type_embeddings.weight.grad is None:
return True
else:
grad = pl_module.token_type_embeddings.weight.grad
return (grad.sum() == 0).item()
def set_task(pl_module):
phase = "train" if pl_module.training else "val"
if phase == "val" and len(pl_module.hparams.config["val_datasets"]) > 0:
# We are using a special dataset for val
pl_module.current_tasks = [
k for k, v in pl_module.hparams.config["val_loss_names"].items() if v >= 1
]
else:
pl_module.current_tasks = [
k for k, v in pl_module.hparams.config["loss_names"].items() if v >= 1
]
return
def set_schedule(pl_module):
lr = pl_module.hparams.config["learning_rate"]
wd = pl_module.hparams.config["weight_decay"]
def no_weight_decay(n, p):
no_decay = [
"bias",
"LayerNorm.bias",
"LayerNorm.weight",
"norm.bias",
"norm.weight",
"norm1.bias",
"norm1.weight",
"norm2.bias",
"norm2.weight",
# Following are for clip
"ln_1.weight",
"ln_2.weight",
"ln_3.weight",
"ln_post.weight",
"ln_pre.weight",
"ln_final.weight",
"embedding",
"logit_scale",
# evl
"temporal_cls_token",
"pemb_t",
"input_lns"
]
return p.dim() < 2 or any(nd in n for nd in no_decay)
head_names = ["vqa_classifier", "nlvr2_classifier"]
lr_mult = pl_module.hparams.config["lr_mult"]
clip_lr_mult = pl_module.hparams.config["clip_lr_mult"]
end_lr = pl_module.hparams.config["end_lr"]
decay_power = pl_module.hparams.config["decay_power"]
optim_type = pl_module.hparams.config["optim_type"]
names = [n for n, p in pl_module.named_parameters()]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in pl_module.named_parameters()
if not no_weight_decay(n, p)
and not any(bb in n for bb in head_names)
and not any(cp in n for cp in clip_param_keys)
],
"weight_decay": wd,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if no_weight_decay(n, p)
and not any(bb in n for bb in head_names)
and not any(cp in n for cp in clip_param_keys)
],
"weight_decay": 0.0,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if not no_weight_decay(n, p)
and any(bb in n for bb in head_names)
and not any(cp in n for cp in clip_param_keys)
],
"weight_decay": wd,
"lr": lr * lr_mult,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if no_weight_decay(n, p) and any(bb in n for bb in head_names)
and not any(cp in n for cp in clip_param_keys)
],
"weight_decay": 0.0,
"lr": lr * lr_mult,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if not no_weight_decay(n, p)
and not any(bb in n for bb in head_names)
and any(cp in n for cp in clip_param_keys)
],
"weight_decay": wd,
"lr": lr * clip_lr_mult,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if no_weight_decay(n, p)
and not any(bb in n for bb in head_names)
and any(cp in n for cp in clip_param_keys)
],
"weight_decay": 0.0,
"lr": lr * clip_lr_mult,
},
]
if optim_type == "adamw":
optimizer = torch.optim.AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-6, betas=(0.9, 0.98))
elif optim_type == "adam":
optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
elif optim_type == "sgd":
optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
if pl_module.trainer.max_steps is None or pl_module.trainer.max_steps == -1:
max_steps = (
len(pl_module.trainer.datamodule.train_dataloader())
* pl_module.trainer.max_epochs
// pl_module.trainer.accumulate_grad_batches
)
else:
max_steps = pl_module.trainer.max_steps
warmup_steps = pl_module.hparams.config["warmup_steps"]
if isinstance(pl_module.hparams.config["warmup_steps"], float):
warmup_steps = int(max_steps * warmup_steps)
if decay_power == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
)
else:
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
lr_end=end_lr,
power=decay_power,
)
sched = {"scheduler": scheduler, "interval": "step"}
return (
[optimizer],
[sched],
)
def state_dict_data_parallel_fix(load_state_dict, curr_state_dict):
load_keys = list(load_state_dict.keys())
curr_keys = list(curr_state_dict.keys())
redo_dp = False
undo_dp = False
if not curr_keys[0].startswith('module.') and load_keys[0].startswith('module.'):
undo_dp = True
elif curr_keys[0].startswith('module.') and not load_keys[0].startswith('module.'):
redo_dp = True
if undo_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in load_state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
elif redo_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in load_state_dict.items():
        name = 'module.' + k  # prepend 'module.'
new_state_dict[name] = v
else:
new_state_dict = load_state_dict
return new_state_dict
def state_dict_dino_fix(load_state_dict, curr_state_dict):
load_keys = list(load_state_dict.keys())
curr_keys = list(curr_state_dict.keys())
# for k in curr_state_dict.keys():
# print(k)
print('*'*50)
redo_dp = False
undo_dp = False
dino_dp = False
if not curr_keys[0].startswith('module.') and load_keys[0].startswith('module.'):
undo_dp = True
elif curr_keys[0].startswith('module.') and not load_keys[0].startswith('module.'):
redo_dp = True
elif load_keys[10].startswith('teacher.') or load_keys[10].startswith('student.'):
dino_dp = True
if undo_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in load_state_dict.items():
# print(k)
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
elif redo_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in load_state_dict.items():
# print(k)
            name = 'module.' + k  # add `module.` prefix
new_state_dict[name] = v
elif dino_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in load_state_dict.items():
# print(k)
            if k[:8] == "student.":
                name = "transformer." + k[8:]  # replace the `student.` prefix with `transformer.`
new_state_dict[name] = v
# continue
elif k[:8] == "teacher.":
# name = "transformer." + k[8:] # remove `teacher.`
# new_state_dict[name] = v
continue
else:
new_state_dict[k] = v
else:
for k, v in load_state_dict.items():
print(k)
new_state_dict = load_state_dict
print('*'*30)
print("new state dict")
print('*'*30)
for k, v in new_state_dict.items():
print(k)
return new_state_dict
def read_load_path(load_path):
if "s3://" in load_path:
assert client is not None, "Failed to init petrel client"
model_bytes = client.get(load_path)
        assert model_bytes is not None, "Read failed from {}".format(load_path)
return io.BytesIO(model_bytes)
else:
return load_path | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_utils.py |
import torch
# def forzen_param(model):
# for name, param in model.named_parameters():
# if 'mlm_score' in name or 'vtm_score' in name or 'mpp_score' in name:
# param.requires_grad = True
# else:
# param.requires_grad = False
# return True
def forzen_param(model):
    # Freeze every parameter that appears before the first parameter whose name contains
    # '10' (e.g. block 10); parameters from that point onward stay trainable.
    flag = False
for name, param in model.named_parameters():
if '10' in name:
flag = True
param.requires_grad = flag
return True | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/forzen_param.py |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import logging
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import hashlib
import os
import urllib
import warnings
from functools import partial
from tqdm import tqdm
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.resnetv2 import ResNetV2
from timm.models.registry import register_model
from torchvision import transforms
from einops import rearrange, repeat
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from torch import einsum, nn
import sys
sys.path.append("pretrained")
_logger = logging.getLogger(__name__)
def download_clip(
url: str = "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
root: str = os.path.expanduser("../../pretrained/torch/hub/checkpoints"),
):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
== expected_sha256
):
return download_target
else:
warnings.warn(
f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
)
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if (
hashlib.sha256(open(download_target, "rb").read()).hexdigest()
!= expected_sha256
):
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match"
)
return download_target
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
inception_unnormalize = transforms.Compose(
[UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 224, 224),
"pool_size": None,
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
"first_conv": "patch_embed.proj",
"classifier": "head",
**kwargs,
}
default_cfgs = {
# patch models (my experiments)
"vit_small_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth",
),
# patch models (weights ported from official Google JAX impl)
"vit_base_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch32_224": _cfg(
url="", # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch16_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_base_patch32_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_large_patch16_224": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth",
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch32_224": _cfg(
url="", # no official model weights for this combo, only for in21k
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch16_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
"vit_large_patch32_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
),
# patch models, imagenet21k (weights ported from official Google JAX impl)
"vit_base_patch16_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_base_patch32_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch16_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_large_patch32_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
"vit_huge_patch14_224_in21k": _cfg(
url="", # FIXME I have weights for this but > 2GB limit for github release binaries
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
),
# hybrid models (weights ported from official Google JAX impl)
"vit_base_resnet50_224_in21k": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth",
num_classes=21843,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=0.9,
first_conv="patch_embed.backbone.stem.conv",
),
"vit_base_resnet50_384": _cfg(
url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth",
input_size=(3, 384, 384),
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
crop_pct=1.0,
first_conv="patch_embed.backbone.stem.conv",
),
# hybrid models (my experiments)
"vit_small_resnet26d_224": _cfg(),
"vit_small_resnet50d_s3_224": _cfg(),
"vit_base_resnet26d_224": _cfg(),
"vit_base_resnet50d_224": _cfg(),
# deit models (FB weights)
"vit_deit_tiny_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth"
),
"vit_deit_small_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth"
),
"vit_deit_base_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
),
"vit_deit_base_patch16_384": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
input_size=(3, 384, 384),
crop_pct=1.0,
),
"vit_deit_tiny_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth"
),
"vit_deit_small_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth"
),
"vit_deit_base_distilled_patch16_224": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
),
"vit_deit_base_distilled_patch16_384": _cfg(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
input_size=(3, 384, 384),
crop_pct=1.0,
),
}
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def attn(q, k, v):
sim = einsum('b i d, b j d -> b i j', q, k)
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
return out
class VarAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
initialize='random'):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
if initialize == 'zeros':
self.qkv.weight.data.fill_(0)
self.qkv.bias.data.fill_(0)
# fill proj weight with 1 here to improve training dynamics. Otherwise temporal attention inputs
# are multiplied by 0*0, which is hard for the model to move out of.
self.proj.weight.data.fill_(1)
self.proj.bias.data.fill_(0)
self.attn_drop = nn.Dropout(attn_drop)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, einops_from, einops_to, **einops_dims):
h = self.num_heads
        # project x to q, k, v values
q, k, v = self.qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
q *= self.scale
# splice out CLS token at index 1
(cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
# let CLS token attend to key / values of all patches across time and space
cls_out = attn(cls_q, k, v)
# rearrange across time or space
q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))
# expand cls token keys and values across time or space and concat
r = q_.shape[0] // cls_k.shape[0]
cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))
k_ = torch.cat((cls_k, k_), dim=1)
v_ = torch.cat((cls_v, v_), dim=1)
# attention
out = attn(q_, k_, v_)
# merge back time or space
out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
# concat back the cls token
out = torch.cat((cls_out, out), dim=1)
# merge back the heads
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
## to out
x = self.proj(out)
x = self.proj_drop(x)
return x
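# Usage sketch for divided space-time attention with VarAttention. The einops patterns
# follow the TimeSformer / Frozen-in-Time convention; frame and patch counts are illustrative.
def _var_attention_sketch():
    attn = VarAttention(dim=768, num_heads=12)
    f, n = 4, 196  # frames and patches per frame
    x = torch.randn(2, 1 + f * n, 768)  # 1 CLS token + f * n patch tokens
    space_out = attn(x, 'b (f n) d', '(b f) n d', f=f)  # attend within each frame
    time_out = attn(x, 'b (f n) d', '(b n) f d', n=n)  # attend across frames per location
    return space_out.shape, time_out.shape  # both torch.Size([2, 785, 768])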
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
mask = mask.bool()
attn = attn.masked_fill(~mask[:, None, None, :], float("-inf"))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
def forward(self, x, mask=None):
_x, attn = self.attn(self.norm1(x), mask=mask)
x = x + self.drop_path(_x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
class PatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
no_patch_embed_bias=False,
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans,
embed_dim,
kernel_size=patch_size,
stride=patch_size,
bias=False if no_patch_embed_bias else True,
)
def forward(self, x):
# print(x.size())
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
x = self.proj(x)
return x
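# Shape sketch for PatchEmbed (illustrative values): a 224x224 RGB image with 16x16 patches
# becomes a 14x14 grid of 768-d tokens before flattening.
def _patch_embed_shape_sketch():
    pe = PatchEmbed(img_size=224, patch_size=16, embed_dim=768)
    out = pe(torch.randn(2, 3, 224, 224))
    return out.shape  # torch.Size([2, 768, 14, 14])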
# from https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/mmaction/models/backbones/swin_transformer.py
class PatchEmbed3D(nn.Module):
""" Video to Patch Embedding.
Args:
patch_size (int): Patch token size. Default: (3,16,16).
in_chans (int): Number of input video channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 768.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, patch_size=(3, 16, 16), in_chans=3, embed_dim=768, norm_layer=None):
super().__init__()
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
# padding
_, _, D, H, W = x.size()
if W % self.patch_size[2] != 0:
x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2]))
if H % self.patch_size[1] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]))
if D % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0]))
x = self.proj(x) # B C D Wh Ww
if self.norm is not None:
D, Wh, Ww = x.size(2), x.size(3), x.size(4)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww)
return x
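# Shape sketch for PatchEmbed3D (illustrative values): an 8-frame 224x224 clip with
# patch_size=(2, 16, 16) is padded as needed and projected to a (B, 768, 4, 14, 14) token volume.
def _patch_embed3d_shape_sketch():
    pe3d = PatchEmbed3D(patch_size=(2, 16, 16), embed_dim=768)
    out = pe3d(torch.randn(2, 3, 8, 224, 224))
    return out.shape  # torch.Size([2, 768, 4, 14, 14])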
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
representation_size=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=None,
add_norm_before_transformer=False,
no_patch_embed_bias=False,
config=None,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
norm_layer: (nn.Module): normalization layer
"""
super().__init__()
drop_rate = drop_rate if config is None else config["drop_rate"]
self.num_classes = num_classes
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.add_norm_before_transformer = add_norm_before_transformer
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
self.patch_dim = img_size // patch_size
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
# print(img_size, patch_size, self.patch_dim)
# # =================== temporal embedding ======================
self.num_frames = config['num_frames']
print("# frames for module is: {}".format(self.num_frames))
self.temporal_embed = nn.Parameter(torch.zeros(1, config['num_frames'], embed_dim))
# self.patches_per_frame = 49 # 7x7 for patch with 32 resolution
        self.patches_per_frame = self.patch_dim ** 2  # e.g. 7x7 patches for a 224x224 input with patch size 32
# # ===
if add_norm_before_transformer:
self.pre_norm = norm_layer(embed_dim)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def mask_tokens(self, orig_image, feats):
"""
Prepare masked tokens inputs/labels for masked patch prediction: 80% MASK, 10% random, 10% original.
"""
img_unnorm = orig_image * 0.5 + 0.5
_, _, ph, pw = self.patch_embed.proj.weight.shape
with torch.no_grad():
img_unnorm_patch = F.conv2d(
img_unnorm,
weight=torch.ones(3, 1, ph, pw).to(img_unnorm) / (ph * pw),
bias=None,
stride=(ph, pw),
padding=0,
groups=3,
)
labels = (
((img_unnorm_patch * 255).long().flatten(start_dim=2, end_dim=3))
.permute(0, 2, 1)
.contiguous()
)
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape[:-1], 0.15)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(torch.full(labels.shape[:-1], 0.8)).bool() & masked_indices
)
feats[indices_replaced] = self.mask_token.to(feats)
return feats, labels
def visual_embed(self, _x, max_image_len=200, mask_it=False, mode='video'):
# for special case of irtr_recall
if len(_x.size()) == 5:
_x = _x.contiguous().view(-1, _x.size(2), _x.size(3), _x.size(4))
_, _, ph, pw = self.patch_embed.proj.weight.shape
# print(_x.size())
# if len(_x.size()) == 4:
# _x = _x.unsqueeze(1)
# # print(_x.size()) # 64x4x3x224x224
# # _b = _x.size(0)
# # print(_x)
# _x = _x.contiguous().view(-1, _x.size(2), _x.size(3), _x.size(4))
# print(_x.size())
x = self.patch_embed(_x)
# # average along time dimension
# x = x.view(_b, -1, x.size(1), x.size(2), x.size(3))
# x = torch.mean(x, dim=1)
# x = x.view(_b, x.size(1), x.size(2), -1)
# print(x.size())
x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :] # only use first image
# print(x_mask.size())
x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
x_h = x_mask[:, 0].sum(dim=1)[:, 0]
x_w = x_mask[:, 0].sum(dim=2)[:, 0]
B, C, H, W = x.shape
B = B
spatial_pos = (
self.pos_embed[:, 1:, :]
.transpose(1, 2)
.view(1, C, self.patch_dim, self.patch_dim)
)
pos_embed = torch.cat(
[
F.pad(
F.interpolate(
spatial_pos, size=(h, w), mode="bilinear", align_corners=True,
),
(0, W - w, 0, H - h),
)
for h, w in zip(x_h, x_w)
],
dim=0,
)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
x = x.flatten(2).transpose(1, 2)
patch_index = (
torch.stack(
torch.meshgrid(
torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1])
),
dim=-1,
)[None, None, :, :, :]
.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
.flatten(1, 3)
)
x_mask = x_mask.flatten(1)
if mask_it:
x, label = self.mask_tokens(_x, x)
if (
max_image_len < 0
or max_image_len is None
or not isinstance(max_image_len, int)
):
            # suppose aug is 800 x 1333, then the maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and shrunk)
# (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
# if self.patch_size = 32, 25 * 41 = 1025
# if res is 384 x 640, 12 * 20 = 240
eff = x_h * x_w
max_image_len = eff.max()
else:
eff = x_h * x_w
max_image_len = min(eff.max(), max_image_len)
valid_idx = x_mask.nonzero(as_tuple=False)
non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
unique_rows = valid_idx[:, 0].unique()
valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
non_valid_row_idx = [
non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows
]
valid_nums = [v.size(0) for v in valid_row_idx]
non_valid_nums = [v.size(0) for v in non_valid_row_idx]
pad_nums = [max_image_len - v for v in valid_nums]
select = list()
for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
if p <= 0:
valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len)
select.append(valid_row_idx[i][valid_choice])
else:
pad_choice = torch.multinomial(
torch.ones(nv).float(), p, replacement=True
)
select.append(
torch.cat(
[valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0,
)
)
select = torch.cat(select, dim=0)
x = x[select[:, 0], select[:, 1]].view(B, -1, C)
x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1)
patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2)
pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C) # 48, 196, 768
# # === alex: add temporal pos embed here
# temporal embed needs to be repeated within each frame (this does [1,2,3] --> [1,1,1,2,2,2,3,3,3]...)
if mode == 'video':
self.patches_per_frame = x.size(1)
tile_temporal_embed = self.temporal_embed.repeat_interleave(self.patches_per_frame, 1)
# tile_temporal_embed = tile_temporal_embed.view(-1, self.num_frames, tile_temporal_embed.size(-1))
# print(pos_embed.size(), tile_temporal_embed.size())
pos_embed = pos_embed.view(pos_embed.size(0)//self.num_frames, -1, pos_embed.size(-1))
pos_embed = pos_embed + tile_temporal_embed
pos_embed = pos_embed.view(-1, self.patches_per_frame, pos_embed.size(-1))
# =======
if mask_it:
label = label[select[:, 0], select[:, 1]].view(B, -1, 3)
label[x_mask == 0] = -100
label = torch.cat(
[torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1,
)
cls_tokens = self.cls_token.expand(B, -1, -1)
# print(cls_tokens.size(), x.size())
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = torch.cat(
(self.pos_embed[:, 0, :][:, None, :].expand(B, -1, -1), pos_embed), dim=1
)
# print(x.size(), pos_embed.size())
x = x + pos_embed
x = self.pos_drop(x)
if self.add_norm_before_transformer:
x = self.pre_norm(x)
x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)
# print(x_mask)
# print(x_mask.size())
# print(x.size())
if mask_it:
return x, x_mask, (patch_index, (H, W)), label
else:
return x, x_mask, (patch_index, (H, W)), None
def forward_features(self, _x, max_image_len=144, mask_it=False):
x, x_mask, patch_index, label = self.visual_embed(
_x, max_image_len=max_image_len, mask_it=mask_it
)
for blk in self.blocks:
x, _ = blk(x, mask=x_mask)
x = self.norm(x)
return x, x_mask, label
def forward(self, x, max_image_len=-1):
x, _, _ = self.forward_features(x, max_image_len=max_image_len)
x = x[:, 0]
x = self.head(x)
return x
class DistilledVisionTransformer(VisionTransformer):
""" Vision Transformer with distillation token.
Paper: `Training data-efficient image transformers & distillation through attention` -
https://arxiv.org/abs/2012.12877
This impl of distilled ViT is taken from https://github.com/facebookresearch/deit
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
trunc_normal_(self.dist_token, std=0.02)
trunc_normal_(self.pos_embed, std=0.02)
def visual_embed(self, _x, max_image_len=200, mask_it=False):
_, _, ph, pw = self.patch_embed.proj.weight.shape
x = self.patch_embed(_x)
x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :]
x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
x_h = x_mask[:, 0].sum(dim=1)[:, 0]
x_w = x_mask[:, 0].sum(dim=2)[:, 0]
B, C, H, W = x.shape
spatial_pos = (
self.pos_embed[:, 2:, :]
.transpose(1, 2)
.view(1, C, self.patch_dim, self.patch_dim)
)
pos_embed = torch.cat(
[
F.pad(
F.interpolate(
spatial_pos, size=(h, w), mode="bilinear", align_corners=True,
),
(0, W - w, 0, H - h),
)
for h, w in zip(x_h, x_w)
],
dim=0,
)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
x = x.flatten(2).transpose(1, 2)
patch_index = (
torch.stack(
torch.meshgrid(
torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1])
),
dim=-1,
)[None, None, :, :, :]
.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
.flatten(1, 3)
)
x_mask = x_mask.flatten(1)
if mask_it:
x, label = self.mask_tokens(_x, x)
if (
max_image_len < 0
or max_image_len is None
or not isinstance(max_image_len, int)
):
            # suppose aug is 800 x 1333, then the maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and shrunk)
# (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
# if self.patch_size = 32, 25 * 41 = 1025
# if res is 384 x 640, 12 * 20 = 240
eff = x_h * x_w
max_image_len = eff.max()
else:
eff = x_h * x_w
max_image_len = min(eff.max(), max_image_len)
valid_idx = x_mask.nonzero(as_tuple=False)
non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
unique_rows = valid_idx[:, 0].unique()
valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
non_valid_row_idx = [
non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows
]
valid_nums = [v.size(0) for v in valid_row_idx]
non_valid_nums = [v.size(0) for v in non_valid_row_idx]
pad_nums = [max_image_len - v for v in valid_nums]
select = list()
for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
if p <= 0:
valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len)
select.append(valid_row_idx[i][valid_choice])
else:
pad_choice = torch.multinomial(
torch.ones(nv).float(), p, replacement=True
)
select.append(
torch.cat(
[valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0,
)
)
select = torch.cat(select, dim=0)
x = x[select[:, 0], select[:, 1]].view(B, -1, C)
x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1)
patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2)
pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C)
if mask_it:
label = label[select[:, 0], select[:, 1]].view(B, -1, 3)
label[x_mask == 0] = -100
label = torch.cat(
[torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1,
)
cls_tokens = self.cls_token.expand(B, -1, -1)
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
pos_embed = torch.cat(
(self.pos_embed[:, :2, :].expand(B, -1, -1), pos_embed), dim=1
)
x = x + pos_embed
x = self.pos_drop(x)
if self.add_norm_before_transformer:
x = self.pre_norm(x)
x_mask = torch.cat([torch.ones(x_mask.shape[0], 2).to(x_mask), x_mask], dim=1)
if mask_it:
return x, x_mask, (patch_index, (H, W)), label
else:
return x, x_mask, (patch_index, (H, W)), None
def forward_features(self, _x, max_image_len=144, mask_it=False):
x, x_mask, patch_index, label = self.visual_embed(
_x, max_image_len=max_image_len, mask_it=mask_it
)
for blk in self.blocks:
x, _ = blk(x, mask=x_mask)
x = self.norm(x)
return x, x_mask, label
def forward(self, x, max_image_len=-1):
x, _, _ = self.forward_features(x, max_image_len=max_image_len)
x = x[:, 0]
x = self.head(x)
return x
def resize_pos_embed(posemb, posemb_new):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
_logger.info("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape)
ntok_new = posemb_new.shape[1]
if True:
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
gs_new = int(math.sqrt(ntok_new))
_logger.info("Position embedding grid-size from %s to %s", gs_old, gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
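# Worked example (illustrative sizes): resizing the position table of a 224/16 model
# (1 CLS + 14*14 patches) for a 384/16 model (1 CLS + 24*24 patches); the grid part is
# bilinearly interpolated and the CLS embedding is carried over unchanged.
def _resize_pos_embed_sketch():
    old = torch.randn(1, 197, 768)
    new_template = torch.zeros(1, 577, 768)
    return resize_pos_embed(old, new_template).shape  # torch.Size([1, 577, 768])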
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
if "model" in state_dict:
# For deit models
state_dict = state_dict["model"]
for k, v in state_dict.items():
if "patch_embed.proj.weight" in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == "pos_embed" and v.shape != model.pos_embed.shape:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(v, model.pos_embed)
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs):
default_cfg = default_cfgs[variant]
default_num_classes = default_cfg["num_classes"]
default_img_size = default_cfg["input_size"][-1]
num_classes = kwargs.pop("num_classes", default_num_classes)
img_size = kwargs.pop("img_size", default_img_size)
repr_size = kwargs.pop("representation_size", None)
if repr_size is not None and num_classes != default_num_classes:
# Remove representation layer if fine-tuning. This may not always be the desired action,
# but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface?
_logger.warning("Removing representation layer for fine-tuning.")
repr_size = None
model_cls = DistilledVisionTransformer if distilled else VisionTransformer
model = model_cls(
img_size=img_size,
num_classes=num_classes,
representation_size=repr_size,
**kwargs,
)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(
model,
num_classes=num_classes,
in_chans=kwargs.get("in_chans", 3),
filter_fn=partial(checkpoint_filter_fn, model=model),
strict=False,
)
return model
@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
    """ My custom 'small' ViT model. Depth=8, heads=8, mlp_ratio=3."""
model_kwargs = dict(
patch_size=16,
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3.0,
qkv_bias=False,
norm_layer=nn.LayerNorm,
**kwargs,
)
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
model_kwargs.setdefault("qk_scale", 768 ** -0.5)
model = _create_vision_transformer(
"vit_small_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
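# Construction sketch (illustrative config): this variant of VisionTransformer expects a
# `config` dict providing at least "drop_rate" and "num_frames"; the values below are
# placeholders, not the settings used for training.
def _vit_factory_sketch():
    cfg = {"drop_rate": 0.0, "num_frames": 4}
    return vit_base_patch16_224(pretrained=False, config=cfg)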
@register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch32_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_base_patch32_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
    """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch32_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer(
"vit_large_patch32_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_patch16_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=768,
depth=12,
num_heads=12,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch16_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(
patch_size=32,
embed_dim=1024,
depth=24,
num_heads=16,
representation_size=1024,
**kwargs,
)
model = _create_vision_transformer(
"vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
NOTE: converted weights not currently available, too large for github release hosting.
"""
model_kwargs = dict(
patch_size=14,
embed_dim=1280,
depth=32,
num_heads=16,
representation_size=1280,
**kwargs,
)
model = _create_vision_transformer(
"vit_huge_patch14_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50_224_in21k(pretrained=False, **kwargs):
""" R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
"""
# create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
backbone = ResNetV2(
layers=(3, 4, 9),
num_classes=0,
global_pool="",
in_chans=kwargs.get("in_chans", 3),
preact=False,
stem_type="same",
conv_layer=StdConv2dSame,
)
model_kwargs = dict(
embed_dim=768,
depth=12,
num_heads=12,
hybrid_backbone=backbone,
representation_size=768,
**kwargs,
)
model = _create_vision_transformer(
"vit_base_resnet50_224_in21k", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50_384(pretrained=False, **kwargs):
""" R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
# create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head
backbone = ResNetV2(
layers=(3, 4, 9),
num_classes=0,
global_pool="",
in_chans=kwargs.get("in_chans", 3),
preact=False,
stem_type="same",
conv_layer=StdConv2dSame,
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet50_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_small_resnet26d_224(pretrained=False, **kwargs):
""" Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3,
hybrid_backbone=backbone,
**kwargs,
)
model = _create_vision_transformer(
"vit_small_resnet26d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_small_resnet50d_s3_224(pretrained=False, **kwargs):
""" Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
"""
backbone = resnet50d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[3],
)
model_kwargs = dict(
embed_dim=768,
depth=8,
num_heads=8,
mlp_ratio=3,
hybrid_backbone=backbone,
**kwargs,
)
model = _create_vision_transformer(
"vit_small_resnet50d_s3_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet26d_224(pretrained=False, **kwargs):
""" Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet26d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_base_resnet50d_224(pretrained=False, **kwargs):
""" Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
"""
backbone = resnet50d(
pretrained=pretrained,
in_chans=kwargs.get("in_chans", 3),
features_only=True,
out_indices=[4],
)
model_kwargs = dict(
embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs
)
model = _create_vision_transformer(
"vit_base_resnet50d_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_tiny_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
"vit_deit_tiny_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_small_patch16_224(pretrained=False, **kwargs):
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
"vit_deit_small_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_base_patch16_224(pretrained=False, **kwargs):
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_224", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_base_patch16_384(pretrained=False, **kwargs):
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_patch16_384", pretrained=pretrained, **model_kwargs
)
return model
@register_model
def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer(
"vit_deit_tiny_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
@register_model
def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer(
"vit_deit_small_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
@register_model
def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_distilled_patch16_224",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
@register_model
def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs):
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer(
"vit_deit_base_distilled_patch16_384",
pretrained=pretrained,
distilled=True,
**model_kwargs,
)
return model
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/base_vision_transformer.py |
# Modified from https://github.com/lucidrains/CoCa-pytorch/blob/main/coca_pytorch/coca_pytorch.py
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# for controlling freezing of parameters
def set_module_requires_grad_(module, requires_grad):
for param in module.parameters():
param.requires_grad = requires_grad
def freeze_all_layers_(module):
set_module_requires_grad_(module, False)
def unfreeze_all_layers_(module):
set_module_requires_grad_(module, True)
def freeze_model_and_make_eval_(model):
model.eval()
freeze_all_layers_(model)
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
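# Shape sketch: SwiGLU splits the last dimension in half and gates one half with SiLU of the other.
def _swiglu_shape_sketch():
    return SwiGLU()(torch.randn(2, 10, 512)).shape  # torch.Size([2, 10, 256])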
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("mask", None, persistent=False)
self.register_buffer("pos_emb", None, persistent=False)
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n]
pos_emb = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x, attn_mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# extra attention mask - for masking out attention from text CLS token to padding
if exists(attn_mask):
attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim=None,
dim_head=64,
heads=8,
parallel_ff=False,
ff_mult=4,
norm_context=False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether to have parallel feedforward
ff_inner_dim = ff_mult * dim
self.ff = nn.Sequential(
nn.Linear(dim, ff_inner_dim * 2, bias=False),
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
) if parallel_ff else None
def forward(self, x, context):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
# pre-layernorm, for queries and context
x = self.norm(x)
context = self.context_norm(context)
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# scale
q = q * self.scale
# get key / values
k, v = self.to_kv(context).chunk(2, dim=-1)
# query / key similarity
sim = einsum('b h i d, b j d -> b h i j', q, k)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True)
attn = sim.softmax(dim=-1)
# aggregate
out = einsum('b h i j, b j d -> b h i d', attn, v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
# add parallel feedforward (for multimodal layers)
if exists(self.ff):
out = out + self.ff(x)
return out
# transformer
class CoCa(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
unimodal_depth,
multimodal_depth,
image_dim = None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
if exists(self.img_encoder):
freeze_model_and_make_eval_(self.img_encoder)
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# unimodal layers
self.unimodal_layers = nn.ModuleList([])
for ind in range(unimodal_depth):
self.unimodal_layers.append(
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
)
# multimodal layers
self.multimodal_layers = nn.ModuleList([])
for ind in range(multimodal_depth):
self.multimodal_layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult))
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through unimodal layers
for attn_ff in self.unimodal_layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
self.img_encoder.eval()
with torch.no_grad():
image_tokens = self.img_encoder(images).detach()
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
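    # Of the pooled queries returned above, index 0 (the extra CLS query) is used as the image
    # embedding for the contrastive loss, while the remaining num_img_queries tokens are what the
    # multimodal decoder cross-attends to.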
def forward(
self,
text,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_tokens = attn_ff(text_tokens)
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_embeds, image_embeds)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
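# A minimal smoke-test sketch (an illustration only, not part of the training pipeline): the
# sizes below are arbitrary, and precomputed image tokens are fed in so no img_encoder is needed.
if __name__ == "__main__":
    coca = CoCa(
        dim=64,
        num_tokens=1000,
        unimodal_depth=2,
        multimodal_depth=2,
        image_dim=64,
        num_img_queries=16,
        dim_head=32,
        heads=2,
    )
    text = torch.randint(1, 1000, (2, 12))   # (batch, seq); 0 is reserved as pad_id
    image_tokens = torch.randn(2, 49, 64)    # (batch, num_patches, image_dim)
    loss = coca(text, image_tokens=image_tokens, return_loss=True)
    print(loss)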
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/coca.py |
# from CoTrain.modules.cotrain_dino_module_v2 import CoTrainTransformerSS
from CoTrain.modules.cotrain_module import CoTrainTransformerSS
# from CoTrain.modules.cotrain_dino_module_v3 import CoTrainTransformerSS
from CoTrain.modules.clip_module import CLIP | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/__init__.py |
clip_param_keys = {
'positional_embedding',
'text_projection',
'visual.class_embedding',
'visual.positional_embedding',
'visual.conv1.weight',
'visual.ln_pre.weight',
'visual.ln_pre.bias',
'visual.transformer.resblocks.0.attn.in_proj_weight',
'visual.transformer.resblocks.0.attn.in_proj_bias',
'visual.transformer.resblocks.0.attn.out_proj.weight',
'visual.transformer.resblocks.0.attn.out_proj.bias',
'visual.transformer.resblocks.0.ln_1.weight',
'visual.transformer.resblocks.0.ln_1.bias',
'visual.transformer.resblocks.0.mlp.c_fc.weight',
'visual.transformer.resblocks.0.mlp.c_fc.bias',
'visual.transformer.resblocks.0.mlp.c_proj.weight',
'visual.transformer.resblocks.0.mlp.c_proj.bias',
'visual.transformer.resblocks.0.ln_2.weight',
'visual.transformer.resblocks.0.ln_2.bias',
'visual.transformer.resblocks.1.attn.in_proj_weight',
'visual.transformer.resblocks.1.attn.in_proj_bias',
'visual.transformer.resblocks.1.attn.out_proj.weight',
'visual.transformer.resblocks.1.attn.out_proj.bias',
'visual.transformer.resblocks.1.ln_1.weight',
'visual.transformer.resblocks.1.ln_1.bias',
'visual.transformer.resblocks.1.mlp.c_fc.weight',
'visual.transformer.resblocks.1.mlp.c_fc.bias',
'visual.transformer.resblocks.1.mlp.c_proj.weight',
'visual.transformer.resblocks.1.mlp.c_proj.bias',
'visual.transformer.resblocks.1.ln_2.weight',
'visual.transformer.resblocks.1.ln_2.bias',
'visual.transformer.resblocks.2.attn.in_proj_weight',
'visual.transformer.resblocks.2.attn.in_proj_bias',
'visual.transformer.resblocks.2.attn.out_proj.weight',
'visual.transformer.resblocks.2.attn.out_proj.bias',
'visual.transformer.resblocks.2.ln_1.weight',
'visual.transformer.resblocks.2.ln_1.bias',
'visual.transformer.resblocks.2.mlp.c_fc.weight',
'visual.transformer.resblocks.2.mlp.c_fc.bias',
'visual.transformer.resblocks.2.mlp.c_proj.weight',
'visual.transformer.resblocks.2.mlp.c_proj.bias',
'visual.transformer.resblocks.2.ln_2.weight',
'visual.transformer.resblocks.2.ln_2.bias',
'visual.transformer.resblocks.3.attn.in_proj_weight',
'visual.transformer.resblocks.3.attn.in_proj_bias',
'visual.transformer.resblocks.3.attn.out_proj.weight',
'visual.transformer.resblocks.3.attn.out_proj.bias',
'visual.transformer.resblocks.3.ln_1.weight',
'visual.transformer.resblocks.3.ln_1.bias',
'visual.transformer.resblocks.3.mlp.c_fc.weight',
'visual.transformer.resblocks.3.mlp.c_fc.bias',
'visual.transformer.resblocks.3.mlp.c_proj.weight',
'visual.transformer.resblocks.3.mlp.c_proj.bias',
'visual.transformer.resblocks.3.ln_2.weight',
'visual.transformer.resblocks.3.ln_2.bias',
'visual.transformer.resblocks.4.attn.in_proj_weight',
'visual.transformer.resblocks.4.attn.in_proj_bias',
'visual.transformer.resblocks.4.attn.out_proj.weight',
'visual.transformer.resblocks.4.attn.out_proj.bias',
'visual.transformer.resblocks.4.ln_1.weight',
'visual.transformer.resblocks.4.ln_1.bias',
'visual.transformer.resblocks.4.mlp.c_fc.weight',
'visual.transformer.resblocks.4.mlp.c_fc.bias',
'visual.transformer.resblocks.4.mlp.c_proj.weight',
'visual.transformer.resblocks.4.mlp.c_proj.bias',
'visual.transformer.resblocks.4.ln_2.weight',
'visual.transformer.resblocks.4.ln_2.bias',
'visual.transformer.resblocks.5.attn.in_proj_weight',
'visual.transformer.resblocks.5.attn.in_proj_bias',
'visual.transformer.resblocks.5.attn.out_proj.weight',
'visual.transformer.resblocks.5.attn.out_proj.bias',
'visual.transformer.resblocks.5.ln_1.weight',
'visual.transformer.resblocks.5.ln_1.bias',
'visual.transformer.resblocks.5.mlp.c_fc.weight',
'visual.transformer.resblocks.5.mlp.c_fc.bias',
'visual.transformer.resblocks.5.mlp.c_proj.weight',
'visual.transformer.resblocks.5.mlp.c_proj.bias',
'visual.transformer.resblocks.5.ln_2.weight',
'visual.transformer.resblocks.5.ln_2.bias',
'visual.transformer.resblocks.6.attn.in_proj_weight',
'visual.transformer.resblocks.6.attn.in_proj_bias',
'visual.transformer.resblocks.6.attn.out_proj.weight',
'visual.transformer.resblocks.6.attn.out_proj.bias',
'visual.transformer.resblocks.6.ln_1.weight',
'visual.transformer.resblocks.6.ln_1.bias',
'visual.transformer.resblocks.6.mlp.c_fc.weight',
'visual.transformer.resblocks.6.mlp.c_fc.bias',
'visual.transformer.resblocks.6.mlp.c_proj.weight',
'visual.transformer.resblocks.6.mlp.c_proj.bias',
'visual.transformer.resblocks.6.ln_2.weight',
'visual.transformer.resblocks.6.ln_2.bias',
'visual.transformer.resblocks.7.attn.in_proj_weight',
'visual.transformer.resblocks.7.attn.in_proj_bias',
'visual.transformer.resblocks.7.attn.out_proj.weight',
'visual.transformer.resblocks.7.attn.out_proj.bias',
'visual.transformer.resblocks.7.ln_1.weight',
'visual.transformer.resblocks.7.ln_1.bias',
'visual.transformer.resblocks.7.mlp.c_fc.weight',
'visual.transformer.resblocks.7.mlp.c_fc.bias',
'visual.transformer.resblocks.7.mlp.c_proj.weight',
'visual.transformer.resblocks.7.mlp.c_proj.bias',
'visual.transformer.resblocks.7.ln_2.weight',
'visual.transformer.resblocks.7.ln_2.bias',
'visual.transformer.resblocks.8.attn.in_proj_weight',
'visual.transformer.resblocks.8.attn.in_proj_bias',
'visual.transformer.resblocks.8.attn.out_proj.weight',
'visual.transformer.resblocks.8.attn.out_proj.bias',
'visual.transformer.resblocks.8.ln_1.weight',
'visual.transformer.resblocks.8.ln_1.bias',
'visual.transformer.resblocks.8.mlp.c_fc.weight',
'visual.transformer.resblocks.8.mlp.c_fc.bias',
'visual.transformer.resblocks.8.mlp.c_proj.weight',
'visual.transformer.resblocks.8.mlp.c_proj.bias',
'visual.transformer.resblocks.8.ln_2.weight',
'visual.transformer.resblocks.8.ln_2.bias',
'visual.transformer.resblocks.9.attn.in_proj_weight',
'visual.transformer.resblocks.9.attn.in_proj_bias',
'visual.transformer.resblocks.9.attn.out_proj.weight',
'visual.transformer.resblocks.9.attn.out_proj.bias',
'visual.transformer.resblocks.9.ln_1.weight',
'visual.transformer.resblocks.9.ln_1.bias',
'visual.transformer.resblocks.9.mlp.c_fc.weight',
'visual.transformer.resblocks.9.mlp.c_fc.bias',
'visual.transformer.resblocks.9.mlp.c_proj.weight',
'visual.transformer.resblocks.9.mlp.c_proj.bias',
'visual.transformer.resblocks.9.ln_2.weight',
'visual.transformer.resblocks.9.ln_2.bias',
'visual.transformer.resblocks.10.attn.in_proj_weight',
'visual.transformer.resblocks.10.attn.in_proj_bias',
'visual.transformer.resblocks.10.attn.out_proj.weight',
'visual.transformer.resblocks.10.attn.out_proj.bias',
'visual.transformer.resblocks.10.ln_1.weight',
'visual.transformer.resblocks.10.ln_1.bias',
'visual.transformer.resblocks.10.mlp.c_fc.weight',
'visual.transformer.resblocks.10.mlp.c_fc.bias',
'visual.transformer.resblocks.10.mlp.c_proj.weight',
'visual.transformer.resblocks.10.mlp.c_proj.bias',
'visual.transformer.resblocks.10.ln_2.weight',
'visual.transformer.resblocks.10.ln_2.bias',
'visual.transformer.resblocks.11.attn.in_proj_weight',
'visual.transformer.resblocks.11.attn.in_proj_bias',
'visual.transformer.resblocks.11.attn.out_proj.weight',
'visual.transformer.resblocks.11.attn.out_proj.bias',
'visual.transformer.resblocks.11.ln_1.weight',
'visual.transformer.resblocks.11.ln_1.bias',
'visual.transformer.resblocks.11.mlp.c_fc.weight',
'visual.transformer.resblocks.11.mlp.c_fc.bias',
'visual.transformer.resblocks.11.mlp.c_proj.weight',
'visual.transformer.resblocks.11.mlp.c_proj.bias',
'visual.transformer.resblocks.11.ln_2.weight',
'visual.transformer.resblocks.11.ln_2.bias',
'visual.ln_post.weight',
'visual.ln_post.bias',
'visual_ln_post.weight',
'visual_ln_post.bias',
'transformer.resblocks.0.attn.in_proj_weight',
'transformer.resblocks.0.attn.in_proj_bias',
'transformer.resblocks.0.attn.out_proj.weight',
'transformer.resblocks.0.attn.out_proj.bias',
'transformer.resblocks.0.ln_1.weight',
'transformer.resblocks.0.ln_1.bias',
'transformer.resblocks.0.mlp.c_fc.weight',
'transformer.resblocks.0.mlp.c_fc.bias',
'transformer.resblocks.0.mlp.c_proj.weight',
'transformer.resblocks.0.mlp.c_proj.bias',
'transformer.resblocks.0.ln_2.weight',
'transformer.resblocks.0.ln_2.bias',
'transformer.resblocks.1.attn.in_proj_weight',
'transformer.resblocks.1.attn.in_proj_bias',
'transformer.resblocks.1.attn.out_proj.weight',
'transformer.resblocks.1.attn.out_proj.bias',
'transformer.resblocks.1.ln_1.weight',
'transformer.resblocks.1.ln_1.bias',
'transformer.resblocks.1.mlp.c_fc.weight',
'transformer.resblocks.1.mlp.c_fc.bias',
'transformer.resblocks.1.mlp.c_proj.weight',
'transformer.resblocks.1.mlp.c_proj.bias',
'transformer.resblocks.1.ln_2.weight',
'transformer.resblocks.1.ln_2.bias',
'transformer.resblocks.2.attn.in_proj_weight',
'transformer.resblocks.2.attn.in_proj_bias',
'transformer.resblocks.2.attn.out_proj.weight',
'transformer.resblocks.2.attn.out_proj.bias',
'transformer.resblocks.2.ln_1.weight',
'transformer.resblocks.2.ln_1.bias',
'transformer.resblocks.2.mlp.c_fc.weight',
'transformer.resblocks.2.mlp.c_fc.bias',
'transformer.resblocks.2.mlp.c_proj.weight',
'transformer.resblocks.2.mlp.c_proj.bias',
'transformer.resblocks.2.ln_2.weight',
'transformer.resblocks.2.ln_2.bias',
'transformer.resblocks.3.attn.in_proj_weight',
'transformer.resblocks.3.attn.in_proj_bias',
'transformer.resblocks.3.attn.out_proj.weight',
'transformer.resblocks.3.attn.out_proj.bias',
'transformer.resblocks.3.ln_1.weight',
'transformer.resblocks.3.ln_1.bias',
'transformer.resblocks.3.mlp.c_fc.weight',
'transformer.resblocks.3.mlp.c_fc.bias',
'transformer.resblocks.3.mlp.c_proj.weight',
'transformer.resblocks.3.mlp.c_proj.bias',
'transformer.resblocks.3.ln_2.weight',
'transformer.resblocks.3.ln_2.bias',
'transformer.resblocks.4.attn.in_proj_weight',
'transformer.resblocks.4.attn.in_proj_bias',
'transformer.resblocks.4.attn.out_proj.weight',
'transformer.resblocks.4.attn.out_proj.bias',
'transformer.resblocks.4.ln_1.weight',
'transformer.resblocks.4.ln_1.bias',
'transformer.resblocks.4.mlp.c_fc.weight',
'transformer.resblocks.4.mlp.c_fc.bias',
'transformer.resblocks.4.mlp.c_proj.weight',
'transformer.resblocks.4.mlp.c_proj.bias',
'transformer.resblocks.4.ln_2.weight',
'transformer.resblocks.4.ln_2.bias',
'transformer.resblocks.5.attn.in_proj_weight',
'transformer.resblocks.5.attn.in_proj_bias',
'transformer.resblocks.5.attn.out_proj.weight',
'transformer.resblocks.5.attn.out_proj.bias',
'transformer.resblocks.5.ln_1.weight',
'transformer.resblocks.5.ln_1.bias',
'transformer.resblocks.5.mlp.c_fc.weight',
'transformer.resblocks.5.mlp.c_fc.bias',
'transformer.resblocks.5.mlp.c_proj.weight',
'transformer.resblocks.5.mlp.c_proj.bias',
'transformer.resblocks.5.ln_2.weight',
'transformer.resblocks.5.ln_2.bias',
'transformer.resblocks.6.attn.in_proj_weight',
'transformer.resblocks.6.attn.in_proj_bias',
'transformer.resblocks.6.attn.out_proj.weight',
'transformer.resblocks.6.attn.out_proj.bias',
'transformer.resblocks.6.ln_1.weight',
'transformer.resblocks.6.ln_1.bias',
'transformer.resblocks.6.mlp.c_fc.weight',
'transformer.resblocks.6.mlp.c_fc.bias',
'transformer.resblocks.6.mlp.c_proj.weight',
'transformer.resblocks.6.mlp.c_proj.bias',
'transformer.resblocks.6.ln_2.weight',
'transformer.resblocks.6.ln_2.bias',
'transformer.resblocks.7.attn.in_proj_weight',
'transformer.resblocks.7.attn.in_proj_bias',
'transformer.resblocks.7.attn.out_proj.weight',
'transformer.resblocks.7.attn.out_proj.bias',
'transformer.resblocks.7.ln_1.weight',
'transformer.resblocks.7.ln_1.bias',
'transformer.resblocks.7.mlp.c_fc.weight',
'transformer.resblocks.7.mlp.c_fc.bias',
'transformer.resblocks.7.mlp.c_proj.weight',
'transformer.resblocks.7.mlp.c_proj.bias',
'transformer.resblocks.7.ln_2.weight',
'transformer.resblocks.7.ln_2.bias',
'transformer.resblocks.8.attn.in_proj_weight',
'transformer.resblocks.8.attn.in_proj_bias',
'transformer.resblocks.8.attn.out_proj.weight',
'transformer.resblocks.8.attn.out_proj.bias',
'transformer.resblocks.8.ln_1.weight',
'transformer.resblocks.8.ln_1.bias',
'transformer.resblocks.8.mlp.c_fc.weight',
'transformer.resblocks.8.mlp.c_fc.bias',
'transformer.resblocks.8.mlp.c_proj.weight',
'transformer.resblocks.8.mlp.c_proj.bias',
'transformer.resblocks.8.ln_2.weight',
'transformer.resblocks.8.ln_2.bias',
'transformer.resblocks.9.attn.in_proj_weight',
'transformer.resblocks.9.attn.in_proj_bias',
'transformer.resblocks.9.attn.out_proj.weight',
'transformer.resblocks.9.attn.out_proj.bias',
'transformer.resblocks.9.ln_1.weight',
'transformer.resblocks.9.ln_1.bias',
'transformer.resblocks.9.mlp.c_fc.weight',
'transformer.resblocks.9.mlp.c_fc.bias',
'transformer.resblocks.9.mlp.c_proj.weight',
'transformer.resblocks.9.mlp.c_proj.bias',
'transformer.resblocks.9.ln_2.weight',
'transformer.resblocks.9.ln_2.bias',
'transformer.resblocks.10.attn.in_proj_weight',
'transformer.resblocks.10.attn.in_proj_bias',
'transformer.resblocks.10.attn.out_proj.weight',
'transformer.resblocks.10.attn.out_proj.bias',
'transformer.resblocks.10.ln_1.weight',
'transformer.resblocks.10.ln_1.bias',
'transformer.resblocks.10.mlp.c_fc.weight',
'transformer.resblocks.10.mlp.c_fc.bias',
'transformer.resblocks.10.mlp.c_proj.weight',
'transformer.resblocks.10.mlp.c_proj.bias',
'transformer.resblocks.10.ln_2.weight',
'transformer.resblocks.10.ln_2.bias',
'transformer.resblocks.11.attn.in_proj_weight',
'transformer.resblocks.11.attn.in_proj_bias',
'transformer.resblocks.11.attn.out_proj.weight',
'transformer.resblocks.11.attn.out_proj.bias',
'transformer.resblocks.11.ln_1.weight',
'transformer.resblocks.11.ln_1.bias',
'transformer.resblocks.11.mlp.c_fc.weight',
'transformer.resblocks.11.mlp.c_fc.bias',
'transformer.resblocks.11.mlp.c_proj.weight',
'transformer.resblocks.11.mlp.c_proj.bias',
'transformer.resblocks.11.ln_2.weight',
'transformer.resblocks.11.ln_2.bias',
'token_embedding.weight',
'visual.proj',
'visual_proj',
'ln_final.weight',
'ln_final.bias',
}
def gradually_freeze_by_layer(model, global_step, interval, unfreeze_type="both"):
try:
visual_n_layers = model.clip.visual.transformer.layers
except:
visual_n_layers = model.visual.transformer.layers
try:
text_n_layers = model.clip.transformer.layers
except:
text_n_layers = model.transformer.layers
if interval <= 0:
return
layer_to_unfreeze = global_step // interval
if (
visual_n_layers + 1 < layer_to_unfreeze
and text_n_layers + 1 < layer_to_unfreeze
): # Do nothing
return
embeddings = [
"token_embedding",
"visual.positional_embedding" "visual.class_embedding",
"positional_embedding",
]
visual_first_keys = [
"visual.conv1.weight",
"visual.ln_pre",
]
text_first_keys = []
visual_last_keys = ["visual.ln_post", "visual_ln_post" "visual_proj", "visual.proj"]
text_last_keys = ["ln_final"]
def set_unfreeze(n, p, l):
if any(x in n for x in l):
p.requires_grad = True
for n, p in model.named_parameters():
if layer_to_unfreeze == 0: # At the beginning of training
set_unfreeze(n, p, embeddings) # Unfreeze embeddings
if unfreeze_type in ["both", "visual"]:
set_unfreeze(n, p, visual_last_keys)
if unfreeze_type in ["both", "text"]: # Unfreeze text last layers
set_unfreeze(n, p, text_last_keys)
continue
if unfreeze_type in ["both", "visual"]:
if layer_to_unfreeze <= visual_n_layers: # Unfreeze n-th visual layer
param_prefix = f"visual.transformer.resblocks.{visual_n_layers - layer_to_unfreeze}"
set_unfreeze(n, p, [param_prefix])
else: # Unfreeze first visual layers
set_unfreeze(n, p, visual_first_keys)
if unfreeze_type in ["both", "text"]:
if layer_to_unfreeze <= text_n_layers: # Unfreeze n-th text layer
            param_prefix = (
                f"transformer.resblocks.{text_n_layers - layer_to_unfreeze}"
            )
set_unfreeze(n, p, [param_prefix])
        else:  # Unfreeze first text layers
set_unfreeze(n, p, text_first_keys)
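# Unfreezing schedule implied by the loop above (a reading aid, not extra behavior):
#   global_step // interval == 0: embeddings plus the last visual / text layers are unfrozen
#   global_step // interval == k: the k-th transformer block counted from the top is unfrozen
#                                 (resblocks.{n_layers - k}), for the visual and / or text tower
#   global_step // interval  > n_layers: the first layers (e.g. visual conv1 / ln_pre) are unfrozen
# For example, interval=1000 and global_step=2500 give layer_to_unfreeze=2, i.e. resblocks.{n_layers - 2}.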
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/clip_param_keys.py |
# Code for "ActionCLIP: ActionCLIP: A New Paradigm for Action Recognition"
# arXiv:
# Mengmeng Wang, Jiazheng Xing, Yong Liu
import torch
import CoTrain.modules.InternVideo as internvideo
from CoTrain.datasets import K400VideoDataset
def text_prompt(data = K400VideoDataset.classes(), prompt_type='all'):
if prompt_type == 'all':
text_aug = [f"a photo of action {{}}", f"a picture of action {{}}", f"Human action of {{}}", f"{{}}, an action",
f"{{}} this is an action", f"{{}}, a video of action", f"Playing action of {{}}", f"{{}}",
f"Playing a kind of action, {{}}", f"Doing a kind of action, {{}}", f"Look, the human is {{}}",
f"Can you recognize the action of {{}}?", f"Video classification of {{}}", f"A video of {{}}",
f"The man is {{}}", f"The woman is {{}}"]
elif prompt_type == 'single':
text_aug = [f"A video of {{}}"]
elif prompt_type == 'single_doing':
text_aug = [f"A person is doing {{}}"]
elif prompt_type == 'no':
text_aug = [f"{{}}"]
print('-' * 80)
print('Prompt:')
print(text_aug)
print('-' * 80)
text_dict = {}
num_text_aug = len(text_aug)
for ii, txt in enumerate(text_aug):
text_dict[ii] = torch.cat([internvideo.tokenize(txt.format(c), truncate=True) for c in data])
classes = torch.cat([v for _, v in text_dict.items()])
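    # `classes` stacks the tokenized prompts as [num_text_aug * num_classes, seq_len]
    # (all classes for template 0, then template 1, ...), while `text_dict[i]` keeps the
    # per-template view; averaging the per-template scores at evaluation time is up to the caller.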
return classes, num_text_aug, text_dict | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/text_prompt.py |
import random
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import itertools
from torch.utils.data.distributed import DistributedSampler
import torch.distributed.nn as distnn
from einops import rearrange, repeat
from CoTrain.modules.dist_utils import all_gather
from CoTrain.modules.retrieval_metrics import t2v_metrics, v2t_metrics
def cost_matrix_cosine(x, y, eps=1e-5):
"""Compute cosine distnace across every pairs of x, y (batched)
[B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]"""
assert x.dim() == y.dim()
assert x.size(0) == y.size(0)
assert x.size(2) == y.size(2)
x_norm = F.normalize(x, p=2, dim=-1, eps=eps)
y_norm = F.normalize(y, p=2, dim=-1, eps=eps)
cosine_sim = x_norm.matmul(y_norm.transpose(1, 2))
cosine_dist = 1 - cosine_sim
return cosine_dist
def trace(x):
""" compute trace of input tensor (batched) """
b, m, n = x.size()
assert m == n
mask = torch.eye(n, dtype=torch.bool, device=x.device).unsqueeze(0).expand_as(x)
trace = x.masked_select(mask).contiguous().view(b, n).sum(dim=-1, keepdim=False)
return trace
@torch.no_grad()
def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k):
""" [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]"""
b, m, n = C.size()
sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1)
T = torch.ones(b, n, m, dtype=C.dtype, device=C.device)
A = torch.exp(-C.transpose(1, 2) / beta)
# mask padded positions
sigma.masked_fill_(x_pad, 0)
joint_pad = joint_pad.transpose(1, 2)
T.masked_fill_(joint_pad, 0)
A.masked_fill_(joint_pad, 0)
# broadcastable lengths
x_len = x_len.unsqueeze(1).unsqueeze(2)
y_len = y_len.unsqueeze(1).unsqueeze(2)
# mask to zero out padding in delta and sigma
x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1)
y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1)
for _ in range(iteration):
Q = A * T # bs * n * m
sigma = sigma.view(b, m, 1)
for _ in range(k):
delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask)
sigma = 1 / (x_len * delta.matmul(Q) + x_mask)
T = delta.view(b, n, 1) * Q * sigma
T.masked_fill_(joint_pad, 0)
return T
def optimal_transport_dist(
txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1
):
""" [B, M, D], [B, N, D], [B, M], [B, N]"""
cost = cost_matrix_cosine(txt_emb, img_emb)
# mask the padded inputs
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
T = ipot(
cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k
)
distance = trace(cost.matmul(T.detach()))
return distance
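# Reading aid for the two functions above: `ipot` runs the inexact proximal point iteration for
# optimal transport on a cosine-distance cost matrix and returns a transport plan T of shape
# [B, N, M] (transposed relative to C), with padded positions zeroed out; `optimal_transport_dist`
# then scores each text / image pair by the transport cost trace(C @ T).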
def compute_mlm(pl_module, batch, infer=None, mode="video"):
if infer is None:
infer = pl_module.infer(batch, mask_text=True, mode=mode)
mlm_logits = pl_module.mlm_score(infer["text_feats"])
mlm_labels = infer["text_labels"]
mlm_loss = F.cross_entropy(
mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
mlm_labels.view(-1),
ignore_index=-100,
)
ret = {
"mlm_loss": mlm_loss,
"mlm_logits": mlm_logits,
"mlm_labels": mlm_labels,
"mlm_ids": infer["text_ids"],
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
ret["mlm_logits"].view(-1, pl_module.hparams.config["vocab_size"]),
ret["mlm_labels"].view(-1)
)
pl_module.log(f"mlm/{phase}/loss", loss)
pl_module.log(f"mlm/{phase}/accuracy", acc)
return ret
def sim_matrix(a, b, eps=1e-8):
"""
added eps for numerical stability
"""
a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]
a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))
b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))
sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))
return sim_mt
# == end
# add independent contrastive loss for retrieval
def compute_vtc(pl_module, batch, mode="video"):
infer_text = pl_module.infer(batch, mask_text=False, mask_video=False, input_text_only=True, mode=mode)
with torch.cuda.amp.autocast(enabled=False):
txt_emb = infer_text["text_feats"]
infer_vision = pl_module.infer(batch, mask_text=False, mask_video=False, input_video_only=True, mode=mode)
with torch.cuda.amp.autocast(enabled=False):
img_emb = infer_vision["video_feats"]
# print(txt_emb.size(), img_emb.size())
x = sim_matrix(txt_emb[:, 0], img_emb[:, 0])
temperature = 0.05
"Assumes input x is similarity matrix of N x M \in [-1, 1], computed using the cosine similarity between normalised vectors"
i_logsm = F.log_softmax(x / temperature, dim=1)
j_logsm = F.log_softmax(x.t() / temperature, dim=1)
# sum over positives
idiag = torch.diag(i_logsm)
loss_i = idiag.sum() / len(idiag)
jdiag = torch.diag(j_logsm)
loss_j = jdiag.sum() / len(jdiag)
itc_loss = - loss_i - loss_j
ret = {
"vtc_loss": itc_loss,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vtc_loss")(ret["vtc_loss"])
pl_module.log(f"vtc/{phase}/loss", loss)
return ret
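# The VTC objective above is the standard symmetric contrastive (InfoNCE-style) loss over the CLS
# features: the log-softmax is taken over both the video-to-text and text-to-video directions with
# temperature 0.05, and the loss is the negated mean of the diagonal (matched-pair) terms, summed
# over the two directions.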
# == end
def compute_vtm_wpa(pl_module, batch, mode="video"):
pos_len = len(batch["text"]) // 2
neg_len = len(batch["text"]) - pos_len
vtm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to(
pl_module.device
)
vtm_labels = vtm_labels[torch.randperm(vtm_labels.size(0))]
# print(batch.keys())
vtm_videos = [
torch.stack(
[
ti if vtm_labels[i] == 1 else fi
for i, (ti, fi) in enumerate(zip(bti, bfi))
]
)
for bti, bfi in zip(batch["video"], batch["false_video_0"])
]
batch = {k: v for k, v in batch.items()}
batch["video"] = vtm_videos
infer = pl_module.infer(batch, mask_text=False, mask_video=False, mode=mode)
with torch.cuda.amp.autocast(enabled=False):
txt_emb, img_emb = infer["text_feats"], infer["video_feats"]
txt_mask, img_mask = infer["text_masks"].bool(), infer["video_masks"].bool()
for i, _len in enumerate(txt_mask.sum(dim=1)):
txt_mask[i, _len - 1] = False
txt_mask[:, 0] = False
img_mask[:, 0] = False
if "deit" in pl_module.hparams.config["vit"]:
img_mask[:, 1] = False
txt_pad, img_pad = ~txt_mask, ~img_mask
cost = cost_matrix_cosine(txt_emb.float(), img_emb.float())
joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)
cost.masked_fill_(joint_pad, 0)
txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(
dtype=cost.dtype
)
img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(
dtype=cost.dtype
)
T = ipot(
cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1
)
distance = trace(cost.matmul(T.detach()))
dist_pos = distance.masked_select(vtm_labels == 1)
dist_neg = distance.masked_select(vtm_labels == 0)
ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0))
vtm_logits = pl_module.vtm_score(infer["cls_feats"])
vtm_loss = F.cross_entropy(vtm_logits, vtm_labels.long())
ret = {
"vtm_loss": vtm_loss,
"vtm_wpa_loss": 0.1 * ot_loss,
"vtm_logits": vtm_logits,
"vtm_labels": vtm_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vtm_loss")(ret["vtm_loss"])
wpa_loss = getattr(pl_module, f"{phase}_vtm_wpa_loss")(ret["vtm_wpa_loss"])
acc = getattr(pl_module, f"{phase}_vtm_accuracy")(
ret["vtm_logits"], ret["vtm_labels"]
)
pl_module.log(f"vtm/{phase}/loss", loss)
pl_module.log(f"vtm/{phase}/wpa_loss", wpa_loss)
pl_module.log(f"vtm/{phase}/accuracy", acc)
return ret
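# The objective above combines video-text matching (VTM, a 2-way cross-entropy on shuffled
# positive / negative pairs) with a word-patch alignment (WPA) term: the IPOT optimal-transport
# distance between text and video tokens is pushed down for matched pairs and up for mismatched
# ones, and the resulting loss is down-weighted by 0.1.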
def compute_imgcls(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_video=False)
imgcls_logits = pl_module.img_classifier(infer["cls_feats"])
imgcls_labels = batch["label"]
imgcls_labels = torch.tensor(imgcls_labels).to(pl_module.device).long()
imgcls_loss = F.cross_entropy(imgcls_logits, imgcls_labels)
ret = {
"imgcls_loss": imgcls_loss,
"imgcls_logits": imgcls_logits,
"imgcls_labels": imgcls_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"])
acc = getattr(pl_module, f"{phase}_imgcls_accuracy")(
ret["imgcls_logits"], ret["imgcls_labels"]
)
pl_module.log(f"imgcls/{phase}/loss", loss)
pl_module.log(f"imgcls/{phase}/accuracy", acc)
return ret
# vcr q -> a
def compute_vcr_q2a(pl_module, batch):
false_len = pl_module.hparams.config["draw_options_text"] - 1
vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long()
_bs, _t, _c, _h, _w = batch["video"][0].shape
# for qa
text_ids = torch.stack(
[batch[f"options_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"options_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"options_text_{i}_labels"] for i in range(false_len)], dim=1
)
# concat first option and other options
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w)
infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
qa_loss = F.cross_entropy(score, vtm_labels)
# for qa->r
reason_len = pl_module.hparams.config["draw_options_text"]
qar_labels = torch.tensor(batch["reason_answer"]).to(pl_module.device).long()
_bs, _t, _c, _h, _w = batch["video"][0].shape
# for qar
qar_text_ids = torch.stack(
[batch[f"qar_text_{i}_ids"] for i in range(reason_len)], dim=1
)
qar_text_masks = torch.stack(
[batch[f"qar_text_{i}_masks"] for i in range(reason_len)], dim=1
)
qar_text_labels = torch.stack(
[batch[f"qar_text_{i}_labels"] for i in range(reason_len)], dim=1
)
# concat first option and other options
videos = batch["video"][0].unsqueeze(1).expand(_bs, reason_len, _t, _c, _h, _w)
qar_infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(qar_text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(qar_text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(qar_text_labels, "bs fs tl -> (bs fs) tl"),
}
)
qar_score = pl_module.rank_output_2(qar_infer["cls_feats"])[:, 0]
qar_score = rearrange(qar_score, "(bs fs) -> bs fs", bs=_bs, fs=reason_len)
qar_loss = F.cross_entropy(qar_score, qar_labels)
# print(score, vtm_labels)
phase = "train" if pl_module.training else "val"
qa_acc = getattr(pl_module, f"{phase}_vcr_q2a_accuracy")(
score, vtm_labels
)
qar_acc = getattr(pl_module, f"{phase}_vcr_qar_accuracy")(
qar_score, qar_labels
)
ret = {
"vcr_q2a_loss": qa_loss,
"vcr_qar_loss": qar_loss
}
phase = "train" if pl_module.training else "val"
qa_loss = getattr(pl_module, f"{phase}_vcr_q2a_loss")(ret["vcr_q2a_loss"])
qar_loss = getattr(pl_module, f"{phase}_vcr_qar_loss")(ret["vcr_qar_loss"])
pl_module.log(f"vcr_q2a/{phase}/loss", qa_loss)
pl_module.log(f"vcr_qar/{phase}/loss", qar_loss)
pl_module.log(f"vcr_q2a/{phase}/accuracy", qa_acc)
pl_module.log(f"vcr_qar/{phase}/accuracy", qar_acc)
return ret
# vcr qa -> r
def compute_vcr_qa2r(pl_module, batch):
false_len = pl_module.hparams.config["draw_false_text"] - 1
# stack video multiple times
# print(batch["answer"])
vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long()
_bs, _t, _c, _h, _w = batch["video"][0].shape
# print(batch.keys())
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
# concat first option and other options
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w)
infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
loss = F.cross_entropy(score, vtm_labels)
# print(score, vtm_labels)
phase = "train" if pl_module.training else "val"
acc = getattr(pl_module, f"{phase}_multiple_choice_accuracy")(
score, vtm_labels
)
ret = {
"multiple_choice_loss": loss,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_multiple_choice_loss")(ret["multiple_choice_loss"])
pl_module.log(f"multiple_choice/{phase}/loss", loss)
pl_module.log(f"multiple_choice/{phase}/accuracy", acc)
return ret
# mc_vqa
def compute_mc_vqa_q2a(pl_module, batch):
false_len = pl_module.hparams.config["draw_options_text"] - 1
vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long()
_bs, _t, _c, _h, _w = batch["video"][0].shape
# for qa
text_ids = torch.stack(
[batch[f"options_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"options_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"options_text_{i}_labels"] for i in range(false_len)], dim=1
)
# concat first option and other options
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w)
infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
## v0: use rank output
# score = pl_module.rank_output(infer["cls_feats"])[:, 0]
## v1: use classification head
# print(infer["cls_feats"].size()) # 40, 768
score = pl_module.mc_vqa_classifier(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
qa_loss = F.cross_entropy(score, vtm_labels)
# print(score, vtm_labels)
phase = "train" if pl_module.training else "val"
qa_acc = getattr(pl_module, f"{phase}_mc_vqa_accuracy")(
score, vtm_labels
)
ret = {
"mc_vqa_loss": qa_loss,
}
phase = "train" if pl_module.training else "val"
qa_loss = getattr(pl_module, f"{phase}_mc_vqa_loss")(ret["mc_vqa_loss"])
pl_module.log(f"mc_vqa/{phase}/loss", qa_loss)
pl_module.log(f"mc_vqa/{phase}/accuracy", qa_acc)
return ret
# msrvtt multiple choice
def compute_multiple_choice(pl_module, batch):
false_len = pl_module.hparams.config["draw_false_text"] - 1
# stack image multiple times
# print(batch["answer"])
vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long()
_bs, _t, _c, _h, _w = batch["video"][0].shape
# print(batch.keys())
texts = [batch[f"false_text_{i}"] for i in range(false_len)]
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
# concat first option and other options
texts = [batch["text"]] + texts
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
if "cotrain" in type(pl_module).__name__.lower():
videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w)
infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
elif "clip" in type(pl_module).__name__.lower():
if pl_module.mc_type == "vtc":
videos = batch["video"][0]
infer = pl_module.infer(
{
"video": [videos],
"text": [x for y in zip(*texts) for x in y]
}
)
video_feats = infer["video_feats"] # 8 * 512
text_feats = infer["text_feats"] # 40 * 512
video_feats = video_feats / video_feats.norm(dim=1, keepdim=True)
text_feats = text_feats / text_feats.norm(dim=1, keepdim=True)
text_feats = rearrange(text_feats, "(bs fs) c -> bs fs c", bs=_bs, fs=false_len + 1)
score = torch.einsum("bc,bfc->bf", video_feats, text_feats) * pl_module.clip.logit_scale.exp()
# elif pl_module.mc_type == "vtc":
# videos = batch["video"][0]
# scores = []
# for _ in range(int(round(1 / ((1 - pl_module.hparams.config["mim_prob"]))))):
# infer = pl_module.infer(
# {
# "video": [videos],
# "text": [x for y in zip(*texts) for x in y]
# },
# mask_video=True,
# )
# video_feats = infer["video_feats"] # 8 * 512
# text_feats = infer["text_feats"] # 40 * 512
# video_feats = video_feats / video_feats.norm(dim=1, keepdim=True)
# text_feats = text_feats / text_feats.norm(dim=1, keepdim=True)
# text_feats = rearrange(text_feats, "(bs fs) c -> bs fs c", bs=_bs, fs=false_len + 1)
# score = torch.einsum("bc,bfc->bf", video_feats, text_feats) * pl_module.clip.logit_scale.exp()
# scores.append(score)
# score = sum([x.softmax(dim=-1) for x in scores]) / len(scores)
elif pl_module.mc_type == "vtc_cap":
videos = batch["video"][0]
videos = repeat(videos, 'b t c h w -> (b fs) t c h w', fs=false_len + 1)
infer = pl_module.infer(
{
"video": [videos],
"text": [x for y in zip(*texts) for x in y]
},
caption=True,
)
feats = infer["cap_logits"]
feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)]
mc_logits = pl_module.rank_output(
torch.cat([feats, infer["video_feats"], infer["text_feats"]], dim=1))
score = mc_logits.reshape(_bs, false_len + 1)
else:
raise NotImplementedError("Not implemented for model {}".format(pl_module))
loss = F.cross_entropy(score, vtm_labels)
# print(score, itm_labels)
phase = "train" if pl_module.training else "val"
acc = getattr(pl_module, f"{phase}_multiple_choice_accuracy")(
score, vtm_labels
)
# print(acc)
ret = {
"multiple_choice_loss": loss,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_multiple_choice_loss")(ret["multiple_choice_loss"])
pl_module.log(f"multiple_choice/{phase}/loss", loss)
pl_module.log(f"multiple_choice/{phase}/accuracy", acc)
return ret
def compute_vqa(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_video=False)
vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
vqa_targets = torch.zeros(
len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
).to(pl_module.device)
vqa_labels = batch["vqa_labels"]
vqa_scores = batch["vqa_scores"]
for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(_label, _score):
vqa_targets[i, l] = s
vqa_loss = (
F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
* vqa_targets.shape[1]
) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
ret = {
"vqa_loss": vqa_loss,
"vqa_logits": vqa_logits,
"vqa_targets": vqa_targets,
"vqa_labels": vqa_labels,
"vqa_scores": vqa_scores,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
score = getattr(pl_module, f"{phase}_vqa_score")(
ret["vqa_logits"], ret["vqa_targets"]
)
pl_module.log(f"vqa/{phase}/loss", loss)
pl_module.log(f"vqa/{phase}/score", score)
return ret
# add by vcop
def compute_vcop(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_video=False)
x = infer["vcop_features"] # BTLC
b = x.size(0)
# # v1: simple concat
# gt_labels = torch.ones(b)
# idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order
# classes = list(itertools.permutations(list(range(len(idx.tolist())))))
# label = classes.index(tuple(idx.tolist()))
# h = x[0, idx, 0].view(1, -1)
# gt_labels[0] = label
# for index in range(1, b):
# idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order
# classes = list(itertools.permutations(list(range(len(idx.tolist())))))
# label = classes.index(tuple(idx.tolist()))
# gt_labels[index] = label
# h = torch.cat((h, x[index, idx, 0].view(1, -1)), dim=0)
# v2: vcop implementation
gt_labels = torch.ones(b)
idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order
classes = list(itertools.permutations(list(range(len(idx.tolist())))))
label = classes.index(tuple(idx.tolist()))
h = x[0, idx, 0].unsqueeze(0)
gt_labels[0] = label
for index in range(1, b):
idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order
classes = list(itertools.permutations(list(range(len(idx.tolist())))))
label = classes.index(tuple(idx.tolist()))
gt_labels[index] = label
h = torch.cat((h, x[index, idx, 0].unsqueeze(0)), dim=0)
vcop_logits = pl_module.vcop_classifier(h)
vcop_labels = gt_labels.to(pl_module.device).long()
m = nn.Softmax(dim=1)
if random.random() < 0.01:
print(m(vcop_logits)[0], vcop_labels[0])
# print(vcop_labels)
vcop_loss = F.cross_entropy(vcop_logits, vcop_labels)
ret = {
"vcop_loss": vcop_loss,
"vcop_logits": vcop_logits,
"vcop_labels": vcop_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vcop_loss")(ret["vcop_loss"])
pl_module.log(f"vcop/{phase}/loss", loss)
acc = getattr(pl_module, f"{phase}_vcop_accuracy")(
ret["vcop_logits"], ret["vcop_labels"], unfilterd=False # if remove unknown classes
)
# print(acc)
pl_module.log(f"vcop/{phase}/accuracy", acc)
return ret
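# VCOP (video clip order prediction): the frames of each sample are shuffled with a random
# permutation, the label is that permutation's index in itertools.permutations, and the classifier
# predicts it, so `pl_module.vcop_classifier` needs num_frames! output classes.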
# add for dino
def compute_dino(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_video=False)
x = infer["dino_features"] # BTLC
b = x.size(0)
    # NOTE: `pl_module.dino_loss` is the DINO loss module; the logits / labels packed into `ret`
    # below are assumed to be exposed by the DINO head through `infer` (the exact keys are an
    # assumption, since this objective is only sketched here).
    dino_loss = pl_module.dino_loss
    dino_logits = infer.get("dino_logits")
    dino_labels = infer.get("dino_labels")
ret = {
"dino_loss": dino_loss,
"dino_logits": dino_logits,
"dino_labels": dino_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_dino_loss")(ret["dino_loss"])
pl_module.log(f"dino/{phase}/loss", loss)
acc = getattr(pl_module, f"{phase}_dino_accuracy")(
ret["dino_logits"], ret["dino_labels"], unfilterd=False # if remove unknown classes
)
pl_module.log(f"dino/{phase}/accuracy", acc)
return ret
# add by msrvtt qa
def compute_openend_vqa(pl_module, batch):
phase = "train" if pl_module.training else "val"
# batch["false_video_0"] = batch["false_video_0"][0]
if "allinone" in type(pl_module).__name__.lower():
batch["video"] = batch["video"][0]
infer = pl_module.infer(batch, mask_text=False, mask_video=False, mode="video")
vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
elif "clip" in type(pl_module).__name__.lower():
if pl_module.qa_type == "vtc":
infer = pl_module.infer(batch, mask_text=False, mask_video=False, mode="video")
vqa_logits = pl_module.vqa_classifier(
torch.cat([infer["video_feats"], infer["text_feats"]], dim=1)
)
elif pl_module.qa_type == "cap":
infer = pl_module.infer(batch, mask_text=False, mask_video=False, caption=True, mode="video")
            # Take the feats of the eot_token; the features are sourced from the caption logits
            # (an assumption, mirroring the "vtc_cap" branch below) and indexed at the eot position
            feats = infer["cap_logits"]
            feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)]
vqa_logits = pl_module.vqa_classifier(feats)
elif pl_module.qa_type == "vtc_cap":
infer = pl_module.infer(batch, mask_text=False, mask_video=False, caption=True, mode="video")
feats = infer["cap_logits"]
feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)]
vqa_logits = pl_module.vqa_classifier(
torch.cat([feats, infer["video_feats"], infer["text_feats"]], dim=1))
elif pl_module.qa_type == "vtc_mlm":
del batch["clip_text_ids"]
assert "clip_text_ids" not in batch
batch["text"] = [f"Question: {q} Answer: " for q in batch["text"]]
infer = pl_module.infer(batch, mask_text=True, mask_video=False, mode="video")
# vqa_logits = pl_module.mlm_score(infer["text_feats"])
# vqa_logits = vqa_logits[torch.arange(vqa_logits.shape[0]), infer["text_ids"].argmax(dim=-1)]
# id_idxes = batch["ans_clip_id"][0]
# vqa_logits = vqa_logits[:, id_idxes]
feats = infer["text_feats"]
feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)]
vqa_logits = pl_module.vqa_classifier(
torch.cat([feats, infer["video_feats"], infer["text_contrastive_feats"]], dim=1)
)
# vqa_logits = (vqa_logits + vqa_logits_all[:, :vqa_logits.size(1)]) / 2
elif pl_module.qa_type in ["zs", "mlm"]:
del batch["clip_text_ids"]
assert "clip_text_ids" not in batch
batch["text"] = [f"Question: {q} Answer: " for q in batch["text"]]
infer = pl_module.infer(batch, mask_text=True, mask_video=False, mode="video")
vqa_logits = pl_module.mlm_score(infer["text_feats"])
vqa_logits = vqa_logits[torch.arange(vqa_logits.shape[0]), infer["text_ids"].argmax(dim=-1)]
id_idxes = batch["ans_clip_id"][0]
vqa_logits = vqa_logits[:, id_idxes]
else:
raise NotImplementedError("Not implemented for model {}".format(pl_module))
vqa_labels = torch.tensor(batch["vqa_labels"]).to(pl_module.device).long()
# print(vqa_logits.size())
# print(vqa_labels)
vqa_loss = F.cross_entropy(vqa_logits, vqa_labels)
ret = {
"vqa_loss": vqa_loss,
"vqa_logits": vqa_logits,
"vqa_labels": vqa_labels,
}
loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
pl_module.log(f"vqa/{phase}/loss", loss)
acc = getattr(pl_module, f"{phase}_openend_vqa_accuracy")(
ret["vqa_logits"].clone(), ret["vqa_labels"].clone() # if remove unknown classes
)
pl_module.log(f"vqa/{phase}/accuracy", acc)
return ret
def compute_nlvr2(pl_module, batch):
infer1 = pl_module.infer(
batch, mask_text=False, mask_video=False, video_token_type_idx=1
)
infer2 = pl_module.infer(
batch, mask_text=False, mask_video=False, video_token_type_idx=2
)
cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
nlvr2_labels = batch["answers"]
nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels)
ret = {
"nlvr2_loss": nlvr2_loss,
"nlvr2_logits": nlvr2_logits,
"nlvr2_labels": nlvr2_labels,
}
phase = "train" if pl_module.training else "val"
if phase == "train":
loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
ret["nlvr2_logits"], ret["nlvr2_labels"]
)
pl_module.log(f"nlvr2/{phase}/loss", loss)
pl_module.log(f"nlvr2/{phase}/accuracy", acc)
else:
dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if dev_batches:
dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
)
dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
pl_module.log(f"nlvr2/dev/loss", dev_loss)
pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
pl_module.log(f"nlvr2/test/loss", test_loss)
pl_module.log(f"nlvr2/test/accuracy", test_acc)
return ret
def compute_irtr(pl_module, batch):
is_training_phase = pl_module.training
# modify to module
_bs, _t, _c, _h, _w = batch["video"][0].shape
false_len = pl_module.hparams.config["draw_false_text"]
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w)
infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
answer = torch.zeros(_bs).to(score).long()
irtr_loss = F.cross_entropy(score, answer)
ret = {
"irtr_loss": irtr_loss,
}
phase = "train" if pl_module.training else "val"
irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss)
return ret
# use this method to achieve multiple-view testing
@torch.no_grad()
def compute_irtr_recall(pl_module):
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=64,
num_workers=pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
video_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
video_only=True
)
video_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
dist_sampler = DistributedSampler(video_dset, shuffle=False)
video_loader = torch.utils.data.DataLoader(
video_dset,
batch_size=1,
num_workers=pl_module.hparams.config["num_workers"],
sampler=dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
video_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
text_preload = list()
for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
text_preload.append(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
"img_index": _b["img_index"],
}
)
tiids = list()
for pre in text_preload:
tiids += pre["img_index"]
tiids = torch.tensor(tiids)
video_preload = list()
for _b in tqdm.tqdm(video_loader, desc="video prefetch loop"):
video = _b["video"][0]
# print(video.size())
(ie, im, _, _) = pl_module.transformer.visual_embed(
video.to(pl_module.device),
max_video_len=pl_module.hparams.config["max_video_len"],
mask_it=False,
)
video_preload.append((ie, im, _b["img_index"][0]))
rank_scores = list()
rank_iids = list()
for img_batch in tqdm.tqdm(video_preload, desc="rank loop"):
_ie, _im, _iid = img_batch
num_frames, l, c = _ie.shape
# print(_ie.size()) # 1x197x168
# print(_im.size()) # 1x197
_ie.unsqueeze(0)
_im.unsqueeze(0)
img_batch_score = list()
for txt_batch in text_preload:
fblen = len(txt_batch["text_ids"])
ie = _ie.expand(fblen, num_frames, l, c)
# print(ie.size())
im = _im.expand(fblen, num_frames, l)
ie = ie.contiguous().view(-1, l, c)
im = im.contiguous().view(-1, l)
with torch.cuda.amp.autocast():
score = pl_module.rank_output(
pl_module.infer(
{
"text_ids": txt_batch["text_ids"],
"text_masks": txt_batch["text_masks"],
"text_labels": txt_batch["text_labels"],
},
video_embeds=ie,
v_masks=im,
)["cls_feats"]
)[:, 0]
img_batch_score.append(score)
img_batch_score = torch.cat(img_batch_score)
rank_scores.append(img_batch_score.cpu().tolist())
rank_iids.append(_iid)
torch.distributed.barrier()
gather_rank_scores = all_gather(rank_scores)
gather_rank_iids = all_gather(rank_iids)
iids = torch.tensor(gather_rank_iids)
iids = iids.view(-1)
scores = torch.tensor(gather_rank_scores)
scores = scores.view(len(iids), -1)
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
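# In the score matrix above rows index videos and columns index texts, so the tr_r* recalls
# measure retrieving the matching text given a video (top-k over dim=1) and the ir_r* recalls
# measure retrieving the matching video given a text (top-k over dim=0).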
@torch.no_grad()
def compute_decouple_irtr_recall(pl_module):
sample_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
)
sample_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
dist_sampler = DistributedSampler(sample_dset, shuffle=False)
sample_loader = torch.utils.data.DataLoader(
sample_dset,
batch_size=1,
num_workers=pl_module.hparams.config["num_workers"],
sampler=dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
sample_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
text_preload = list()
text_embed_arr = []
vid_embed_arr = []
count = 0
with torch.no_grad():
for _b in tqdm.tqdm(sample_loader, desc="text&video prefetch loop"):
# print(_b)
# print(_b.keys())
_b["text_ids"] = _b["text_ids"].to(pl_module.device)
_b["text_masks"] = _b["text_masks"].to(pl_module.device)
_b["text_labels"] = _b["text_labels"].to(pl_module.device)
_b["video"][0] = _b["video"][0].to(pl_module.device)
infer = pl_module.infer(_b, mask_text=False, mask_video=False)
with torch.cuda.amp.autocast(enabled=False):
text_embed, vid_embed = infer["text_retrieval_feats"], infer["video_retrieval_feats"]
if vid_embed is not None:
vid_embed_all = [torch.zeros_like(vid_embed) for _ in range(pl_module.hparams.config["num_gpus"])]
torch.distributed.all_gather(vid_embed_all, vid_embed)
vid_embed_all = torch.cat(vid_embed_all, dim=0)
if text_embed is not None:
text_embed_all = [torch.zeros_like(text_embed) for _ in range(pl_module.hparams.config["num_gpus"])]
torch.distributed.all_gather(text_embed_all, text_embed)
text_embed_all = torch.cat(text_embed_all, dim=0)
text_embed_arr.append(text_embed_all.cpu())
vid_embed_arr.append(vid_embed_all.cpu())
count += 1
text_embeds = torch.cat(text_embed_arr)
vid_embeds = torch.cat(vid_embed_arr)
# print(text_embeds.size(), vid_embeds.size())
st2sv_sims = sim_matrix(text_embeds, vid_embeds).detach().cpu().numpy()
for metric in [t2v_metrics, v2t_metrics]:
metric_name = metric.__name__
metrics = metric(st2sv_sims)
if metric == t2v_metrics:
tr_r1, tr_r5, tr_r10, tr_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"]
else:
ir_r1, ir_r5, ir_r10, ir_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"]
# msg += f"MedR: {metrics['MedR']:g}, MeanR: {metrics['MeanR']:.1f}"
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
@torch.no_grad()
def compute_zero_shot_classify_recall(pl_module, batch):
    # process all prompted action labels into text representations
false_len = pl_module.hparams.config["draw_false_text"] - 1
# stack video multiple times
# print(batch["answer"])
vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long()
_bs, _t, _c, _h, _w = batch["video"][0].shape
# print(batch.keys())
text_ids = torch.stack(
[batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1
)
text_masks = torch.stack(
[batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1
)
text_labels = torch.stack(
[batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1
)
# concat first option and other options
text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1)
text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1)
text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1)
videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w)
infer = pl_module.infer(
{
"video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")],
"text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"),
"text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"),
"text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"),
}
)
score = pl_module.rank_output(infer["cls_feats"])[:, 0]
score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1)
loss = F.cross_entropy(score, vtm_labels)
# print(score, vtm_labels)
phase = "train" if pl_module.training else "val"
acc = getattr(pl_module, f"{phase}_zero_shot_accuracy")(
score, vtm_labels
)
# print(acc)
ret = {
"multiple_choice_loss": loss,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_multiple_choice_loss")(ret["multiple_choice_loss"])
pl_module.log(f"multiple_choice/{phase}/loss", loss)
pl_module.log(f"multiple_choice/{phase}/accuracy", acc)
return acc
# for ind itc
@torch.no_grad()
def compute_ind_irtr_recall(pl_module):
num_views = pl_module.hparams.config["retrieval_views"]
text_embed_arr_multi = []
vid_embed_arr_multi = []
for i in range(num_views):
sample_dset = pl_module.trainer.datamodule.video_dms[0].make_no_false_val_dset(
)
sample_dset.tokenizer = pl_module.trainer.datamodule.video_dms[0].tokenizer
dist_sampler = DistributedSampler(sample_dset, shuffle=False)
sample_loader = torch.utils.data.DataLoader(
sample_dset,
batch_size=1,
num_workers=pl_module.hparams.config["num_workers"],
sampler=dist_sampler,
pin_memory=True,
collate_fn=functools.partial(
sample_dset.collate,
mlm_collator=pl_module.trainer.datamodule.video_dms[0].mlm_collator,
),
)
text_preload = list()
text_embed_arr = []
vid_embed_arr = []
count = 0
with torch.no_grad():
for _b in tqdm.tqdm(sample_loader, desc="text&video prefetch loop"):
# print(_b)
# print(_b.keys())
_b["text_ids"] = _b["text_ids"].to(pl_module.device)
_b["text_masks"] = _b["text_masks"].to(pl_module.device)
_b["text_labels"] = _b["text_labels"].to(pl_module.device)
_b["video"][0] = _b["video"][0].to(pl_module.device)
# infer = pl_module.infer(_b, mask_text=False, mask_video=False)
infer_text = pl_module.infer(_b, mask_text=False, mask_video=False, input_text_only=True)
infer_vision = pl_module.infer(_b, mask_text=False, mask_video=False, input_video_only=True)
with torch.cuda.amp.autocast(enabled=False):
# text_embed, vid_embed = infer_text["raw_cls_feats"], infer_vision["raw_cls_feats"]
text_embed, vid_embed = infer_text["text_feats"][:, 0], infer_vision["video_feats"][:, 0]
if vid_embed is not None:
vid_embed_all = [torch.zeros_like(vid_embed) for _ in range(pl_module.hparams.config["num_gpus"])]
torch.distributed.all_gather(vid_embed_all, vid_embed)
vid_embed_all = torch.cat(vid_embed_all, dim=0)
if text_embed is not None:
text_embed_all = [torch.zeros_like(text_embed) for _ in range(pl_module.hparams.config["num_gpus"])]
torch.distributed.all_gather(text_embed_all, text_embed)
text_embed_all = torch.cat(text_embed_all, dim=0)
text_embed_arr.append(text_embed_all.cpu())
vid_embed_arr.append(vid_embed_all.cpu())
count += 1
text_embeds = torch.cat(text_embed_arr)
vid_embeds = torch.cat(vid_embed_arr)
# append for multi view
text_embed_arr_multi.append(text_embeds)
vid_embed_arr_multi.append(vid_embeds)
# print(text_embeds.size(), vid_embeds.size())
for j in range(len(text_embed_arr_multi)):
if j == 0:
st2sv_sims = sim_matrix(text_embed_arr_multi[j], vid_embed_arr_multi[j]).detach().cpu().numpy() / len(text_embed_arr_multi)
else:
st2sv_sims += sim_matrix(text_embed_arr_multi[j], vid_embed_arr_multi[j]).detach().cpu().numpy() / len(text_embed_arr_multi)
# st2sv_sims = sim_matrix(text_embeds, vid_embeds).detach().cpu().numpy()
for metric in [t2v_metrics, v2t_metrics]:
metric_name = metric.__name__
metrics = metric(st2sv_sims)
if metric == t2v_metrics:
tr_r1, tr_r5, tr_r10, tr_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"]
else:
ir_r1, ir_r5, ir_r10, ir_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"]
# msg += f"MedR: {metrics['MedR']:g}, MeanR: {metrics['MeanR']:.1f}"
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
def compute_contrastive(pl_module, batch, return_infer=False, mask_text=False, mask_video=False, caption=False, mode="video"):
infer = pl_module.infer(batch, mask_text=mask_text, mask_video=mask_video, caption=caption, mode=mode)
if mask_text:
text_feats = infer["text_contrastive_feats"]
else:
text_feats = infer["text_feats"]
video_feats = infer["video_feats"]
if text_feats.ndim == 3: # [B, N, C] -> [B, C]
text_feats = text_feats.mean(1)
if video_feats.ndim == 3:
video_feats= video_feats.mean(1)
# Normalize the feature
video_feats = video_feats / video_feats.norm(dim=1, keepdim=True)
text_feats = text_feats / text_feats.norm(dim=1, keepdim=True)
if not pl_module.mmt:
# Plain contrastive
# # TODO: Handle logit_scale when model has no logit_scale
video_feats = distnn.all_gather(video_feats)
text_feats = distnn.all_gather(text_feats)
if not isinstance(video_feats, torch.Tensor) or video_feats.ndim == 3:
video_feats = torch.cat(list(video_feats))
text_feats = torch.cat(list(text_feats))
image_logits = video_feats @ text_feats.t() * pl_module.clip.logit_scale.exp()
text_logits = image_logits.t()
ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device)
loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(text_logits, ground_truth)).div(2)
else:
text_feats_k = infer["text_feats_k"]
video_feats_k = infer["video_feats_k"]
if video_feats_k.ndim == 3:
video_feats_k = video_feats_k.mean(1)
if text_feats_k.ndim == 3:
text_feats_k = text_feats_k.mean(1)
video_feats_k = video_feats_k / video_feats_k.norm(dim=1, keepdim=True)
text_feats_k = text_feats_k / text_feats_k.norm(dim=1, keepdim=True)
video_l_pos = torch.einsum('nc, nc->n', video_feats, text_feats_k).unsqueeze(-1)
video_l_neg = torch.einsum('nc, ck->nk', video_feats, pl_module.queue_text)
image_logits = torch.cat([video_l_pos, video_l_neg], dim=1) * pl_module.clip.logit_scale.exp()
text_l_pos = torch.einsum('nc, nc->n', text_feats, video_feats_k).unsqueeze(-1)
text_l_neg = torch.einsum('nc, ck->nk', text_feats, pl_module.queue_visual)
text_logits = torch.cat([text_l_pos, text_l_neg], dim=1) * pl_module.clip.logit_scale.exp()
ground_truth = torch.zeros(image_logits.shape[0], dtype=torch.long).to(image_logits.device)
loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(text_logits, ground_truth)).div(2)
pl_module._dequeue_and_enqueue(text_feats_k, video_feats_k)
ret = {
"contrastive_loss": loss,
"contrastive_image_logits": image_logits,
"contrastive_text_logits": text_logits,
"contrastive_labels": ground_truth,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_contrastive_loss")(ret["contrastive_loss"])
acc_image = getattr(pl_module, f"{phase}_contrastive_image_accuracy")(
ret["contrastive_image_logits"], ret["contrastive_labels"]
)
acc_text = getattr(pl_module, f"{phase}_contrastive_text_accuracy")(
ret["contrastive_text_logits"], ret["contrastive_labels"]
)
pl_module.log(f"contrastive/{phase}/loss", loss)
pl_module.log(f"contrastive/{phase}/image_accuracy", acc_image)
pl_module.log(f"contrastive/{phase}/text_accuracy", acc_text)
if return_infer:
return ret, infer
return ret
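# Illustrative sketch (for exposition only): the plain (non-MoCo) branch of
# compute_contrastive above is a CLIP-style symmetric InfoNCE. The minimal
# single-process version below mirrors that branch; the default logit_scale
# value is an illustrative stand-in for pl_module.clip.logit_scale.exp().
def _toy_symmetric_infonce(video_feats, text_feats, logit_scale=100.0):
    import torch
    import torch.nn.functional as F

    video_feats = video_feats / video_feats.norm(dim=1, keepdim=True)
    text_feats = text_feats / text_feats.norm(dim=1, keepdim=True)
    image_logits = video_feats @ text_feats.t() * logit_scale
    text_logits = image_logits.t()
    # matched pairs sit on the diagonal
    ground_truth = torch.arange(len(image_logits), device=image_logits.device)
    return (F.cross_entropy(image_logits, ground_truth)
            + F.cross_entropy(text_logits, ground_truth)) / 2
# e.g. _toy_symmetric_infonce(torch.randn(4, 8), torch.randn(4, 8))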
def compute_cap(pl_module, batch, infer=None, mode="video"):
if infer is None: # Skip infer if infer is not None
infer = pl_module.infer(batch, caption=True, mode=mode)
cap_logits = infer["cap_logits"]
# The first is sot_token, prediction starts from the second token
# Note that there is also an eot_token at the end of each seq
cap_labels = infer["text_ids"][:, 1:].long()
special_tokens_mask = infer["special_tokens_mask"][:, 1:] # 1 for masked
cap_labels.masked_fill_(special_tokens_mask, value=-100)
cap_loss = F.cross_entropy(
cap_logits.reshape(-1, pl_module.hparams.config["vocab_size"]),
cap_labels.reshape(-1),
ignore_index=-100,
)
ret = {
"cap_loss": cap_loss,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_cap_loss")(cap_loss)
acc = getattr(pl_module, f"{phase}_cap_accuracy")(
cap_logits, cap_labels,
)
pl_module.log(f"cap/{phase}/loss", loss)
pl_module.log(f"cap/{phase}/accuracy", acc)
return ret
def compute_zs_classify(pl_module, batch, text_ret):
text_feats = text_ret["text_feats"]
num_text_aug = text_ret["num_text_aug"]
labels = torch.tensor(batch["answer"]).to(pl_module.device).long()
video_feats = pl_module.forward_video(batch)["video_feats"]
if text_feats.ndim == 3: # [B, N, C] -> [B, C]
text_feats = text_feats.mean(1)
if video_feats.ndim == 3:
video_feats= video_feats.mean(1)
text_feats /= text_feats.norm(dim=-1, keepdim=True)
video_feats /= video_feats.norm(dim=-1, keepdim=True)
similarity = (pl_module.clip.logit_scale.exp() * video_feats @ text_feats.T)
B, _ = video_feats.shape
assert similarity.view(B, num_text_aug, -1).shape[-1] == 400, similarity.shape
similarity = similarity.view(B, num_text_aug, -1).softmax(dim=-1)
similarity = similarity.mean(dim=1, keepdim=False)
phase = "train" if pl_module.training else "val"
ret = {
"similarity": similarity,
"labels": labels,
}
acc = getattr(pl_module, f"{phase}_zs_classify_accuracy")(similarity, labels)
pl_module.log(f"zs_classify/{phase}/accuracy", acc)
return ret
def compute_mim(pl_module, batch, infer=None, mode="video"):
if infer is None: # Skip infer if infer is not None
infer = pl_module.infer(batch, mask_image=True, mode=mode)
mim_feats = infer["mim_video_feats"] # N, Lu, C
video = infer["video"] # N, C, T, H, W
masked_indices = infer["visual_masked_indices"] # N * T, L
    patch_size = int(math.sqrt(mim_feats.size(-1) // 3 // 2))
    # each token covers a (2 x patch_size x patch_size) tubelet with 3 channels
    assert 2 * 3 * patch_size * patch_size == mim_feats.size(-1)
    if mode == "image":
        # single-frame input: repeat the temporal dimension so the 2-frame
        # tubelet rearrange below still applies
        assert video.size(2) == 1
        video = video.expand(-1, -1, 2, -1, -1)
img_patch = rearrange(
video, 'b c (p0 t) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c',
p0=2, p1=patch_size, p2=patch_size
)
img_patch = (img_patch - img_patch.mean(dim=-2, keepdim=True)
) / (img_patch.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6)
img_patch = rearrange(img_patch, 'b l p c -> b l (p c)')
N, _, C = img_patch.shape
img_patch_mask = img_patch[masked_indices].reshape(N, -1, C)
mim_loss = F.mse_loss(mim_feats, img_patch_mask)
ret = {
"mim_loss": mim_loss,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mim_loss")(ret["mim_loss"])
pl_module.log(f"mim/{phase}/loss", loss)
return ret
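# Illustrative sketch (for exposition only): compute_mim above regresses the
# masked tokens against per-tubelet-normalised pixels. The toy below rebuilds
# that regression target on a made-up clip (all sizes are illustrative); it is
# a sketch of the target construction, not the training code itself.
def _toy_mim_target(patch_size=4):
    import torch
    from einops import rearrange

    video = torch.randn(1, 3, 2, 8, 8)  # N, C, T, H, W with T divisible by 2
    img_patch = rearrange(
        video, 'b c (p0 t) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c',
        p0=2, p1=patch_size, p2=patch_size
    )
    # normalise each tubelet over its pixel positions, as in compute_mim
    img_patch = (img_patch - img_patch.mean(dim=-2, keepdim=True)) / (
        img_patch.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6)
    img_patch = rearrange(img_patch, 'b l p c -> b l (p c)')
    # pretend every other token was masked
    masked = torch.zeros(img_patch.shape[:2], dtype=torch.bool)
    masked[:, ::2] = True
    N, _, C = img_patch.shape
    return img_patch[masked].reshape(N, -1, C)  # N, num_masked, 2*3*patch_size**2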
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def vqa_test_step(pl_module, batch, output):
id2answer = (
pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
)
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds}
def openend_vqa_test_step(pl_module, batch, output):
id2answer = (
pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["msrvttqa"].id2answer
)
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds}
def arc_test_step(pl_module, batch, output):
return output
def vqa_test_wrapup(outs, model_name):
rank = torch.distributed.get_rank()
qids, preds = list(), list()
for out in outs:
qids += out["qids"]
preds += out["preds"]
rets = list()
for qid, pred in zip(qids, preds):
rets.append({"question_id": qid, "answer": pred})
with open(f"vqa_submit_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob("vqa_submit_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
os.makedirs("result", exist_ok=True)
with open(f"result/vqa_submit_{model_name}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"vqa_submit_{rank}.json")
def arc_test_wrapup(outs, caplen, model_name):
rank = torch.distributed.get_rank()
iids, captions = list(), list()
for out in outs:
iids += out["iid"]
captions += out["captions"]
rets = list()
for iid, caption in zip(iids, captions):
rets.append({"video_id": iid, "caption": caption})
with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob(f"coco_cap_len{caplen}_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
os.makedirs("result/arc", exist_ok=True)
jsons = sorted(jsons, key=lambda x: x["video_id"])
with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"coco_cap_len{caplen}_{rank}.json")
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/objectives.py |
import os
import math
import numbers
from pathlib import Path
import ipdb
import numpy as np
import torch
import scipy.stats
from sklearn.metrics import average_precision_score
def t2v_metrics(sims, query_masks=None):
"""Compute retrieval metrics from a similiarity matrix.
Args:
sims (th.Tensor): N x M matrix of similarities between embeddings, where
x_{i,j} = <text_embd[i], vid_embed[j]>
query_masks (th.Tensor): mask any missing queries from the dataset (two videos
in MSRVTT only have 19, rather than 20 captions)
Returns:
(dict[str:float]): retrieval metrics
"""
assert sims.ndim == 2, "expected a matrix"
num_queries, num_vids = sims.shape
dists = -sims
sorted_dists = np.sort(dists, axis=1)
# The indices are computed such that they slice out the ground truth distances
    # from the pseudo-rectangular dist matrix
queries_per_video = num_queries // num_vids
gt_idx = [[np.ravel_multi_index([ii, jj], (num_queries, num_vids))
for ii in range(jj * queries_per_video, (jj + 1) * queries_per_video)]
for jj in range(num_vids)]
gt_idx = np.array(gt_idx)
gt_dists = dists.reshape(-1)[gt_idx.reshape(-1)]
gt_dists = gt_dists[:, np.newaxis]
rows, cols = np.where((sorted_dists - gt_dists) == 0) # find column position of GT
# --------------------------------
# NOTE: Breaking ties
# --------------------------------
# We sometimes need to break ties (in general, these should occur extremely rarely,
# but there are pathological cases when they can distort the scores, such as when
# the similarity matrix is all zeros). Previous implementations (e.g. the t2i
# evaluation function used
# here: https://github.com/niluthpol/multimodal_vtt/blob/master/evaluation.py and
# here: https://github.com/linxd5/VSE_Pytorch/blob/master/evaluation.py#L87) generally
# break ties "optimistically". However, if the similarity matrix is constant this
# can evaluate to a perfect ranking. A principled option is to average over all
# possible partial orderings implied by the ties. See # this paper for a discussion:
# McSherry, Frank, and Marc Najork,
# "Computing information retrieval performance measures efficiently in the presence
# of tied scores." European conference on information retrieval. Springer, Berlin,
# Heidelberg, 2008.
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.145.8892&rep=rep1&type=pdf
break_ties = "optimistically"
#break_ties = "averaging"
if rows.size > num_queries:
assert np.unique(rows).size == num_queries, "issue in metric evaluation"
if break_ties == "optimistically":
_, idx = np.unique(rows, return_index=True)
cols = cols[idx]
elif break_ties == "averaging":
# fast implementation, based on this code:
# https://stackoverflow.com/a/49239335
locs = np.argwhere((sorted_dists - gt_dists) == 0)
# Find the split indices
steps = np.diff(locs[:, 0])
splits = np.nonzero(steps)[0] + 1
splits = np.insert(splits, 0, 0)
# Compute the result columns
summed_cols = np.add.reduceat(locs[:, 1], splits)
counts = np.diff(np.append(splits, locs.shape[0]))
avg_cols = summed_cols / counts
if False:
print("Running slower code to verify rank averaging across ties")
# slow, but more interpretable version, used for testing
avg_cols_slow = [np.mean(cols[rows == idx]) for idx in range(num_queries)]
assert np.array_equal(avg_cols, avg_cols_slow), "slow vs fast difference"
print("passed num check")
cols = avg_cols
msg = "expected ranks to match queries ({} vs {}) "
if cols.size != num_queries:
import ipdb;
ipdb.set_trace()
assert cols.size == num_queries, msg
if False:
# overload mask to check that we can recover the scores for single-query
# retrieval
print("DEBUGGING MODE")
query_masks = np.zeros_like(query_masks)
query_masks[:, 0] = 1 # recover single query score
if query_masks is not None:
# remove invalid queries
assert query_masks.size == num_queries, "invalid query mask shape"
        cols = cols[query_masks.reshape(-1).astype(bool)]
assert cols.size == query_masks.sum(), "masking was not applied correctly"
# update number of queries to account for those that were missing
num_queries = query_masks.sum()
if False:
# sanity check against old logic for square matrices
gt_dists_old = np.diag(dists)
gt_dists_old = gt_dists_old[:, np.newaxis]
_, cols_old = np.where((sorted_dists - gt_dists_old) == 0)
assert np.array_equal(cols_old, cols), "new metric doesn't match"
return cols2metrics(cols, num_queries)
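# Illustrative sketch (for exposition only): the "averaging" tie-break above
# assigns each query the mean of every rank position its ground-truth
# distance ties with, using np.add.reduceat to sum the tied columns per query
# in one vectorised pass. A small worked example of that trick:
def _toy_rank_averaging():
    import numpy as np

    # first column: query index, second column: tied rank positions
    locs = np.array([[0, 0],                      # query 0 ties only at rank 0
                     [1, 1], [1, 2],              # query 1 ties at ranks 1 and 2
                     [2, 0], [2, 1], [2, 2]])     # query 2 ties at every rank
    splits = np.nonzero(np.diff(locs[:, 0]))[0] + 1
    splits = np.insert(splits, 0, 0)
    summed_cols = np.add.reduceat(locs[:, 1], splits)
    counts = np.diff(np.append(splits, locs.shape[0]))
    avg_cols = summed_cols / counts               # array([0. , 1.5, 1. ])
    return avg_cols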
def v2t_metrics(sims, query_masks=None):
"""Compute retrieval metrics from a similiarity matrix.
Args:
sims (th.Tensor): N x M matrix of similarities between embeddings, where
x_{i,j} = <text_embd[i], vid_embed[j]>
query_masks (th.Tensor): mask any missing captions from the dataset
Returns:
(dict[str:float]): retrieval metrics
NOTES: We find the closest "GT caption" in the style of VSE, which corresponds
to finding the rank of the closest relevant caption in embedding space:
github.com/ryankiros/visual-semantic-embedding/blob/master/evaluation.py#L52-L56
"""
# switch axes of text and video
sims = sims.T
if False:
# experiment with toy example
sims = np.ones((3, 3))
sims[0, 0] = 2
sims[1, 1:2] = 2
sims[2, :] = 2
query_masks = None
assert sims.ndim == 2, "expected a matrix"
num_queries, num_caps = sims.shape
dists = -sims
caps_per_video = num_caps // num_queries
break_ties = "averaging"
MISSING_VAL = 1E8
query_ranks = []
for ii in range(num_queries):
row_dists = dists[ii, :]
if query_masks is not None:
# Set missing queries to have a distance of infinity. A missing query
# refers to a query position `n` for a video that had less than `n`
# captions (for example, a few MSRVTT videos only have 19 queries)
row_dists[np.logical_not(query_masks.reshape(-1))] = MISSING_VAL
# NOTE: Using distance subtraction to perform the ranking is easier to make
# deterministic than using argsort, which suffers from the issue of defining
# "stability" for equal distances. Example of distance subtraction code:
# github.com/antoine77340/Mixture-of-Embedding-Experts/blob/master/train.py
sorted_dists = np.sort(row_dists)
min_rank = np.inf
for jj in range(ii * caps_per_video, (ii + 1) * caps_per_video):
if row_dists[jj] == MISSING_VAL:
# skip rankings of missing captions
continue
ranks = np.where((sorted_dists - row_dists[jj]) == 0)[0]
if break_ties == "optimistically":
rank = ranks[0]
elif break_ties == "averaging":
                # NOTE: If there is more than one caption per video, it's possible for the
# method to do "worse than chance" in the degenerate case when all
# similarities are tied. TODO(Samuel): Address this case.
rank = ranks.mean()
if rank < min_rank:
min_rank = rank
query_ranks.append(min_rank)
query_ranks = np.array(query_ranks)
# sanity check against old version of code
if False:
sorted_dists = np.sort(dists, axis=1)
gt_dists_old = np.diag(dists)
gt_dists_old = gt_dists_old[:, np.newaxis]
rows_old, cols_old = np.where((sorted_dists - gt_dists_old) == 0)
if rows_old.size > num_queries:
_, idx = np.unique(rows_old, return_index=True)
cols_old = cols_old[idx]
num_diffs = (1 - (cols_old == query_ranks)).sum()
msg = f"new metric doesn't match in {num_diffs} places"
assert np.array_equal(cols_old, query_ranks), msg
# visualise the distance matrix
import sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
sys.path.insert(0, str(Path.home() / "coding/src/zsvision/python"))
from zsvision.zs_iterm import zs_dispFig # NOQA
plt.matshow(dists)
zs_dispFig()
return cols2metrics(query_ranks, num_queries)
def retrieval_as_classification(sims, query_masks=None):
"""Compute classification metrics from a similiarity matrix.
"""
assert sims.ndim == 2, "expected a matrix"
# switch axes of query-labels and video
sims = sims.T
query_masks = query_masks.T
dists = -sims
num_queries, num_labels = sims.shape
break_ties = "averaging"
query_ranks = []
for ii in range(num_queries):
row_dists = dists[ii, :]
# NOTE: Using distance subtraction to perform the ranking is easier to make
# deterministic than using argsort, which suffers from the issue of defining
# "stability" for equal distances. Example of distance subtraction code:
# github.com/antoine77340/Mixture-of-Embedding-Experts/blob/master/train.py
sorted_dists = np.sort(row_dists)
# min_rank = np.inf
label_ranks = []
for gt_label in np.where(query_masks[ii, :])[0]:
ranks = np.where((sorted_dists - row_dists[gt_label]) == 0)[0]
if break_ties == "optimistically":
rank = ranks[0]
elif break_ties == "averaging":
                # NOTE: If there is more than one caption per video, it's possible for the
# method to do "worse than chance" in the degenerate case when all
# similarities are tied. TODO(Samuel): Address this case.
rank = ranks.mean()
else:
raise ValueError(f"unknown tie-breaking method: {break_ties}")
label_ranks.append(rank)
# Avoid penalising for assigning higher similarity to other gt labels. This is
# done by subtracting out the better ranked query labels. Note that this step
# introduces a slight skew in favour of videos with lots of labels. We can
# address this later with a normalisation step if needed.
label_ranks = [x - idx for idx, x in enumerate(label_ranks)]
# Include all labels in the final calculation
query_ranks.extend(label_ranks)
query_ranks = np.array(query_ranks)
# sanity check against old version of code
if False:
# visualise the distance matrix
import sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
sys.path.insert(0, str(Path.home() / "coding/src/zsvision/python"))
from zsvision.zs_iterm import zs_dispFig # NOQA
# plt.matshow(dists)
# zs_dispFig()
plt.hist(query_ranks, bins=313, alpha=0.5)
plt.grid()
zs_dispFig()
import ipdb;
ipdb.set_trace()
return cols2metrics(query_ranks, num_queries=len(query_ranks))
def cols2metrics(cols, num_queries):
metrics = {}
metrics["R1"] = 100 * float(np.sum(cols == 0)) / num_queries
metrics["R5"] = 100 * float(np.sum(cols < 5)) / num_queries
metrics["R10"] = 100 * float(np.sum(cols < 10)) / num_queries
metrics["R50"] = 100 * float(np.sum(cols < 50)) / num_queries
metrics["MedR"] = np.median(cols) + 1
metrics["MeanR"] = np.mean(cols) + 1
stats = [metrics[x] for x in ("R1", "R5", "R10")]
metrics["geometric_mean_R1-R5-R10"] = scipy.stats.mstats.gmean(stats)
return metrics
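# Illustrative sketch (for exposition only): cols2metrics expects the
# 0-indexed rank of the ground-truth item per query. With ranks [0, 3, 12, 70]
# one of four queries is retrieved at rank 1 (R1 = 25.0), two within the top
# five and top ten (R5 = R10 = 50.0) and three within the top fifty
# (R50 = 75.0); MedR/MeanR report 1-indexed ranks.
def _example_cols2metrics():
    import numpy as np

    metrics = cols2metrics(np.array([0, 3, 12, 70]), num_queries=4)
    assert metrics["R1"] == 25.0 and metrics["R5"] == 50.0
    assert metrics["R10"] == 50.0 and metrics["R50"] == 75.0
    return metrics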
def mean_average_precision(sims, query_masks=None):
    # NOTE: APMeter is not imported in this module; it is expected to be
    # provided by the environment (e.g. torchnet.meter.APMeter).
    ap_meter = APMeter()
ap_meter.add(output=sims.T, target=query_masks.T)
return {"mAP": ap_meter.value().mean()}
def acc(output, target):
with torch.no_grad():
pred = torch.argmax(output, dim=1)
assert pred.shape[0] == len(target)
correct = 0
correct += torch.sum(pred == target).item()
return correct / len(target)
def my_metric2(output, target, k=3):
with torch.no_grad():
pred = torch.topk(output, k, dim=1)[1]
assert pred.shape[0] == len(target)
correct = 0
for i in range(k):
correct += torch.sum(pred[:, i] == target).item()
return correct / len(target)
def video_precision(output, target):
""" percentage of videos which have been aligned to a matching text pair"""
assert output.shape[0] == target.shape[0]
assert output.shape[2] == target.shape[2] == 2
correct = 0
for bout, btarg in zip(output, target):
for pair in bout:
eq = torch.eq(pair, btarg)
if torch.logical_and(eq[:, 0], eq[:, 1]).any():
correct += 1
return correct / (target.shape[0] * target.shape[1])
def video_precision_adj(output, target):
""" adjusts the video precision metric by ignoring videos which have no aligning text."""
    assert output.shape[0] == target.shape[0]
    assert output.shape[2] == target.shape[2] == 2
correct = 0
for bout, btarg in zip(output, target):
for pair in bout:
eq = torch.eq(pair, btarg)
if torch.logical_and(eq[:, 0], eq[:, 1]).any():
correct += 1
denom = len(target[:, :, 0].unique())
return correct / denom
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/retrieval_metrics.py |
from copy import deepcopy
import torch
import torch.nn as nn
import pytorch_lightning as pl
from CoTrain.modules import heads, cotrain_utils
from CoTrain.modules import objectives as objectives
from CoTrain.modules import base_vision_transformer as vit
from CoTrain.modules.text_prompt import text_prompt
import os
import matplotlib.pyplot as plt
import math
import CoTrain.modules.InternVideo as clip_kc_new
from PIL import Image
import numpy as np
from .clip_param_keys import clip_param_keys, gradually_freeze_by_layer
from .clip_decoders import CaptionDecoder
def vis_save(imgs, texts):
# img: [B, T, C, H, W]
# texts: [str]
os.makedirs("vis_test", exist_ok=True)
imgs = imgs.permute(0, 1, 3, 4, 2).cpu().numpy()
imgs = (imgs - imgs.min()) / (imgs.max() - imgs.min())
for img, text in zip(imgs, texts):
caption = "_".join(text.split())
os.makedirs(os.path.join("vis_test", caption), exist_ok=True)
for i, im in enumerate(img):
img_path = os.path.join("vis_test", caption, f"{i}.png")
plt.imsave(img_path, im)
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
@torch.no_grad()
def batch_shuffle_ddp(x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def batch_unshuffle_ddp(x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
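# Illustrative sketch (for exposition only): batch_shuffle_ddp /
# batch_unshuffle_ddp above rely on torch.argsort of a permutation being its
# inverse, so indexing with the shuffle index and then the unshuffle index
# restores the original order. Single-process illustration (no DDP involved):
def _toy_shuffle_unshuffle(batch_size=8, dim=4):
    import torch

    x = torch.randn(batch_size, dim)
    idx_shuffle = torch.randperm(batch_size)
    idx_unshuffle = torch.argsort(idx_shuffle)  # inverse permutation
    x_restored = x[idx_shuffle][idx_unshuffle]
    assert torch.equal(x_restored, x)
    return x_restored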
class CLIP(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.save_hyperparameters()
self.clip_type = config["clip_type"]
self.prompt_type = config["prompt_type"]
self.mlm_prob = config["mlm_prob"]
self.mim_prob = config["mim_prob"]
self.qa_type = config["clip_qa_type"]
self.mc_type = config["clip_mc_type"]
self.mmt = config["clip_mmt"]
self.alt_data = config["clip_alt_data"]
if config["clip_type"] == "kc_new":
self.clip, self.clip_preprocess = clip_kc_new.load(
config["clip"],
t_size=config["num_frames"],
n_layers=4,
mlp_dropout=[config["clip_evl_dropout"]] * 4,
cls_dropout=config["clip_evl_dropout"],
no_pretrain=config["clip_no_pretrain"],
init_zero=config["clip_init_zero"],
drop_path_rate=config["clip_dpr"],
device=self.device,
use_checkpoint=config["clip_use_checkpoint"],
checkpoint_num=config["clip_checkpoint_num"],
# mask_text=(
# self.hparams.config["loss_names"]["mlm"] > 0
# or (
# self.hparams.config["loss_names"]["openend_vqa"] > 0
# and self.qa_type in ["zs", "mlm", "vtc_mlm"]
# )
# ),
)
else:
raise NotImplementedError(
"Clip type: {} not implemented".format(config["clip_type"])
)
cotrain_utils.set_metrics(self)
self.current_tasks = list()
vision_width = self.clip.visual.conv1.weight.shape[0]
transformer_width = self.clip.transformer.width
if self.hparams.config["loss_names"]["openend_vqa"] > 0:
if self.qa_type == "vtc":
hs = vision_width + transformer_width
elif self.qa_type in ["cap"]:
hs = transformer_width
elif self.qa_type in ["vtc_cap", "vtc_mlm"]:
# We cat the vision feature, text feature
# and cross feature together
hs = vision_width + transformer_width * 2
elif self.qa_type in ["zs", "mlm"]:
pass
else:
raise NotImplementedError("QA Type {} Not Implemented")
if self.qa_type in ["vtc", "cap", "vtc_cap", "vtc_mlm"]:
self.clip.text_projection = None
self.clip.visual_proj = None
vs = self.hparams.config["msrvttqa_label_size"]
self.vqa_classifier = nn.Sequential(
nn.Dropout(config["clip_cls_dropout"]),
nn.Linear(hs, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Dropout(config["clip_cls_dropout"]),
# nn.Linear(hs * 2, hs * 2),
# nn.GELU(),
# nn.Dropout(config["clip_cls_dropout"]),
# nn.LayerNorm(hs * 2),
nn.Linear(hs * 2, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
if self.hparams.config["loss_names"]["multiple_choice"] > 0:
if self.mc_type == "vtc":
pass
elif self.mc_type == "cap":
hs = transformer_width
elif self.mc_type == "vtc_cap":
# We cat the vision feature, text feature
# and cross feature together
hs = vision_width + transformer_width * 2
else:
raise NotImplementedError("MC Type {} Not Implemented")
if self.mc_type in ["cap", "vtc_cap"]:
self.clip.text_projection = None
self.clip.visual_proj = None
self.rank_output = nn.Sequential(
nn.Dropout(config["clip_cls_dropout"]),
nn.Linear(hs, hs * 2),
nn.LayerNorm(hs * 2),
nn.GELU(),
nn.Dropout(config["clip_cls_dropout"]),
nn.Linear(hs * 2, 1),
)
self.rank_output.apply(objectives.init_weights)
if (
self.hparams.config["loss_names"]["cap"] > 0
or (
self.hparams.config["loss_names"]["openend_vqa"] > 0
and self.qa_type in ["cap", "vtc_cap"]
)
or (
self.hparams.config["loss_names"]["multiple_choice"] > 0
and self.mc_type in ["cap", "vtc_cap"]
)
):
vs = self.clip.vocab_size
if self.hparams.config["loss_names"][
"openend_vqa"
] > 0 and self.qa_type in ["cap", "vtc_cap"]:
vs = self.hparams.config["msrvttqa_label_size"]
self.caption_decoder = CaptionDecoder(
n_layers=config["clip_cap_decoder_n_layers"],
transformer_width=transformer_width,
vision_width=vision_width,
transformer_heads=transformer_width // 64,
vocab_size=vs,
use_checkpoint=config["clip_use_checkpoint"],
checkpoint_num=config["clip_checkpoint_num"],
)
if (
self.hparams.config["loss_names"]["openend_vqa"] > 0
and self.qa_type in ["cap", "vtc_cap"]
) or (
self.hparams.config["loss_names"]["multiple_choice"] > 0
and self.mc_type in ["cap", "vtc_cap"]
):
self.caption_decoder.predictor = nn.Identity()
self.caption_decoder.apply(objectives.init_weights)
# For zs_classify
self.text_ret = None
if self.hparams.config["load_path"] != "":
# Support multiple load_path
if isinstance(self.hparams.config["load_path"], str):
self.hparams.config["load_path"] = [self.hparams.config["load_path"]]
for i, load_path in enumerate(self.hparams.config["load_path"]):
ckpt = torch.load(
cotrain_utils.read_load_path(load_path),
map_location="cpu",
)
if i == 0:
state_dict = ckpt["state_dict"]
continue
for k in state_dict.keys():
state_dict[k] += ckpt["state_dict"][k]
for k in state_dict.keys():
state_dict[k] /= len(self.hparams.config["load_path"])
modified_keys = []
if config["clip_wiseft_coef"] > 0:
c = config["clip_wiseft_coef"]
assert 0 < c < 1.0
# We assume using clip weight by default
clip_sd = {k: v.cpu() for k, v in self.state_dict().items()}
new_sd = deepcopy(state_dict)
# Directly modify state_dict to load
for k in new_sd:
if k not in clip_sd:
continue
if any(x in k for x in clip_param_keys):
new_sd[k] = clip_sd[k] * c + state_dict[k] * (1.0 - c)
modified_keys.append(k)
state_dict = new_sd
# Remove mismatch parameters for 336
sd = {k: v.cpu() for k, v in self.state_dict().items()}
for k in list(state_dict.keys()):
if k not in sd:
continue
if state_dict[k].shape != sd[k].shape:
print(
"!!!!!!!!!!!Size mismatch {} {} {}".format(
k, state_dict[k].shape, sd[k].shape
)
)
del state_dict[k]
self.load_state_dict(state_dict, strict=False)
if config["clip_freeze"] and config["clip_type"] == "evl":
self.freeze_clip_evl()
if config["clip_freeze"] and config["clip_type"] == "kc":
self.freeze_clip()
if config["clip_freeze_text"]:
self.freeze_text()
self.grad_unfreeze_int = config["clip_grad_unfreeze_int"]
if self.grad_unfreeze_int > 0:
self.freeze_clip()
if self.mmt:
# MoCo Setting
K = 65536
m = 0.999
dim = self.clip.embed_dim
self.K = K
self.m = m
self.clip_k = deepcopy(self.clip)
for p in self.clip_k.parameters():
p.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue_visual", torch.randn(dim, K))
self.register_buffer("queue_text", torch.randn(dim, K))
self.queue_visual = nn.functional.normalize(self.queue_visual, dim=0)
self.queue_text = nn.functional.normalize(self.queue_text, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
def freeze_clip_evl(self):
for n, p in self.named_parameters():
if (
"clip.visual" in n
and "clip.visual.ln_post" not in n
and "clip.visual.proj" not in n
):
p.requires_grad = False
elif "clip.transformer" in n:
p.requires_grad = False
elif "clip.token_embedding" in n:
p.requires_grad = False
elif "clip.positional_embedding" in n:
p.requires_grad = False
def freeze_clip(self):
for n, p in self.named_parameters():
# Unfreeze the projection layer
if any(x in n for x in ["text_projection", "visual_proj", "visual.proj"]):
continue
if any(x in n for x in clip_param_keys):
p.requires_grad = False
def freeze_text(self):
for n, p in self.named_parameters():
if "clip.transformer" in n:
p.requires_grad = False
elif "clip.token_embedding" in n:
p.requires_grad = False
elif "clip.positional_embedding" in n:
p.requires_grad = False
elif "clip.ln_final" in n:
p.requires_grad = False
elif "clip.text_projection" in n:
p.requires_grad = False
elif "clip.eot_token_embedding" in n:
p.requires_grad = False
@torch.no_grad()
def mask_text_ids(self, text_ids, special_tokens_mask):
if "openend_vqa" in self.current_tasks:
return self.mask_text_ids_qa_mlm(text_ids)
# See https://github.com/huggingface/transformers/blob/a22db885b41b3a1b302fc206312ee4d99cdf4b7c/src/transformers/data/data_collator.py#L748
# text_ids, special_tokens_mask: torch.Tensor of shape (N, L)
labels = text_ids.clone().long()
probability_matrix = torch.full(labels.shape, self.mlm_prob, device=self.device)
# do not mask special_token, including sot_token, eot_token and empty
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
# probability_matrix[:, 0] = 0.0
# probability_matrix[torch.arange(labels.shape[0]), text_ids.argmax(dim=-1)] = 0.0
masked_indices = torch.bernoulli(probability_matrix).bool()
# We only compute loss on masked tokens, note that padding id is 0
labels[~masked_indices] = -100
# ? Should we use other augmentation in Bert?
return text_ids, labels, masked_indices
@torch.no_grad()
def mask_text_ids_qa_mlm(self, text_ids):
        # The text should be in the format "Question: {} Answer:"
# We add a mask at the end of the sentence
eot_id = text_ids[torch.arange(text_ids.shape[0]), text_ids.argmax(dim=-1)]
assert torch.numel(torch.unique(eot_id)) == 1
masked_indices = torch.zeros(
*text_ids.shape, dtype=torch.bool, device=text_ids.device
)
masked_indices[torch.arange(text_ids.shape[0]), text_ids.argmax(dim=-1)] = 1
labels = text_ids.clone()
text_ids[torch.arange(labels.shape[0]), labels.argmax(dim=-1)] = eot_id - 1
text_ids[torch.arange(labels.shape[0]), labels.argmax(dim=-1) + 1] = eot_id
return text_ids, None, masked_indices
@torch.no_grad()
def mask_visual(self, video, mode="video"):
assert mode in ["video", "image"]
N, C, T, H, W = video.shape
patch_size = self.clip.visual.conv1.weight.shape[-1]
N = N * T
L = H * W // (patch_size * patch_size)
# This is different from text as we are masking a fix number of tokens
Lm = int(self.mim_prob * L)
masked_indices = torch.zeros(N, L)
indices = torch.argsort(torch.rand_like(masked_indices), dim=-1)[:, :Lm]
batch_indices = (
torch.arange(masked_indices.shape[0]).unsqueeze(-1).expand_as(indices)
)
masked_indices[batch_indices, indices] = 1
masked_indices = masked_indices.bool()
return masked_indices
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.clip.parameters(), self.clip_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, text_keys, visual_keys):
# gather keys before updating queue
text_keys = concat_all_gather(text_keys)
visual_keys = concat_all_gather(visual_keys)
batch_size = text_keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue_text[:, ptr : ptr + batch_size] = text_keys.T
self.queue_visual[:, ptr : ptr + batch_size] = visual_keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
def infer(
self,
batch,
mask_text=False,
mask_video=False,
input_video_only=False,
input_text_only=False,
caption=False,
mode="video",
):
imgkey = "video"
# Check configs
assert not input_video_only
assert not input_text_only
if mask_text:
assert self.clip_type in ["ori", "evl", "kc", "kc_new"]
if mask_video:
assert self.clip_type in ["ori", "kc", "kc_new"]
# Encode Text #########################################################
if "clip_text_ids" in batch:
# If the clip tokenization is prepared
text_ids, special_tokens_mask = (
batch["clip_text_ids"],
batch["clip_special_tokens_mask"],
)
else: # TODO: Remove this else
text_ids, special_tokens_mask = clip_kc_new.tokenize(
batch[f"text"], truncate=True, return_special_tokens_mask=True
)
text_ids = text_ids.to(self.device)
special_tokens_mask = special_tokens_mask.to(self.device)
if mask_text: # ! This messes with text_feats and text_all_feats
masked_text_ids, text_labels, text_masked_indices = self.mask_text_ids(
text_ids, special_tokens_mask
)
# [N, C], [N, L, C]
text_feats, text_all_feats = self.clip.encode_text(
masked_text_ids,
masked_indices=text_masked_indices,
return_all_feats=True,
)
else:
text_feats, text_all_feats = self.clip.encode_text(
text_ids, return_all_feats=True
)
# Encode Video ########################################################
video = batch[imgkey][0]
if self.clip_type in ["ori", "evl", "kc", "kc_new"]:
# [N, T, C, H, W] -> [N, C, T, H, W]
video = video.contiguous().transpose(1, 2)
# TODO: Remove this if
# [N, C], [L, N, T, C]
# video_feats for contrastive, video_all_feats for mlm, caption
if mask_video:
visual_masked_indices = self.mask_visual(video, mode=mode)
video_feats, video_all_feats = self.clip.encode_video(
video,
return_all_feats=True,
masked_indices=visual_masked_indices,
mode=mode,
)
else:
video_feats, video_all_feats = self.clip.encode_video(
video, return_all_feats=True, mode=mode
)
ret = {
"video": video, # N, C, T, H, W
"text_feats": text_feats, # N, C
"video_feats": video_feats, # N, C
"text_ids": text_ids, # N, L
"special_tokens_mask": special_tokens_mask, # N, L
}
if self.mmt:
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
assert not any([mask_text, mask_video, mode != "video"])
# TODO: We have BN, batch shuffle ?
text_feats_k = self.clip_k.encode_text(text_ids, return_all_feats=False)
# img, idx_unshuffle = batch_shuffle_ddp(img)
video_feats_k = self.clip_k.encode_video(
video, return_all_feats=False, mode=mode
)
# video_feats_k = batch_unshuffle_ddp(video_feats_k, idx_unshuffle)
ret.update(
{
"text_feats_k": text_feats_k,
"video_feats_k": video_feats_k,
}
)
# Mask Text Decoder ##################################################
if mask_text:
text_con_feats = text_feats
text_feats = self.text_decoder(text_all_feats, video_all_feats)
ret.update(
{
# ! Modified the original, no other loss should do the same
"text_feats": text_feats, # N, C
"text_labels": text_labels, # N, L
"text_contrastive_feats": text_con_feats, # N, L
}
)
# Mask Visual Decoder#################################################
if mask_video and hasattr(self, "visual_decoder"):
mim_video_feats = self.visual_decoder(
video_all_feats, visual_masked_indices
)
ret.update(
{
"mim_video_feats": mim_video_feats, # N, L, C
"visual_masked_indices": visual_masked_indices, # N, L
}
)
# Caption decoder ##################################################
if caption:
cap_logits = self.caption_decoder(video_all_feats, text_all_feats[:, :-1])
ret.update(
{
"cap_logits": cap_logits,
}
)
return ret
def sanity_check(self):
image = (
self.clip_preprocess(
Image.open(
"/mnt/petrelfs/liyizhuo/projects/all-in-one-cotrain/CoTraining/dog.png"
)
)
.unsqueeze(0)
.to(self.device)
)
T = 16
B, C, H, W = image.shape
video = image.repeat(T, 1, 1, 1).reshape(T, B, C, H, W).permute(1, 0, 2, 3, 4)
self.eval()
infer = self.infer(
{
"video": [video],
"text": ["a diagram", "a dog", "a cat"],
},
mode="image",
)
score = (
self.clip.logit_scale.exp() * infer["video_feats"] @ infer["text_feats"].t()
)
assert False, (score.softmax(dim=-1), self.clip.logit_scale.item())
def forward(self, batch, batch_idx=None, mode="video"):
# self.sanity_check()
with torch.no_grad():
self.clip.logit_scale.clamp_(0, math.log(100))
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch, mode=mode))
return ret
if "contrastive" in self.current_tasks:
mask_text = "mlm" in self.current_tasks
mask_video = "mim" in self.current_tasks
caption = "cap" in self.current_tasks
if any([mask_text, mask_video, caption]):
contrastive_ret, contrastive_infer = objectives.compute_contrastive(
self,
batch,
return_infer=True,
mask_text=mask_text,
mask_video=mask_video,
caption=caption,
mode=mode,
)
ret.update(contrastive_ret)
else:
ret.update(objectives.compute_contrastive(self, batch, mode=mode))
if "multiple_choice" in self.current_tasks:
ret.update(objectives.compute_multiple_choice(self, batch))
if "openend_vqa" in self.current_tasks:
ret.update(objectives.compute_openend_vqa(self, batch))
if "mlm" in self.current_tasks:
if "contrastive" in self.current_tasks: # Skip infer
ret.update(objectives.compute_mlm(self, batch, infer=contrastive_infer))
else:
ret.update(objectives.compute_mlm(self, batch))
if "mim" in self.current_tasks and hasattr(self, "visual_decoder"):
if "contrastive" in self.current_tasks: # Skip infer
ret.update(
objectives.compute_mim(
self, batch, infer=contrastive_infer, mode=mode
)
)
else:
ret.update(objectives.compute_mim(self, batch))
if "cap" in self.current_tasks:
if "contrastive" in self.current_tasks: # Skip infer
ret.update(
objectives.compute_cap(
self, batch, infer=contrastive_infer, mode=mode
)
)
else:
ret.update(objectives.compute_cap(self, batch, mode=mode))
if "zs_classify" in self.current_tasks:
if self.text_ret is None:
# print(f"Generate text features for in batch-{batch_idx}")
self.text_ret = self.forward_text()
ret.update(objectives.compute_zs_classify(self, batch, self.text_ret))
return ret
def forward_text(self):
classes, num_text_aug, _ = text_prompt(prompt_type=self.prompt_type)
text_inputs = classes.to(self.device)
text_feats = self.clip.encode_text(text_inputs)
# text_feats /= text_feats.norm(dim=-1, keepdim=True)
ret = {
"text_feats": text_feats, # num_text_aug * num_classes, C
"num_text_aug": num_text_aug,
}
return ret
def forward_video(self, batch):
img = batch["video"][0]
if self.clip_type in ["ori", "evl", "kc", "kc_new"]:
# [B, T, C, H, W] -> [B, C, T, H, W]
img = img.contiguous().transpose(1, 2)
video_feats = self.clip.encode_video(img)
ret = {
"video_feats": video_feats, # N, C
}
return ret
def training_step(self, batch, batch_idx):
# gradually_freeze_by_layer(self, self.global_step, self.grad_unfreeze_int)
cotrain_utils.set_task(self)
# self.momentum_checkpoint()
# co-training
if "v" in batch and "i" in batch:
video_output, image_output = {}, {}
if not self.alt_data or batch_idx % 2 == 0:
video_output = self(batch["v"], mode="video")
if not self.alt_data or batch_idx % 2 == 1:
image_output = self(batch["i"], mode="image")
total_loss = sum([v for k, v in video_output.items() if "loss" in k]) + sum(
[v for k, v in image_output.items() if "loss" in k]
)
else:
output = self(batch, mode="video")
total_loss = sum([v for k, v in output.items() if "loss" in k])
return total_loss
def training_epoch_end(self, outs):
cotrain_utils.epoch_wrapup(self)
def validation_step(self, batch, batch_idx):
cotrain_utils.set_task(self)
if "v" in batch and "i" in batch:
video_output = self(batch["v"], mode="video")
image_output = self(batch["i"], mode="image")
else:
output = self(batch, mode="video")
def validation_epoch_end(self, outs):
cotrain_utils.epoch_wrapup(self)
self.text_ret = None
def test_step(self, batch, batch_idx):
cotrain_utils.set_task(self)
if "v" in batch and "i" in batch:
video_output = self(batch["v"], mode="video")
image_output = self(batch["i"], mode="image")
else:
output = self(batch, mode="video")
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, image_output))
return ret
def test_epoch_end(self, outs):
if isinstance(self.hparams.config["load_path"], str):
model_name = self.hparams.config["load_path"].split("/")[-1][:-5]
else:
model_name = "multiple"
if self.hparams.config["loss_names"]["vqa"] > 0:
objectives.vqa_test_wrapup(outs, model_name)
cotrain_utils.epoch_wrapup(self)
def configure_optimizers(self):
return cotrain_utils.set_schedule(self)
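# Illustrative sketch (for exposition only): _dequeue_and_enqueue above keeps
# the MoCo-style negative queues as a ring buffer, overwriting `batch_size`
# columns at the current pointer and advancing it modulo the queue length K.
# A minimal single-process version of that update (queue stored as [dim, K]):
def _toy_moco_queue_update(queue, ptr, keys):
    # queue: [dim, K]; keys: [batch_size, dim]; K must divide evenly by batch_size
    K = queue.shape[1]
    batch_size = keys.shape[0]
    assert K % batch_size == 0
    queue[:, ptr:ptr + batch_size] = keys.T
    ptr = (ptr + batch_size) % K
    return queue, ptr
# e.g. queue, ptr = _toy_moco_queue_update(torch.randn(128, 64), 0, torch.randn(8, 128))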
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/clip_module.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device)
for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros(
(max_size - local_size,), dtype=torch.uint8, device=tensor.device
)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
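# Illustrative sketch (for exposition only): all_gather above works around
# torch.distributed.all_gather requiring identically shaped tensors on every
# rank: each rank pickles its object, pads the byte tensor to the largest
# size in the group, and receivers trim each gathered tensor back using the
# exchanged size list. The single-process toy below mimics
# serialize -> pad -> gather -> trim -> unpickle without any process group.
def _toy_padded_gather(objects):
    import pickle
    import torch

    tensors = [torch.ByteTensor(list(pickle.dumps(obj))) for obj in objects]
    size_list = [t.numel() for t in tensors]
    max_size = max(size_list)
    padded = [torch.cat([t, t.new_zeros(max_size - t.numel())]) for t in tensors]
    # every "rank" now holds same-sized tensors; trim and unpickle on receipt
    return [pickle.loads(bytes(p[:size].tolist())) for p, size in zip(padded, size_list)]
# e.g. _toy_padded_gather(["a", {"b": 1}, list(range(100))]) round-trips the inputs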
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/dist_utils.py |
from requests import patch
import torch
import torch.nn as nn
from .coca import Residual, ParallelTransformerBlock, CrossAttention
from einops import repeat
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
def trunc_normal_(tensor, mean=0.0, std=1.0):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)
class CaptionDecoder(nn.Module):
def __init__(
self,
n_layers,
transformer_width,
vision_width,
transformer_heads,
vocab_size,
num_visual_queries=256,
use_checkpoint=False,
checkpoint_num=0,
):
super().__init__()
scale = transformer_width**-0.5
self.visual_queries = nn.Parameter(
scale * torch.randn(num_visual_queries, transformer_width)
)
dim_head = transformer_width // transformer_heads
ff_mult = 4
self.visual_attn_pooler = CrossAttention(
dim=transformer_width,
context_dim=vision_width,
dim_head=dim_head,
heads=transformer_heads,
norm_context=True,
)
self.visual_pooler_norm = nn.LayerNorm(transformer_width)
self.text_norm = nn.LayerNorm(transformer_width)
self.multimodal_layers = nn.ModuleList([])
for ind in range(n_layers):
self.multimodal_layers.append(
nn.ModuleList(
[
Residual(
ParallelTransformerBlock(
dim=transformer_width,
dim_head=dim_head,
heads=transformer_heads,
ff_mult=ff_mult,
)
),
Residual(
CrossAttention(
dim=transformer_width,
dim_head=dim_head,
heads=transformer_heads,
parallel_ff=True,
ff_mult=ff_mult,
)
),
]
)
)
self.predictor = nn.Sequential(
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, transformer_width),
nn.GELU(),
nn.LayerNorm(transformer_width),
nn.Linear(transformer_width, vocab_size),
)
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
def forward(self, image_feats, text_embeds):
# image_feats: # L, N, T, C
# text_feats: embeded text feats # N, L, C
# [L, N, T, C] -> [N, T * L, C]
image_feats = image_feats.permute(1, 0, 2, 3).flatten(1, 2)
visual_queries = repeat(
self.visual_queries, 'n d -> b n d', b=image_feats.shape[0]
)
image_feats = self.visual_pooler_norm(
self.visual_attn_pooler(visual_queries, image_feats)
)
text_embeds = self.text_norm(text_embeds)
# go through multimodal layers
for i, (attn_ff, cross_attn) in enumerate(self.multimodal_layers):
if self.use_checkpoint and i < self.checkpoint_num:
text_embeds = checkpoint.checkpoint(attn_ff, text_embeds)
text_embeds = checkpoint.checkpoint(
cross_attn, text_embeds, image_feats
)
else:
text_embeds = attn_ff(text_embeds)
text_embeds = cross_attn(text_embeds, image_feats)
logits = self.predictor(text_embeds)
return logits
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/clip_decoders.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# print(hidden_states.size()) # 64 x 237 x 768
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class vtmHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
x = self.fc(x)
return x
class MLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
x = self.transform(x)
x = self.decoder(x) + self.bias
return x
class MPPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, 256 * 3)
def forward(self, x):
x = self.transform(x)
x = self.decoder(x)
return x
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/heads.py |
from .internvideo import * | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/__init__.py |
import numbers
import random
import numpy as np
import PIL
import skimage
import skimage.transform
import torchvision
import torch
from torchvision import transforms
from PIL import Image
import torch
import cv2
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
if isinstance(size, numbers.Number):
im_h, im_w, im_c = clip[0].shape
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
np_inter = cv2.INTER_LINEAR
else:
np_inter = cv2.INTER_NEAREST
scaled = [
cv2.resize(img, size, interpolation=np_inter) for img in clip
]
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
if interpolation == 'bilinear':
pil_inter = PIL.Image.BILINEAR  # fix: 'bilinear' should map to BILINEAR
else:
pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return scaled
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
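# Example (illustrative): get_resize_sizes(480, 640, 256) -> (256, 341), i.e. the shorter
# side is scaled to `size` and the longer side keeps the original aspect ratio.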
def normalize(clip, mean, std, inplace=False):
if not _is_tensor_clip(clip):
raise TypeError('tensor is not a torch clip.')
if not inplace:
clip = clip.clone()
dtype = clip.dtype
dim = len(mean)
mean = torch.as_tensor(mean, dtype=dtype, device=clip.device)
std = torch.as_tensor(std, dtype=dtype, device=clip.device)
# print(clip_test.size())
# if dim == 3:
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
# else:
# clip_test.sub_(mean[:, None, None]).div_(std[:, None, None])
return clip
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray or PIL.Image): clip (list of images)
to be converted to a tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(
ch)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = tensor_clip.div(255)
return tensor_clip
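# Usage sketch (hypothetical frames, not part of the original file):
#   frames = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]
#   clip_tensor = ClipToTensor()(frames)   # float tensor of shape (3, 8, 224, 224) in [0, 1]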
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
class ColorDistortion(object):
def __init__(self, s=1.0):
self.s = s
self.color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s)
self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8)
self.rnd_gray = transforms.RandomGrayscale(p=0.2)
def __call__(self, video):
color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray])
return color_distort(video)
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = crop_clip(clip, y1, x1, h, w)
return cropped
class CornerCrop(object):
def __init__(self, size, crop_position=None):
self.size = size
if crop_position is None:
self.randomize = True
else:
self.randomize = False
self.crop_position = crop_position
self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']
def __call__(self, imgs):
t, h, w, c = imgs.shape
corner_imgs = list()
for n in self.crop_positions:
#print(n)
if n == 'c':
th, tw = (self.size, self.size)
x1 = int(round((w- tw) / 2.))
y1 = int(round((h - th) / 2.))
x2 = x1 + tw
y2 = y1 + th
elif n == 'tl':
x1 = 0
y1 = 0
x2 = self.size
y2 = self.size
elif n == 'tr':
x1 = w - self.size
y1 = 0
x2 = w
y2 = self.size
elif n == 'bl':
x1 = 0
y1 = h - self.size
x2 = self.size
y2 = h
elif n == 'br':
x1 = w - self.size
y1 = h - self.size
x2 = w
y2 = h
corner_imgs.append(imgs[:, y1:y2, x1:x2, :])
return corner_imgs
def randomize_parameters(self):
if self.randomize:
self.crop_position = self.crop_positions[random.randint(
0,
len(self.crop_positions) - 1)]
class RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
angle = random.uniform(self.degrees[0], self.degrees[1])
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angle) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angle) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class STA_RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
bsz = len(clip)
angle = random.uniform(self.degrees[0], self.degrees[1])
angles = [(i+1)/(bsz+1) * angle for i in range(bsz)]
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class Each_RandomRotation(object):
"""Rotate entire clip_test randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
bsz = len(clip)
angles = [random.uniform(self.degrees[0], self.degrees[1]) for i in range(bsz)]
# print(angles)
if isinstance(clip[0], np.ndarray):
rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = crop_clip(clip, y1, x1, h, w)
return cropped
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip_test
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
clip (list): list of PIL.Image
Returns:
list of PIL.Image: list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
# Create the sequence of per-image transform functions
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
jittered_clip = []
for img in clip:
for func in img_transforms:
img = func(img)  # chain the jitter ops instead of overwriting the result
jittered_clip.append(img)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class EachColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip_test
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
clip (list): list of PIL.Image
Returns:
list of PIL.Image: list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
# Create the sequence of per-image transform functions
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
jittered_clip = []
for img in clip:
for func in img_transforms:
img = func(img)  # chain the jitter ops instead of overwriting the result
jittered_clip.append(img)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image ' +
'but got list of {0}'.format(type(clip[0])))
return jittered_clip
class Normalize(object):
"""Normalize a clip_test with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this sync_dir
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This sync_dir acts out of place, i.e., it does not mutates the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, clip):
"""
Args:
clip (Tensor): Tensor clip of size (C, T, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor clip.
"""
return normalize(clip, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class TensorToNumpy(object):
def __init__(self):
pass
def __call__(self, clip):
np_clip = clip.permute(1, 2, 3, 0).cpu().detach().numpy()
pil_clip = [Image.fromarray(np.uint8(numpy_image)).convert('RGB') for numpy_image in np_clip]
return pil_clip
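# Composition sketch (hypothetical values; mirrors how these transforms are chained for
# video preprocessing elsewhere in this repo):
#   trans = Compose([TensorToNumpy(), Resize(256), CenterCrop(224),
#                    ClipToTensor(channel_nb=3), Normalize(mean, std)])
#   processed = trans(video_uint8)   # video_uint8: (C, T, H, W) uint8 tensor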
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/video_transform.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
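# Example (illustrative): get_pairs(('h', 'e', 'l', 'l', 'o</w>')) returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}, i.e. the adjacent symbol pairs
# that the BPE loop below repeatedly merges in rank order.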
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
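# Round-trip sketch (illustrative, not part of the original file):
#   tok = SimpleTokenizer()
#   ids = tok.encode("a video of a dog")   # list of BPE token ids
#   text = tok.decode(ids)                  # approximately recovers the input, lower-cased,
#                                           # with a space re-inserted after each word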
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/simple_tokenizer.py |
import torch
import numpy as np
import decord
from typing import Any, OrderedDict, Union, List
from pkg_resources import packaging
from torchvision import transforms
from . import video_transform
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from .clip_utils.model import build_model
from .clip_utils import load as load
__all__ = ["load_model", "load_video", "tokenize", "load"]
_tokenizer = _Tokenizer()
def load_model(path):
state = torch.load(path, map_location="cpu")["state_dict"]
state = {k[len("clip.") :]: v for k, v in state.items() if k.startswith("clip.")}
model = build_model(state_dict=state)
return model
def load_video(path):
video_reader = decord.VideoReader(path, num_threads=1, ctx=decord.cpu(0))
decord.bridge.set_bridge('torch')
video_len = len(video_reader)
video = video_reader.get_batch(np.linspace(0, video_len - 1, 8).astype(int)).byte()  # np.int is removed in recent NumPy releases
video = video.permute(3, 0, 1, 2)
input_mean = [0.48145466, 0.4578275, 0.40821073]
input_std = [0.26862954, 0.26130258, 0.27577711]
crop_size, scale_size = 224, 256
trans = transforms.Compose([
video_transform.TensorToNumpy(),
video_transform.Resize(scale_size),
video_transform.CenterCrop(crop_size),
video_transform.ClipToTensor(channel_nb=3),
video_transform.Normalize(mean=input_mean, std=input_std)
])
video = trans(video)
return video
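# Note: the returned clip is a float tensor of shape (3, 8, 224, 224): 8 uniformly sampled
# frames, resized, centre-cropped to 224x224 and normalised with the CLIP mean/std.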
def tokenize(
texts: Union[str, List[str]],
context_length: int = 77,
truncate: bool = False,
return_special_tokens_mask: bool = False,
) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(
f"Input {texts[i]} is too long for context length {context_length}"
)
result[i, : len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens) :] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
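# End-to-end sketch (paths and texts are hypothetical placeholders, not part of the
# original file):
#   model = load_model("/path/to/InternVideo-MM-checkpoint.ckpt")
#   video = load_video("/path/to/clip.mp4").unsqueeze(0)        # (1, 3, 8, 224, 224)
#   tokens = tokenize(["a person playing guitar"], truncate=True)
#   v_feat = model.encode_video(video)
#   t_feat = model.encode_text(tokens)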
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/internvideo.py |
from .clip import *
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
from . import utils
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model)),
]
)
)
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = (
self.attn_mask.to(dtype=x.dtype, device=x.device)
if self.attn_mask is not None
else None
)
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(
self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None,
use_checkpoint=False,
checkpoint_num=[0, 0],
):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(
*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
)
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
def forward(self, x: torch.Tensor):
if self.use_checkpoint and self.checkpoint_num[1] > 0:
segments = min(len(self.resblocks), self.checkpoint_num[1])
return checkpoint_sequential(self.resblocks, segments, x)
else:
return self.resblocks(x)
class VideoIntern(nn.Module):
def __init__(
self,
embed_dim: int,
# vision
vision_width: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
# uni
n_layers=4,
n_dim=768,
n_head=12,
drop_path_rate=0.0,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
use_image_attnmap=True,
backbone='vit_2plus1d_dw_bias_b16',
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
use_checkpoint=False,
checkpoint_num=[0],
):
super().__init__()
self.vision_width = n_dim
self.context_length = context_length
self.visual = utils.__dict__[backbone](
pretrained=False,
t_size=t_size,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
n_dim=n_dim,
n_head=n_head,
return_list=return_list,
drop_path_rate=drop_path_rate,
backbone_drop_path_rate=drop_path_rate,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
)
self.visual_ln_post = nn.LayerNorm(n_dim)
scale = n_dim**-0.5
self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim))
self.return_qk = use_image_attnmap
self.return_num = n_layers
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(
torch.empty(self.context_length, transformer_width)
)
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.embed_dim = embed_dim
# We separate the mask embedding so that pretrained weights can still be loaded
self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width))
# # To keep the num_embeddings unchanged, we add this to embedded text
# self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.normal_(self.text_mask_embedding, std=0.02)
# nn.init.constant_(self.eot_token_embedding, 0.0)
proj_std = (self.transformer.width**-0.5) * (
(2 * self.transformer.layers) ** -0.5
)
attn_std = self.transformer.width**-0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
nn.init.constant_(self.visual_ln_post.weight, 1.0)
nn.init.constant_(self.visual_ln_post.bias, 0.0)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
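# e.g. for context_length == 3 the mask is
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so each text token can only attend to itself and to earlier positions.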
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_video(
self, video, return_all_feats=False, masked_indices=None, mode="video"
):
# video: [N, C, T, H, W]
feats = self.visual(video, return_all_feats=return_all_feats, mode=mode)
if return_all_feats:
x, feats = feats
else:
x = feats
x = self.visual_ln_post(x)
if self.visual_proj is not None:
x = x @ self.visual_proj
if return_all_feats:
return x, feats # [N, C], [L, N, T, C]
return x
def encode_text(self, text, masked_indices=None, return_all_feats=False):
# assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \
# "The last token of each sentence should be eot_token, check the input"
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
# x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding
if masked_indices is not None:
x[masked_indices] = self.text_mask_embedding
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]
if self.text_projection is not None:
feats = feats @ self.text_projection
if return_all_feats:
return feats, x
return feats
def build_model(
state_dict: dict,
n_layers=4,
n_dim=768,
n_head=12,
mlp_factor=4.0,
drop_path_rate=0.0,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5,
t_size=8,
spatial_size=14,
use_t_conv=True,
use_image_attnmap=True,
use_t_pos_embed=True,
no_pretrain=False,
init_zero=True,
use_checkpoint=False,
checkpoint_num=[0],
):
if "visual.proj" in state_dict:
state_dict["visual_proj"] = state_dict["visual.proj"]
state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"]
state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"]
del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"]
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[
k
for k in state_dict.keys()
if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
]
)
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(
k.split(".")[2]
for k in state_dict
if k.startswith(f"transformer.resblocks")
)
)
try:
vision_width = state_dict["visual_proj"].shape[0]
except:
vision_width = state_dict["visual.proj"].shape[0]
n_dim = vision_width
if vision_width == 768:
backbone = "vit_only_global_b16"
n_head = 12
return_list = [8, 9, 10, 11]
elif vision_width == 1024:
backbone = "vit_only_global_l14"
n_head = 16
return_list = [20, 21, 22, 23]
else:
raise NotImplementedError
model = VideoIntern(
embed_dim,
vision_width,
context_length,
vocab_size,
transformer_width,
transformer_heads,
transformer_layers,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
t_size=t_size,
use_image_attnmap=use_image_attnmap,
backbone=backbone,
return_list=return_list,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
def load_state_dict_3d(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
print(f'Ignore: {k}')
continue
print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
load_state_dict_3d(model, state_dict)
return model.eval()
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/model.py |
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()  # required by tokenize() below; leaving this commented out raises a NameError
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
}
def _download(url: str, root: str):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False, download_root: str = None,
# evl
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14,
use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True,
use_checkpoint=False, checkpoint_num=[0, 0, 0],
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
with open(model_path, 'rb') as opened_file:
try:
# loading JIT archive
model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(
state_dict or model.state_dict(),
n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size,
use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain,
init_zero=init_zero, use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num,
).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
truncate: bool
Whether to truncate the text in case its encoding is longer than the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
else:
result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate:
tokens = tokens[:context_length]
tokens[-1] = eot_token
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
special_tokens_mask[i, len(tokens):] = 1
if return_special_tokens_mask:
return result, special_tokens_mask
return result
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from .attention_module import multi_head_attention_forward
class _LinearWithBias(Linear):
bias: Tensor
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1, \dots, head_h) W^O
\quad \text{where } head_i = \text{Attention}(Q W_i^Q, K W_i^K, V W_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None, return_qk=False):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- return_qk: whether return Q and K.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if return_qk:
if not self._qkv_same_embed_dim:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, return_qk=True)
else:
q, k, attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, return_qk=True)
return q, k, attn_output, attn_output_weights
else:
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
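# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file). The constructor
# follows torch.nn.MultiheadAttention, i.e. MultiheadAttention(embed_dim,
# num_heads); inputs use the (L, N, E) layout documented above and
# return_qk=True additionally yields the projected (pre-softmax) Q and K.
#
#   attn = MultiheadAttention(256, 8)
#   x = torch.randn(10, 2, 256)                  # (L, N, E)
#   out, weights = attn(x, x, x)                 # standard path
#   q, k, out, weights = attn(x, x, x, return_qk=True)
# ----------------------------------------------------------------------------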
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
import logging
logger = logging.getLogger(__name__)
MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model'
_MODELS = {
"ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"),
"ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"),
"ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"),
"ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"),
}
def conv_1x1x1(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)
def conv_3x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)
def conv_1x3x3(inp, oup, groups=1):
return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups)
def bn_3d(dim):
return nn.BatchNorm3d(dim)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None, drop_path=0.0,
):
super().__init__()
self.n_head = n_head
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# logger.info(f'Drop path rate: {drop_path}')
# spatial
self.attn = MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x, T=8, use_checkpoint=False):
# x: 1+HW, NT, C
# MHSA
if use_checkpoint:
attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x))
x = x + self.drop_path(attn_out)
else:
x = x + self.drop_path(self.attention(self.ln_1(x)))
# FFN
if use_checkpoint:
mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x))
x = x + self.drop_path(mlp_out)
else:
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
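# Hypothetical shape sketch (not part of the original file): the block operates
# on flattened spatial tokens with the class token first, i.e. x of shape
# (1 + H*W, N*T, C), and returns a tensor of the same shape.
#
#   blk = ResidualAttentionBlock(d_model=768, n_head=12)
#   x = torch.randn(1 + 7 * 7, 2 * 8, 768)       # 7x7 patches, batch 2, T=8
#   y = blk(x, T=8)                              # (1 + 49, 16, 768)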
class Extractor(nn.Module):
def __init__(
self, d_model, n_head, attn_mask=None,
mlp_factor=4.0, dropout=0.0, drop_path=0.0,
):
super().__init__()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# logger.info(f'Drop path rate: {drop_path}')
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = nn.LayerNorm(d_model)
d_mlp = round(mlp_factor * d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_mlp)),
("gelu", QuickGELU()),
("dropout", nn.Dropout(dropout)),
("c_proj", nn.Linear(d_mlp, d_model))
]))
self.ln_2 = nn.LayerNorm(d_model)
self.ln_3 = nn.LayerNorm(d_model)
self.attn_mask = attn_mask
# zero init
nn.init.xavier_uniform_(self.attn.in_proj_weight)
nn.init.constant_(self.attn.out_proj.weight, 0.)
nn.init.constant_(self.attn.out_proj.bias, 0.)
nn.init.xavier_uniform_(self.mlp[0].weight)
nn.init.constant_(self.mlp[-1].weight, 0.)
nn.init.constant_(self.mlp[-1].bias, 0.)
def attention(self, x: torch.Tensor, y: torch.Tensor):
#self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
# return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0]
assert self.attn_mask is None # not implemented
# manual forward to add position information
d_model = self.ln_1.weight.size(0)
q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model]
k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model]
v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:]
Tx, Ty, N = q.size(0), k.size(0), q.size(1)
q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3)
aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5))
aff = aff.softmax(dim=-1)
out = aff @ v
out = out.permute(2, 0, 1, 3).flatten(2)
out = self.attn.out_proj(out)
return out
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
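# Hypothetical usage sketch (not part of the original file): the Extractor is a
# cross-attention block in which a (learnable) class token attends over the
# flattened spatio-temporal tokens of the backbone.
#
#   dec = Extractor(d_model=768, n_head=12)
#   cls_tok = torch.zeros(1, 2, 768)             # (1, N, C) query
#   vid_tok = torch.randn(8 * 50, 2, 768)        # (T * L, N, C) keys/values
#   cls_tok = dec(cls_tok, vid_tok)              # (1, N, C)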
class Transformer(nn.Module):
def __init__(
self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.T = t_size
self.return_list = return_list
# Backbone
b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)]
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, attn_mask,
drop_path=b_dpr[i],
) for i in range(layers)
])
# checkpoint
self.use_checkpoint = use_checkpoint
self.checkpoint_num = checkpoint_num
# logger.info(f'Use checkpoint: {self.use_checkpoint}')
# logger.info(f'Checkpoint number: {self.checkpoint_num}')
# Extractor
assert n_layers == len(return_list)
self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim))
self.dpe = nn.ModuleList([
nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim)
for i in range(n_layers)
])
for m in self.dpe:
nn.init.constant_(m.bias, 0.)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)]
self.dec = nn.ModuleList([
Extractor(
n_dim, n_head, mlp_factor=mlp_factor,
dropout=mlp_dropout[i], drop_path=dpr[i],
) for i in range(n_layers)
])
# # projection
# self.proj = nn.Sequential(
# nn.LayerNorm(n_dim),
# nn.Dropout(cls_dropout),
# nn.Linear(n_dim, num_classes),
# )
self.balance = nn.Parameter(torch.zeros((n_dim)))
self.sigmoid = nn.Sigmoid()
def forward(self, x, mode='video', return_all_feats=False):
if mode == 'video':
T_down = self.T
else:
T_down = 1
L, NT, C = x.shape
N = NT // T_down
H = W = int((L - 1) ** 0.5)
cls_token = self.temporal_cls_token.repeat(1, N, 1)
j = -1
for i, resblock in enumerate(self.resblocks):
if self.use_checkpoint and i < self.checkpoint_num[0]:
x = resblock(x, T_down, use_checkpoint=True)
else:
x = resblock(x, T_down)
if i in self.return_list:
j += 1
tmp_x = x.clone()
tmp_x = tmp_x.view(L, N, T_down, C)
# dpe
_, tmp_feats = tmp_x[:1], tmp_x[1:].clone()
tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W)
tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1)
tmp_x[1:] = tmp_x[1:] + tmp_feats
# enhancer
tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C
cls_token = self.dec[j](cls_token, tmp_x)
weight = self.sigmoid(self.balance)
        residual = x.view(L, N, T_down, C)[0].mean(1)  # take the [CLS] token (index 0), average over T -> (N, C)
# return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual)
feats = (1 - weight) * cls_token[0, :, :] + weight * residual
if return_all_feats:
return feats, x.view(L, N, T_down, C)
return feats
class VisionTransformer(nn.Module):
def __init__(
self,
# backbone
input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0.,
use_checkpoint=False, checkpoint_num=[0], t_size=8,
# extractor
return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(
width, layers, heads,
backbone_drop_path_rate=backbone_drop_path_rate,
use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size,
return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head,
mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout, num_classes=num_classes,
)
def forward(self, x, mode='video', return_all_feats=False):
        x = self.conv1(x)  # shape = [N, width, T, grid, grid]
N, C, T, H, W = x.shape
x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
out = self.transformer(x, mode=mode, return_all_feats=return_all_feats)
return out
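# Hypothetical usage sketch (not part of the original file): videos enter as
# (N, 3, T, H, W); each frame is tokenised by conv1 and the extractor pools a
# clip-level feature of size n_dim (width and n_dim are equal by default).
#
#   vit = VisionTransformer(input_resolution=224, patch_size=16, width=768,
#                           layers=12, heads=12, output_dim=512, t_size=8)
#   video = torch.randn(2, 3, 8, 224, 224)
#   feats = vit(video)                           # (2, 768)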
def inflate_weight(weight_2d, time_dim, center=True):
if center:
weight_3d = torch.zeros(*weight_2d.shape)
weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
middle_idx = time_dim // 2
weight_3d[:, :, middle_idx, :, :] = weight_2d
else:
weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
weight_3d = weight_3d / time_dim
return weight_3d
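# Hypothetical example (not part of the original file): a 2D patch-embedding
# kernel (out, in, kh, kw) is inflated to a 3D kernel (out, in, time_dim, kh, kw).
# With center=True the 2D weights go into the middle temporal slice; otherwise
# they are replicated along time and rescaled by 1 / time_dim.
#
#   w2d = torch.randn(768, 3, 16, 16)
#   w3d = inflate_weight(w2d, time_dim=1)        # (768, 3, 1, 16, 16)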
def load_state_dict(model, state_dict):
state_dict_3d = model.state_dict()
for k in state_dict.keys():
if state_dict[k].shape != state_dict_3d[k].shape:
if len(state_dict_3d[k].shape) <= 2:
logger.info(f'Ignore: {k}')
continue
logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}')
time_dim = state_dict_3d[k].shape[2]
state_dict[k] = inflate_weight(state_dict[k], time_dim)
model.load_state_dict(state_dict, strict=False)
def vit_only_global_b32(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_b16(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[8, 9, 10, 11],
n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
def vit_only_global_l14_336(
pretrained=True, use_checkpoint=False, checkpoint_num=[0],
t_size=16, backbone_drop_path_rate=0.,
return_list=[20, 21, 22, 23],
n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0.,
mlp_dropout=[0.5, 0.5, 0.5, 0.5],
cls_dropout=0.5, num_classes=400,
):
model = VisionTransformer(
input_resolution=336,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768,
use_checkpoint=use_checkpoint,
checkpoint_num=checkpoint_num,
t_size=t_size,
backbone_drop_path_rate=backbone_drop_path_rate,
return_list=return_list,
n_layers=n_layers,
n_dim=n_dim,
n_head=n_head,
mlp_factor=mlp_factor,
drop_path_rate=drop_path_rate,
mlp_dropout=mlp_dropout,
cls_dropout=cls_dropout,
num_classes=num_classes,
)
if pretrained:
logger.info('load pretrained weights')
state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu')
load_state_dict(model, state_dict)
return model.eval()
if __name__ == '__main__':
import time
from fvcore.nn import FlopCountAnalysis
from fvcore.nn import flop_count_table
import numpy as np
seed = 4217
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
num_frames = 8
model = vit_only_global_l14(
pretrained=False,
t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4,
use_checkpoint=True, checkpoint_num=[0],
)
flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224))
s = time.time()
logger.info(flop_count_table(flops, max_depth=1))
logger.info(time.time()-s) | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/clip_vit_only_global.py |
# from .evl_module import TransformerDecoder
from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - return_qk: whether to return Q and K.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
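# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file): the functional form
# can be driven by the parameters of an nn.MultiheadAttention module; all
# tensors follow the (L, N, E) layout documented above.
#
#   mha = torch.nn.MultiheadAttention(embed_dim=256, num_heads=8)
#   x = torch.randn(10, 2, 256)
#   out, weights = multi_head_attention_forward(
#       x, x, x, 256, 8,
#       mha.in_proj_weight, mha.in_proj_bias,
#       mha.bias_k, mha.bias_v, mha.add_zero_attn,
#       mha.dropout, mha.out_proj.weight, mha.out_proj.bias,
#       training=False)
# ----------------------------------------------------------------------------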
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
return_qk: bool = False,
rpb: Tensor = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - return_qk: whether to return Q and K.
        - rpb: relative position bias
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if torch.equal(query, key) and torch.equal(key, value):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
# L, N, E
if return_qk:
return_q = q.clone() / scaling
return_k = k.clone()
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
if rpb is not None:
attn_output_weights = attn_output_weights + rpb
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if return_qk:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return return_q, return_k, attn_output, None
else:
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
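# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file): this variant only
# differs from the plain functional form by the optional `rpb` (relative
# position bias), which is added to the raw attention logits of shape
# (bsz * num_heads, tgt_len, src_len) right before the softmax.
#
#   mha = torch.nn.MultiheadAttention(embed_dim=256, num_heads=8)
#   x = torch.randn(10, 2, 256)
#   rpb = torch.zeros(2 * 8, 10, 10)             # one bias map per head and sample
#   out, weights = multi_head_attention_forward(
#       x, x, x, 256, 8,
#       mha.in_proj_weight, mha.in_proj_bias,
#       mha.bias_k, mha.bias_v, mha.add_zero_attn,
#       mha.dropout, mha.out_proj.weight, mha.out_proj.bias,
#       training=False, rpb=rpb)
# ----------------------------------------------------------------------------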
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module_bias.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/gadgets/__init__.py |
|
import itertools
import torch
from torchmetrics import Metric
# import torchmetrics as Metric
def order_class_index(order):
"""Return the index of the order in its full permutation.
Args:
order (tensor): e.g. [0,1,2]
"""
classes = list(itertools.permutations(list(range(len(order)))))
return classes.index(tuple(order.tolist()))
class Accuracy(Metric):
def __init__(self, dist_sync_on_step=True):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("correct", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, logits, target, unfilterd=False):
logits, target = (
logits.detach().to(self.correct.device),
target.detach().to(self.correct.device),
)
preds = logits.argmax(dim=-1)
preds = preds[target != -100]
unfilter_num = target.numel()
target = target[target != -100]
if target.numel() == 0:
return 1
assert preds.shape == target.shape
self.correct += torch.sum(preds == target)
if unfilterd:
# print("no filter")
self.total += unfilter_num
else:
self.total += target.numel()
def compute(self):
return self.correct / self.total
class Scalar(Metric):
def __init__(self, dist_sync_on_step=True):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("scalar", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, scalar):
if isinstance(scalar, torch.Tensor):
scalar = scalar.detach().to(self.scalar.device)
else:
scalar = torch.tensor(scalar).float().to(self.scalar.device)
self.scalar += scalar
self.total += 1
def compute(self):
return self.scalar / self.total
class VQAScore(Metric):
def __init__(self, dist_sync_on_step=True):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("score", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, logits, target):
logits, target = (
logits.detach().float().to(self.score.device),
target.detach().float().to(self.score.device),
)
logits = torch.max(logits, 1)[1]
one_hots = torch.zeros(*target.size()).to(target)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = one_hots * target
self.score += scores.sum()
self.total += len(logits)
def compute(self):
return self.score / self.total
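# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file): the metrics follow
# the torchmetrics update()/compute() protocol; Accuracy ignores targets equal
# to -100 and Scalar keeps a running mean of the logged values.
#
#   acc = Accuracy()
#   acc.update(torch.randn(4, 10), torch.tensor([1, 3, -100, 7]))
#   acc.compute()                                # correct / total
#
#   loss_meter = Scalar()
#   loss_meter.update(0.73)
#   loss_meter.compute()                         # mean of logged scalars
# ----------------------------------------------------------------------------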
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/gadgets/my_metrics.py |
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmaction/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
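# Hypothetical example (not part of the original file): given a requirements.txt
# containing the single line "numpy>=1.19.5", parse_requirements() returns
# ['numpy>=1.19.5'], while parse_requirements(with_version=False) returns ['numpy'].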
if __name__ == '__main__':
setup(
name='mmaction2',
version=get_version(),
description='OpenMMLab Action Understanding Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
maintainer='MMAction2 Authors',
maintainer_email='[email protected]',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
keywords='computer vision, action understanding',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
url='https://github.com/open-mmlab/mmaction2',
license='Apache License 2.0',
setup_requires=parse_requirements('requirements/build.txt'),
tests_require=parse_requirements('requirements/tests.txt'),
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
zip_safe=False)
| InternVideo-main | Downstream/Open-Set-Action-Recognition/setup.py |
# Copyright (c) Open-MMLab. All rights reserved.
__version__ = '0.9.0'
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
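# Hypothetical examples (not part of the original file):
#   parse_version_info('0.9.0')   -> (0, 9, 0)
#   parse_version_info('1.0rc1')  -> (1, 0, 'rc1')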
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/version.py |
import mmcv
from mmcv import digit_version
from .version import __version__
mmcv_minimum_version = '1.1.1'
mmcv_maximum_version = '1.3'
mmcv_version = digit_version(mmcv.__version__)
assert (digit_version(mmcv_minimum_version) <= mmcv_version
<= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/__init__.py |
from .inference import inference_recognizer, init_recognizer
from .test import multi_gpu_test, single_gpu_test, collect_results_cpu
from .train import train_model
__all__ = [
'train_model', 'init_recognizer', 'inference_recognizer', 'multi_gpu_test',
'single_gpu_test', 'collect_results_cpu'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/__init__.py |
import os.path as osp
import pickle
import shutil
import tempfile
import pdb
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
def single_gpu_test(model, data_loader):
"""Test model with a single gpu.
This method tests model with a single gpu and displays test progress bar.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
with torch.no_grad():
result = model(return_loss=False, **data)
results.extend(result)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
return results
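# Hypothetical usage sketch (not part of the original file): the model is
# expected to be wrapped (e.g. in MMDataParallel) so that
# model(return_loss=False, **data) returns a list of per-sample predictions.
#
#   outputs = single_gpu_test(model, data_loader)
#   assert len(outputs) == len(data_loader.dataset)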
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=True):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu. By setting 'gpu_collect=True',
    it encodes results to gpu tensors and uses gpu communication for results
    collection. In cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode. Default: None
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Default: True
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
with torch.no_grad():
# pdb.set_trace()
result = model(return_loss=False, **data)
results.extend(result)
if rank == 0:
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
"""Collect results in cpu mode.
It saves the results on different gpus to 'tmpdir' and collects
them by the rank 0 worker.
Args:
result_part (list): Results to be collected
size (int): Result size.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode. Default: None
Returns:
list: Ordered results.
"""
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# synchronizes all processes to make sure tmpdir exist
dist.barrier()
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    # synchronizes all processes for loading the pickle file
dist.barrier()
# collect all parts
if rank != 0:
return None
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
"""Collect results in gpu mode.
    It encodes results to gpu tensors and uses gpu communication for results
collection.
Args:
result_part (list): Results to be collected
size (int): Result size.
Returns:
list: Ordered results.
"""
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
return None
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/test.py |
import copy as cp
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, OptimizerHook,
build_optimizer)
from mmcv.runner.hooks import Fp16OptimizerHook
from ..core import (DistEpochEvalHook, EpochEvalHook,
OmniSourceDistSamplerSeedHook, OmniSourceRunner, AnnealingRunner)
from ..datasets import build_dataloader, build_dataset
from ..utils import get_root_logger
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Train model entry function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
distributed (bool): Whether to use distributed training.
Default: False.
validate (bool): Whether to do evaluation. Default: False.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None
"""
logger = get_root_logger(log_level=cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
num_gpus=len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
pin_memory=cfg.data.get('pin_memory', True)) # by default, pin_memory=True
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('train_dataloader', {}))
if cfg.omnisource:
# The option can override videos_per_gpu
train_ratio = cfg.data.get('train_ratio', [1] * len(dataset))
omni_videos_per_gpu = cfg.data.get('omni_videos_per_gpu', None)
if omni_videos_per_gpu is None:
dataloader_settings = [dataloader_setting] * len(dataset)
else:
dataloader_settings = []
for videos_per_gpu in omni_videos_per_gpu:
this_setting = cp.deepcopy(dataloader_setting)
this_setting['videos_per_gpu'] = videos_per_gpu
dataloader_settings.append(this_setting)
data_loaders = [
build_dataloader(ds, **setting)
for ds, setting in zip(dataset, dataloader_settings)
]
else:
data_loaders = [
build_dataloader(ds, **dataloader_setting) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
# Runner = OmniSourceRunner if cfg.omnisource else EpochBasedRunner
if cfg.omnisource:
Runner = OmniSourceRunner
elif cfg.get('annealing_runner', False):
Runner = AnnealingRunner # add annealing runner support
else:
Runner = EpochBasedRunner
runner = Runner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
if distributed:
if cfg.omnisource:
runner.register_hook(OmniSourceDistSamplerSeedHook())
else:
runner.register_hook(DistSamplerSeedHook())
if validate:
eval_cfg = cfg.get('evaluation', {})
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
pin_memory=cfg.data.get('pin_memory', True), # by default, pin_memory=True
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('val_dataloader', {}))
val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
eval_hook = DistEpochEvalHook if distributed else EpochEvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner_kwargs = dict()
if cfg.omnisource:
runner_kwargs = dict(train_ratio=train_ratio)
if cfg.get('annealing_runner', False):
runner_kwargs.update(annealing=True)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs)
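# --- Hedged sketch (not part of the original file): ``train_model`` above
# builds a base dataloader setting dict and lets ``cfg.data.train_dataloader``
# (or ``val_dataloader``) override individual keys through
# ``dict(base, **override)``. A dict-only, runnable illustration of that merge
# pattern with made-up values:
def _dataloader_setting_merge_demo():
    base_setting = dict(videos_per_gpu=1, workers_per_gpu=1, dist=False)
    train_override = dict(videos_per_gpu=8)        # stands in for cfg.data.train_dataloader
    merged = dict(base_setting, **train_override)  # override wins on key clashes
    assert merged == dict(videos_per_gpu=8, workers_per_gpu=1, dist=False)
    return merged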
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/train.py |
import os
import os.path as osp
from operator import itemgetter
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from ..datasets.pipelines import Compose
from ..models import build_recognizer
def init_recognizer(config,
checkpoint=None,
device='cuda:0',
use_frames=False):
"""Initialize a recognizer from config file.
Args:
config (str | :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str | None, optional): Checkpoint path/url. If set to None,
the model will not load any weights. Default: None.
device (str | :obj:`torch.device`): The desired device of returned
tensor. Default: 'cuda:0'.
use_frames (bool): Whether to use rawframes as input. Default:False.
Returns:
nn.Module: The constructed recognizer.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if ((use_frames and config.dataset_type != 'RawframeDataset')
or (not use_frames and config.dataset_type != 'VideoDataset')):
input_type = 'rawframes' if use_frames else 'video'
        raise RuntimeError('input data type should be consistent with the '
f'dataset type in config, but got input type '
f"'{input_type}' and dataset type "
f"'{config.dataset_type}'")
# pretrained model is unnecessary since we directly load checkpoint later
config.model.backbone.pretrained = None
model = build_recognizer(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
load_checkpoint(model, checkpoint, map_location=device)
model.cfg = config
model.to(device)
model.eval()
return model
def inference_recognizer(model, video_path, label_path, use_frames=False):
"""Inference a video with the detector.
Args:
model (nn.Module): The loaded recognizer.
video_path (str): The video file path/url or the rawframes directory
path. If ``use_frames`` is set to True, it should be rawframes
directory path. Otherwise, it should be video file path.
label_path (str): The label file path.
use_frames (bool): Whether to use rawframes as input. Default:False.
Returns:
        list[tuple(str, float)]: Top-5 recognition results.
"""
if not (osp.exists(video_path) or video_path.startswith('http')):
raise RuntimeError(f"'{video_path}' is missing")
if osp.isfile(video_path) and use_frames:
raise RuntimeError(
f"'{video_path}' is a video file, not a rawframe directory")
if osp.isdir(video_path) and not use_frames:
raise RuntimeError(
f"'{video_path}' is a rawframe directory, not a video file")
cfg = model.cfg
device = next(model.parameters()).device # model device
# construct label map
with open(label_path, 'r') as f:
label = [line.strip() for line in f]
# build the data pipeline
test_pipeline = cfg.data.test.pipeline
test_pipeline = Compose(test_pipeline)
# prepare data
if use_frames:
filename_tmpl = cfg.data.test.get('filename_tmpl', 'img_{:05}.jpg')
modality = cfg.data.test.get('modality', 'RGB')
start_index = cfg.data.test.get('start_index', 1)
data = dict(
frame_dir=video_path,
total_frames=len(os.listdir(video_path)),
# assuming files in ``video_path`` are all named with ``filename_tmpl`` # noqa: E501
label=-1,
start_index=start_index,
filename_tmpl=filename_tmpl,
modality=modality)
else:
start_index = cfg.data.test.get('start_index', 0)
data = dict(
filename=video_path,
label=-1,
start_index=start_index,
modality='RGB')
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
scores = model(return_loss=False, **data)[0]
score_tuples = tuple(zip(label, scores))
score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)
top5_label = score_sorted[:5]
return top5_label
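# --- Hedged usage sketch (illustration only; the config, checkpoint, video and
# label-map paths below are hypothetical, and this block is intentionally left
# as comments because it needs real files to run):
#
#     model = init_recognizer('configs/some_recognizer_config.py',
#                             'checkpoints/some_checkpoint.pth',
#                             device='cuda:0')
#     top5 = inference_recognizer(model, 'demo/demo.mp4', 'demo/label_map.txt')
#     for name, score in top5:
#         print(f'{name}: {score:.4f}')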
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/inference.py |
from .evaluation import * # noqa: F401, F403
from .lr import * # noqa: F401, F403
from .optimizer import * # noqa: F401, F403
from .runner import * # noqa: F401, F403
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/__init__.py |
from .copy_of_sgd import CopyOfSGD
from .tsm_optimizer_constructor import TSMOptimizerConstructor
__all__ = ['CopyOfSGD', 'TSMOptimizerConstructor']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/__init__.py |
import torch
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.utils import SyncBatchNorm, _BatchNorm, _ConvNd
@OPTIMIZER_BUILDERS.register_module()
class TSMOptimizerConstructor(DefaultOptimizerConstructor):
"""Optimizer constructor in TSM model.
    This constructor builds the optimizer differently from the default one.
1. Parameters of the first conv layer have default lr and weight decay.
2. Parameters of BN layers have default lr and zero weight decay.
    3. If the field "fc_lr5" in paramwise_cfg is set to True, the weight of
        the last fc layer in cls_head has a 5x lr multiplier and its bias has
        a 10x lr multiplier with zero weight decay.
4. Weights of other layers have default lr and weight decay, and biases
have a 2x lr multiplier and zero weight decay.
"""
def add_params(self, params, model):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
# use fc_lr5 to determine whether to specify higher multi-factor
# for fc layer weights and bias.
fc_lr5 = self.paramwise_cfg['fc_lr5']
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
conv_cnt = 0
for m in model.modules():
if isinstance(m, _ConvNd):
m_params = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(m_params[0])
if len(m_params) == 2:
first_conv_bias.append(m_params[1])
else:
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m, torch.nn.Linear):
m_params = list(m.parameters())
normal_weight.append(m_params[0])
normal_bias.append(m_params[1])
elif isinstance(m,
(_BatchNorm, SyncBatchNorm, torch.nn.GroupNorm)):
for param in list(m.parameters()):
if param.requires_grad:
bn.append(param)
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError(f'New atomic module type: {type(m)}. '
'Need to give it a learning policy')
# pop the cls_head fc layer params
last_fc_weight = normal_weight.pop()
last_fc_bias = normal_bias.pop()
if fc_lr5:
lr5_weight.append(last_fc_weight)
lr10_bias.append(last_fc_bias)
else:
normal_weight.append(last_fc_weight)
normal_bias.append(last_fc_bias)
params.append({
'params': first_conv_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': first_conv_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({
'params': normal_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': normal_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0})
params.append({
'params': lr5_weight,
'lr': self.base_lr * 5,
'weight_decay': self.base_wd
})
params.append({
'params': lr10_bias,
'lr': self.base_lr * 10,
'weight_decay': 0
})
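# --- Hedged sketch (not part of the original file): the constructor above
# ultimately produces torch.optim parameter groups with per-group lr and
# weight_decay multipliers. A minimal runnable illustration of that idea on a
# toy model; the layer choices and multipliers are illustrative only.
def _param_group_demo():
    import torch.nn as nn
    conv = nn.Conv2d(3, 8, 3)
    bn = nn.BatchNorm2d(8)
    fc = nn.Linear(8, 4)
    base_lr, base_wd = 0.01, 1e-4
    param_groups = [
        dict(params=[conv.weight], lr=base_lr, weight_decay=base_wd),
        dict(params=[conv.bias], lr=base_lr * 2, weight_decay=0),       # 2x lr, no wd
        dict(params=list(bn.parameters()), lr=base_lr, weight_decay=0), # BN: no wd
        dict(params=[fc.weight], lr=base_lr * 5, weight_decay=base_wd), # "fc_lr5" weight
        dict(params=[fc.bias], lr=base_lr * 10, weight_decay=0),        # "fc_lr5" bias
    ]
    return torch.optim.SGD(param_groups, lr=base_lr, momentum=0.9)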
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/tsm_optimizer_constructor.py |
from mmcv.runner import OPTIMIZERS
from torch.optim import SGD
@OPTIMIZERS.register_module()
class CopyOfSGD(SGD):
"""A clone of torch.optim.SGD.
A customized optimizer could be defined like CopyOfSGD. You may derive from
built-in optimizers in torch.optim, or directly implement a new optimizer.
"""
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/copy_of_sgd.py |
# Copyright (c) Open-MMLab. All rights reserved.
import time
import warnings
import mmcv
from mmcv.runner import EpochBasedRunner, Hook
from mmcv.runner.utils import get_host_info
def cycle(iterable):
iterator = iter(iterable)
while True:
try:
yield next(iterator)
except StopIteration:
iterator = iter(iterable)
class OmniSourceDistSamplerSeedHook(Hook):
def before_epoch(self, runner):
for data_loader in runner.data_loaders:
if hasattr(data_loader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(data_loader.batch_sampler.sampler, 'set_epoch'):
# batch sampler in pytorch wraps the sampler as its attributes.
data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
class OmniSourceRunner(EpochBasedRunner):
"""OmniSource Epoch-based Runner.
    This runner trains models epoch by epoch; the epoch length is defined by
    dataloader[0], which is the main dataloader.
"""
def run_iter(self, data_batch, train_mode, source, **kwargs):
if self.batch_processor is not None:
outputs = self.batch_processor(
self.model, data_batch, train_mode=train_mode, **kwargs)
elif train_mode:
outputs = self.model.train_step(data_batch, self.optimizer,
**kwargs)
else:
outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('"batch_processor()" or "model.train_step()"'
'and "model.val_step()" must return a dict')
# Since we have multiple sources, we add a suffix to log_var names,
# so that we can differentiate them.
if 'log_vars' in outputs:
log_vars = outputs['log_vars']
log_vars = {k + source: v for k, v in log_vars.items()}
self.log_buffer.update(log_vars, outputs['num_samples'])
self.outputs = outputs
def train(self, data_loaders, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loaders = data_loaders
self.main_loader = self.data_loaders[0]
# Add aliasing
self.data_loader = self.main_loader
self.aux_loaders = self.data_loaders[1:]
self.aux_iters = [cycle(loader) for loader in self.aux_loaders]
auxiliary_iter_times = [1] * len(self.aux_loaders)
use_aux_per_niter = 1
if 'train_ratio' in kwargs:
train_ratio = kwargs.pop('train_ratio')
use_aux_per_niter = train_ratio[0]
auxiliary_iter_times = train_ratio[1:]
self._max_iters = self._max_epochs * len(self.main_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.main_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, source='')
self.call_hook('after_train_iter')
if self._iter % use_aux_per_niter != 0:
self._iter += 1
continue
for idx, n_times in enumerate(auxiliary_iter_times):
for _ in range(n_times):
data_batch = next(self.aux_iters[idx])
self.call_hook('before_train_iter')
self.run_iter(
data_batch, train_mode=True, source=f'/aux{idx}')
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
    # Validation is handled by the eval hook, so this method is
    # intentionally not implemented.
def val(self, data_loader, **kwargs):
raise NotImplementedError
def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training.
`data_loaders[0]` is the main data_loader, which contains
target datasets and determines the epoch length.
`data_loaders[1:]` are auxiliary data loaders, which contain
auxiliary web datasets.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2)] means running 2
epochs for training iteratively. Note that val epoch is not
supported for this runner for simplicity.
max_epochs (int | None): The max epochs that training lasts,
deprecated now. Default: None.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(workflow) == 1 and workflow[0][0] == 'train'
if max_epochs is not None:
warnings.warn(
'setting max_epochs in run is deprecated, '
'please set max_epochs in runner_config', DeprecationWarning)
self._max_epochs = max_epochs
assert self._max_epochs is not None, (
'max_epochs must be specified during instantiation')
mode, epochs = workflow[0]
self._max_iters = self._max_epochs * len(data_loaders[0])
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow,
self._max_epochs)
self.call_hook('before_run')
while self.epoch < self._max_epochs:
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
f'runner has no method named "{mode}" to run an '
'epoch')
epoch_runner = getattr(self, mode)
else:
raise TypeError(
f'mode in workflow must be a str, but got {mode}')
for _ in range(epochs):
if mode == 'train' and self.epoch >= self._max_epochs:
break
epoch_runner(data_loaders, **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
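# --- Hedged sketch (not part of the original file): the training loop above
# interleaves the main dataloader with cycled auxiliary loaders according to
# ``train_ratio``. A loader-free, runnable illustration using plain lists as
# stand-ins for dataloaders (all values are made up):
def _omnisource_interleave_demo():
    main_batches = ['m0', 'm1', 'm2']
    aux_batches = ['a0', 'a1']              # shorter source, cycled forever
    aux_iter = cycle(aux_batches)
    aux_per_main_iter = 2                   # e.g. train_ratio = [1, 2]
    schedule = []
    for batch in main_batches:
        schedule.append(batch)              # one main-source step
        for _ in range(aux_per_main_iter):  # then the auxiliary-source steps
            schedule.append(next(aux_iter))
    # schedule == ['m0', 'a0', 'a1', 'm1', 'a0', 'a1', 'm2', 'a0', 'a1']
    return schedule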
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/runner/omnisource_runner.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import time
import warnings
import torch
import mmcv
from mmcv.runner import EpochBasedRunner
class AnnealingRunner(EpochBasedRunner):
def run_iter(self, data_batch, train_mode, **kwargs):
if 'annealing' in kwargs:
kwargs.update(epoch=self.epoch)
kwargs.update(total_epoch=self.max_epochs)
kwargs.update(iter=self._iter)
if self.batch_processor is not None:
outputs = self.batch_processor(
self.model, data_batch, train_mode=train_mode, **kwargs)
elif train_mode:
outputs = self.model.train_step(data_batch, self.optimizer,
**kwargs)
else:
outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('"batch_processor()" or "model.train_step()"'
'and "model.val_step()" must return a dict')
if 'log_vars' in outputs:
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
def train(self, data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = self._max_epochs * len(self.data_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, **kwargs)
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = 'val'
self.data_loader = data_loader
self.call_hook('before_val_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_val_iter')
with torch.no_grad():
self.run_iter(data_batch, train_mode=False, **kwargs)
self.call_hook('after_val_iter')
        self.call_hook('after_val_epoch')
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/runner/annealing_runner.py |
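# --- Hedged sketch (not part of the original files): when ``annealing=True``
# is passed through ``runner.run``, ``AnnealingRunner.run_iter`` above forwards
# ``epoch``, ``total_epoch`` and ``iter`` to ``model.train_step``, which can
# use them to anneal a loss weight. The linear ramp below is only an
# illustration, not the schedule used by any particular model in this repo.
def _linear_annealing_weight(epoch, total_epoch, max_weight=1.0):
    """Linearly ramp an annealing coefficient from 0 to ``max_weight``."""
    return max_weight * min(1.0, float(epoch) / max(1, total_epoch - 1))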
from .omnisource_runner import OmniSourceDistSamplerSeedHook, OmniSourceRunner
from .annealing_runner import AnnealingRunner
__all__ = ['OmniSourceRunner', 'OmniSourceDistSamplerSeedHook', 'AnnealingRunner']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/runner/__init__.py |
import os.path as osp
import warnings
from math import inf
import mmcv
from mmcv.runner import Hook
from torch.utils.data import DataLoader
from mmaction.utils import get_root_logger
class EpochEvalHook(Hook):
"""Non-Distributed evaluation hook based on epochs.
Notes:
If new arguments are added for EpochEvalHook, tools/test.py,
        tools/eval_metric.py may be affected.
This hook will regularly perform evaluation in a given interval when
performing in non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
evaluation before the training starts if ``start`` <= the resuming
epoch. If None, whether to evaluate is merely decided by
``interval``. Default: None.
interval (int): Evaluation interval (by epochs). Default: 1.
save_best (bool): Whether to save best checkpoint during evaluation.
Default: True.
key_indicator (str | None): Key indicator to measure the best
checkpoint during evaluation when ``save_best`` is set to True.
Options are the evaluation metrics to the test dataset. e.g.,
``top1_acc``, ``top5_acc``, ``mean_class_accuracy``,
``mean_average_precision``, ``mmit_mean_average_precision``
for action recognition dataset (RawframeDataset and VideoDataset).
``AR@AN``, ``auc`` for action localization dataset
(ActivityNetDataset). Default: `top1_acc`.
rule (str | None): Comparison rule for best score. Options are None,
'greater' and 'less'. If set to None, it will infer a reasonable
rule. Default: 'None'.
**eval_kwargs: Evaluation arguments fed into the evaluate function of
the dataset.
"""
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
init_value_map = {'greater': -inf, 'less': inf}
greater_keys = ['acc', 'top', 'AR@', 'auc', 'precision']
less_keys = ['loss']
def __init__(self,
dataloader,
start=None,
interval=1,
save_best=True,
key_indicator='top1_acc',
rule=None,
**eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError(f'dataloader must be a pytorch DataLoader, '
f'but got {type(dataloader)}')
if not isinstance(save_best, bool):
raise TypeError("'save_best' should be a boolean")
if save_best and not key_indicator:
raise ValueError('key_indicator should not be None, when '
'save_best is set to True.')
if rule not in self.rule_map and rule is not None:
raise KeyError(f'rule must be greater, less or None, '
f'but got {rule}.')
if rule is None and save_best:
if any(key in key_indicator for key in self.greater_keys):
rule = 'greater'
elif any(key in key_indicator for key in self.less_keys):
rule = 'less'
else:
raise ValueError(
f'key_indicator must be in {self.greater_keys} '
f'or in {self.less_keys} when rule is None, '
f'but got {key_indicator}')
if interval <= 0:
raise ValueError(f'interval must be positive, but got {interval}')
if start is not None and start < 0:
warnings.warn(
f'The evaluation start epoch {start} is smaller than 0, '
f'use 0 instead', UserWarning)
start = 0
self.dataloader = dataloader
self.interval = interval
self.start = start
self.eval_kwargs = eval_kwargs
self.save_best = save_best
self.key_indicator = key_indicator
self.rule = rule
self.logger = get_root_logger()
if self.save_best:
self.compare_func = self.rule_map[self.rule]
self.best_score = self.init_value_map[self.rule]
self.best_json = dict()
self.initial_epoch_flag = True
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training."""
if not self.initial_epoch_flag:
return
if self.start is not None and runner.epoch >= self.start:
self.after_train_epoch(runner)
self.initial_epoch_flag = False
def evaluation_flag(self, runner):
"""Judge whether to perform_evaluation after this epoch.
Returns:
bool: The flag indicating whether to perform evaluation.
"""
if self.start is None:
if not self.every_n_epochs(runner, self.interval):
# No evaluation during the interval epochs.
return False
elif (runner.epoch + 1) < self.start:
# No evaluation if start is larger than the current epoch.
return False
else:
# Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
if (runner.epoch + 1 - self.start) % self.interval:
return False
return True
def after_train_epoch(self, runner):
"""Called after every training epoch to evaluate the results."""
if not self.evaluation_flag(runner):
return
current_ckpt_path = osp.join(runner.work_dir,
f'epoch_{runner.epoch + 1}.pth')
json_path = osp.join(runner.work_dir, 'best.json')
if osp.exists(json_path) and len(self.best_json) == 0:
self.best_json = mmcv.load(json_path)
self.best_score = self.best_json['best_score']
self.best_ckpt = self.best_json['best_ckpt']
self.key_indicator = self.best_json['key_indicator']
from mmaction.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader)
key_score = self.evaluate(runner, results)
        if (self.save_best and key_score is not None
                and self.compare_func(key_score, self.best_score)):
self.best_score = key_score
self.logger.info(
f'Now best checkpoint is epoch_{runner.epoch + 1}.pth')
self.best_json['best_score'] = self.best_score
self.best_json['best_ckpt'] = current_ckpt_path
self.best_json['key_indicator'] = self.key_indicator
mmcv.dump(self.best_json, json_path)
def evaluate(self, runner, results):
"""Evaluate the results.
Args:
            runner (:obj:`mmcv.Runner`): The underlying training runner.
results (list): Output results.
"""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
if self.key_indicator is not None:
if self.key_indicator not in eval_res:
warnings.warn('The key indicator for evaluation is not '
'included in evaluation result, please specify '
'it in config file')
return None
return eval_res[self.key_indicator]
return None
class DistEpochEvalHook(EpochEvalHook):
"""Distributed evaluation hook based on epochs.
This hook will regularly perform evaluation in a given interval when
performing in distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
evaluation before the training starts if ``start`` <= the resuming
epoch. If None, whether to evaluate is merely decided by
``interval``. Default: None.
interval (int): Evaluation interval (by epochs). Default: 1.
save_best (bool): Whether to save best checkpoint during evaluation.
Default: True.
key_indicator (str | None): Key indicator to measure the best
checkpoint during evaluation when ``save_best`` is set to True.
Options are the evaluation metrics to the test dataset. e.g.,
``top1_acc``, ``top5_acc``, ``mean_class_accuracy``,
``mean_average_precision``, ``mmit_mean_average_precision``
for action recognition dataset (RawframeDataset and VideoDataset).
``AR@AN``, ``auc`` for action localization dataset
(ActivityNetDataset). Default: `top1_acc`.
rule (str | None): Comparison rule for best score. Options are None,
'greater' and 'less'. If set to None, it will infer a reasonable
rule. Default: 'None'.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
**eval_kwargs: Evaluation arguments fed into the evaluate function of
the dataset.
"""
def __init__(self,
dataloader,
start=None,
interval=1,
save_best=True,
key_indicator='top1_acc',
rule=None,
tmpdir=None,
gpu_collect=False,
**eval_kwargs):
super().__init__(
dataloader,
start=start,
interval=interval,
save_best=save_best,
key_indicator=key_indicator,
rule=rule,
**eval_kwargs)
self.tmpdir = tmpdir
self.gpu_collect = gpu_collect
def after_train_epoch(self, runner):
"""Called after each training epoch to evaluate the model."""
if not self.evaluation_flag(runner):
return
current_ckpt_path = osp.join(runner.work_dir,
f'epoch_{runner.epoch + 1}.pth')
json_path = osp.join(runner.work_dir, 'best.json')
if osp.exists(json_path) and len(self.best_json) == 0:
self.best_json = mmcv.load(json_path)
self.best_score = self.best_json['best_score']
self.best_ckpt = self.best_json['best_ckpt']
self.key_indicator = self.best_json['key_indicator']
from mmaction.apis import multi_gpu_test
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
key_score = self.evaluate(runner, results)
if (self.save_best and key_score is not None
and self.compare_func(key_score, self.best_score)):
self.best_score = key_score
self.logger.info(
f'Now best checkpoint is epoch_{runner.epoch + 1}.pth')
self.best_json['best_score'] = self.best_score
self.best_json['best_ckpt'] = current_ckpt_path
self.best_json['key_indicator'] = self.key_indicator
mmcv.dump(self.best_json, json_path)
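# --- Hedged sketch (not part of the original file): the hooks above infer a
# comparison rule from the key indicator name and keep the best score seen so
# far. A minimal runnable restatement using the same rule tables; the indicator
# name and per-epoch scores below are made up.
def _best_score_tracking_demo():
    key_indicator = 'top1_acc'
    if any(key in key_indicator for key in EpochEvalHook.greater_keys):
        rule = 'greater'
    else:
        rule = 'less'
    compare = EpochEvalHook.rule_map[rule]
    best = EpochEvalHook.init_value_map[rule]
    for score in (0.42, 0.40, 0.55):        # fake per-epoch key scores
        if compare(score, best):
            best = score                    # the hook would also dump best.json here
    return best                             # 0.55 for the scores above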
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_hooks.py |
from .accuracy import (average_precision_at_temporal_iou,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, interpolated_precision_recall,
mean_average_precision, mean_class_accuracy,
mmit_mean_average_precision, pairwise_temporal_iou,
softmax, top_k_accuracy)
from .eval_detection import ActivityNetDetection
from .eval_hooks import DistEpochEvalHook, EpochEvalHook
__all__ = [
'DistEpochEvalHook', 'EpochEvalHook', 'top_k_accuracy',
'mean_class_accuracy', 'confusion_matrix', 'mean_average_precision',
'get_weighted_score', 'average_recall_at_avg_proposals',
'pairwise_temporal_iou', 'average_precision_at_temporal_iou',
'ActivityNetDetection', 'softmax', 'interpolated_precision_recall',
'mmit_mean_average_precision'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/__init__.py |
import numpy as np
def confusion_matrix(y_pred, y_real, normalize=None):
"""Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
normalize (str | None): Normalizes confusion matrix over the true
(rows), predicted (columns) conditions or all the population.
If None, confusion matrix will not be normalized. Options are
"true", "pred", "all", None. Default: None.
Returns:
np.ndarray: Confusion matrix.
"""
if normalize not in ['true', 'pred', 'all', None]:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
if isinstance(y_pred, list):
y_pred = np.array(y_pred)
if not isinstance(y_pred, np.ndarray):
raise TypeError(
f'y_pred must be list or np.ndarray, but got {type(y_pred)}')
if not y_pred.dtype == np.int64:
raise TypeError(
f'y_pred dtype must be np.int64, but got {y_pred.dtype}')
if isinstance(y_real, list):
y_real = np.array(y_real)
if not isinstance(y_real, np.ndarray):
raise TypeError(
f'y_real must be list or np.ndarray, but got {type(y_real)}')
if not y_real.dtype == np.int64:
raise TypeError(
f'y_real dtype must be np.int64, but got {y_real.dtype}')
label_set = np.unique(np.concatenate((y_pred, y_real)))
num_labels = len(label_set)
label_map = {label: i for i, label in enumerate(label_set)}
confusion_mat = np.zeros((num_labels, num_labels), dtype=np.int64)
for rlabel, plabel in zip(y_real, y_pred):
index_real = label_map[rlabel]
index_pred = label_map[plabel]
confusion_mat[index_real][index_pred] += 1
with np.errstate(all='ignore'):
if normalize == 'true':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=1, keepdims=True))
elif normalize == 'pred':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=0, keepdims=True))
elif normalize == 'all':
confusion_mat = (confusion_mat / confusion_mat.sum())
confusion_mat = np.nan_to_num(confusion_mat)
return confusion_mat
def mean_class_accuracy(scores, labels):
"""Calculate mean class accuracy.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
Returns:
np.ndarray: Mean class accuracy.
"""
pred = np.argmax(scores, axis=1)
cf_mat = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf_mat.sum(axis=1)
cls_hit = np.diag(cf_mat)
mean_class_acc = np.mean(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
return mean_class_acc
def top_k_accuracy(scores, labels, topk=(1, )):
"""Calculate top k accuracy score.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).
Returns:
list[float]: Top k accuracy score for each k.
"""
res = []
labels = np.array(labels)[:, np.newaxis]
for k in topk:
max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]
match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)
topk_acc_score = match_array.sum() / match_array.shape[0]
res.append(topk_acc_score)
return res
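# --- Hedged sketch (not part of the original file): a tiny runnable example of
# ``top_k_accuracy`` and ``mean_class_accuracy`` showing the expected inputs
# (a list of per-class score arrays plus integer labels); all values made up.
def _recognition_accuracy_demo():
    scores = [np.array([0.1, 0.7, 0.2]),    # predicted class 1
              np.array([0.6, 0.3, 0.1]),    # predicted class 0
              np.array([0.2, 0.2, 0.6])]    # predicted class 2
    labels = np.array([1, 2, 2], dtype=np.int64)  # confusion_matrix expects int64
    top1, top2 = top_k_accuracy(scores, labels, topk=(1, 2))
    mean_cls_acc = mean_class_accuracy(scores, labels)
    return top1, top2, mean_cls_acc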
def mmit_mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition. Used for reporting
MMIT style mAP on Multi-Moments in Times. The difference is that this
method calculates average-precision for each sample and averages them among
samples.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
np.float: The MMIT style mean average precision.
"""
results = []
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
return np.mean(results)
def mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
np.float: The mean average precision.
"""
results = []
scores = np.stack(scores).T
labels = np.stack(labels).T
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
results = [x for x in results if not np.isnan(x)]
if results == []:
return np.nan
return np.mean(results)
def binary_precision_recall_curve(y_score, y_true):
"""Calculate the binary precision recall curve at step thresholds.
Args:
y_score (np.ndarray): Prediction scores for each class.
Shape should be (num_classes, ).
y_true (np.ndarray): Ground truth many-hot vector.
Shape should be (num_classes, ).
Returns:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
        thresholds (np.ndarray): Different thresholds at which precision and
recall are tested.
"""
assert isinstance(y_score, np.ndarray)
assert isinstance(y_true, np.ndarray)
assert y_score.shape == y_true.shape
# make y_true a boolean vector
y_true = (y_true == 1)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# There may be ties in values, therefore find the `distinct_value_inds`
distinct_value_inds = np.where(np.diff(y_score))[0]
threshold_inds = np.r_[distinct_value_inds, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = np.cumsum(y_true)[threshold_inds]
fps = 1 + threshold_inds - tps
thresholds = y_score[threshold_inds]
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def pairwise_temporal_iou(candidate_segments,
target_segments,
calculate_overlap_self=False):
"""Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 1-dim/2-dim array in format
``[init, end]/[m x 2:=[init, end]]``.
target_segments (np.ndarray): 2-dim array in format
``[n x 2:=[init, end]]``.
calculate_overlap_self (bool): Whether to calculate overlap_self
(union / candidate_length) or not. Default: False.
Returns:
t_iou (np.ndarray): 1-dim array [n] /
2-dim array [n x m] with IoU ratio.
t_overlap_self (np.ndarray, optional): 1-dim array [n] /
2-dim array [n x m] with overlap_self, returns when
calculate_overlap_self is True.
"""
candidate_segments_ndim = candidate_segments.ndim
if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]:
raise ValueError('Dimension of arguments is incorrect')
if candidate_segments_ndim == 1:
candidate_segments = candidate_segments[np.newaxis, :]
n, m = target_segments.shape[0], candidate_segments.shape[0]
t_iou = np.empty((n, m), dtype=np.float32)
if calculate_overlap_self:
t_overlap_self = np.empty((n, m), dtype=np.float32)
for i in range(m):
candidate_segment = candidate_segments[i, :]
tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])
tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +
(candidate_segment[1] - candidate_segment[0]) -
segments_intersection)
# Compute overlap as the ratio of the intersection
# over union of two segments.
t_iou[:, i] = (segments_intersection.astype(float) / segments_union)
if calculate_overlap_self:
candidate_length = candidate_segment[1] - candidate_segment[0]
t_overlap_self[:, i] = (
segments_intersection.astype(float) / candidate_length)
if candidate_segments_ndim == 1:
t_iou = np.squeeze(t_iou, axis=1)
if calculate_overlap_self:
if candidate_segments_ndim == 1:
t_overlap_self = np.squeeze(t_overlap_self, axis=1)
return t_iou, t_overlap_self
return t_iou
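# --- Hedged sketch (not part of the original file): a small runnable example
# of ``pairwise_temporal_iou`` showing the expected ``[start, end]`` layouts;
# the segment boundaries are made up.
def _temporal_iou_demo():
    candidate = np.array([1.0, 3.0])               # a single proposal (1-dim input)
    targets = np.array([[0.0, 2.0], [2.5, 4.0]])   # two ground truth segments
    t_iou = pairwise_temporal_iou(candidate, targets)
    return t_iou                                   # shape (2,): IoU with each target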
def average_recall_at_avg_proposals(ground_truth,
proposals,
total_num_proposals,
max_avg_proposals=None,
temporal_iou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Computes the average recall given an average number (percentile) of
proposals per video.
Args:
ground_truth (dict): Dict containing the ground truth instances.
proposals (dict): Dict containing the proposal instances.
total_num_proposals (int): Total number of proposals in the
proposal dict.
max_avg_proposals (int | None): Max number of proposals for one video.
Default: None.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
tuple([np.ndarray, np.ndarray, np.ndarray, float]):
(recall, average_recall, proposals_per_video, auc)
In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold
at the j-th average number (percentile) of average number of
proposals per video. The average_recall is recall averaged
over a list of temporal_iou threshold (1D array). This is
equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``
is the average number of proposals per video. The auc is the area
under ``AR@AN`` curve.
"""
total_num_videos = len(ground_truth)
if not max_avg_proposals:
max_avg_proposals = float(total_num_proposals) / total_num_videos
ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals)
# For each video, compute temporal_iou scores among the retrieved proposals
score_list = []
total_num_retrieved_proposals = 0
for video_id in ground_truth:
# Get proposals for this video.
proposals_video_id = proposals[video_id]
this_video_proposals = proposals_video_id[:, :2]
# Sort proposals by score.
sort_idx = proposals_video_id[:, 2].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :].astype(
np.float32)
# Get ground-truth instances associated to this video.
ground_truth_video_id = ground_truth[video_id]
this_video_ground_truth = ground_truth_video_id[:, :2].astype(
np.float32)
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
score_list.append(np.zeros((n, 1)))
continue
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(
this_video_ground_truth, axis=0)
num_retrieved_proposals = np.minimum(
int(this_video_proposals.shape[0] * ratio),
this_video_proposals.shape[0])
total_num_retrieved_proposals += num_retrieved_proposals
this_video_proposals = this_video_proposals[:
num_retrieved_proposals, :]
# Compute temporal_iou scores.
t_iou = pairwise_temporal_iou(this_video_proposals,
this_video_ground_truth)
score_list.append(t_iou)
# Given that the length of the videos is really varied, we
# compute the number of proposals in terms of a ratio of the total
# proposals retrieved, i.e. average recall at a percentage of proposals
# retrieved per video.
# Computes average recall.
pcn_list = np.arange(1, 101) / 100.0 * (
max_avg_proposals * float(total_num_videos) /
total_num_retrieved_proposals)
matches = np.empty((total_num_videos, pcn_list.shape[0]))
positives = np.empty(total_num_videos)
recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))
# Iterates over each temporal_iou threshold.
for ridx, temporal_iou in enumerate(temporal_iou_thresholds):
# Inspect positives retrieved per video at different
# number of proposals (percentage of the total retrieved).
for i, score in enumerate(score_list):
# Total positives per video.
positives[i] = score.shape[0]
# Find proposals that satisfies minimum temporal_iou threshold.
true_positives_temporal_iou = score >= temporal_iou
# Get number of proposals as a percentage of total retrieved.
pcn_proposals = np.minimum(
                (score.shape[1] * pcn_list).astype(int), score.shape[1])
for j, num_retrieved_proposals in enumerate(pcn_proposals):
# Compute the number of matches
# for each percentage of the proposals
matches[i, j] = np.count_nonzero(
(true_positives_temporal_iou[:, :num_retrieved_proposals]
).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
# Recall is averaged.
avg_recall = recall.mean(axis=0)
# Get the average number of proposals per video.
proposals_per_video = pcn_list * (
float(total_num_retrieved_proposals) / total_num_videos)
# Get AUC
area_under_curve = np.trapz(avg_recall, proposals_per_video)
auc = 100. * float(area_under_curve) / proposals_per_video[-1]
return recall, avg_recall, proposals_per_video, auc
def get_weighted_score(score_list, coeff_list):
"""Get weighted score with given scores and coefficients.
Given n predictions by different classifier: [score_1, score_2, ...,
score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,
coeff_n] (coeff_list), return weighted score: weighted_score =
score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n
Args:
score_list (list[list[np.ndarray]]): List of list of scores, with shape
n(number of predictions) X num_samples X num_classes
coeff_list (list[float]): List of coefficients, with shape n.
Returns:
list[np.ndarray]: List of weighted scores.
"""
assert len(score_list) == len(coeff_list)
num_samples = len(score_list[0])
for i in range(1, len(score_list)):
assert len(score_list[i]) == num_samples
scores = np.array(score_list) # (num_coeff, num_samples, num_classes)
coeff = np.array(coeff_list) # (num_coeff, )
weighted_scores = list(np.dot(scores.T, coeff).T)
return weighted_scores
def softmax(x, dim=1):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))
return e_x / e_x.sum(axis=dim, keepdims=True)
def interpolated_precision_recall(precision, recall):
"""Interpolated AP - VOCdevkit from VOC 2011.
Args:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
Returns:
float: Average precision score.
"""
mprecision = np.hstack([[0], precision, [0]])
mrecall = np.hstack([[0], recall, [1]])
for i in range(len(mprecision) - 1)[::-1]:
mprecision[i] = max(mprecision[i], mprecision[i + 1])
idx = np.where(mrecall[1::] != mrecall[0:-1])[0] + 1
ap = np.sum((mrecall[idx] - mrecall[idx - 1]) * mprecision[idx])
return ap
def average_precision_at_temporal_iou(ground_truth,
prediction,
temporal_iou_thresholds=(np.linspace(
0.5, 0.95, 10))):
"""Compute average precision (in detection task) between ground truth and
predicted data frames. If multiple predictions match the same predicted
segment, only the one with highest score is matched as true positive. This
code is greatly inspired by Pascal VOC devkit.
Args:
ground_truth (dict): Dict containing the ground truth instances.
Key: 'video_id'
Value (np.ndarray): 1D array of 't-start' and 't-end'.
prediction (np.ndarray): 2D array containing the information of
proposal instances, including 'video_id', 'class_id', 't-start',
't-end' and 'score'.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
np.ndarray: 1D array of average precision score.
"""
ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32)
if len(prediction) < 1:
return ap
num_gts = 0.
lock_gt = dict()
for key in ground_truth:
lock_gt[key] = np.ones(
(len(temporal_iou_thresholds), len(ground_truth[key]))) * -1
num_gts += len(ground_truth[key])
# Sort predictions by decreasing score order.
prediction = np.array(prediction)
scores = prediction[:, 4].astype(float)
sort_idx = np.argsort(scores)[::-1]
prediction = prediction[sort_idx]
# Initialize true positive and false positive vectors.
tp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
fp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
    # Assign true positives to the matching ground truth instances.
for idx, this_pred in enumerate(prediction):
# Check if there is at least one ground truth in the video.
if this_pred[0] in ground_truth:
this_gt = np.array(ground_truth[this_pred[0]], dtype=float)
else:
fp[:, idx] = 1
continue
t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt)
# We would like to retrieve the predictions with highest t_iou score.
t_iou_sorted_idx = t_iou.argsort()[::-1]
for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds):
for jdx in t_iou_sorted_idx:
if t_iou[jdx] < t_iou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[this_pred[0]][t_idx, jdx] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[this_pred[0]][t_idx, jdx] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)
fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)
recall_cumsum = tp_cumsum / num_gts
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(temporal_iou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
return ap
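# --- Hedged sketch (not part of the original file): ``get_weighted_score`` and
# ``softmax`` above are typically combined for late fusion of several
# classifiers' predictions. A minimal runnable illustration with made-up
# two-stream scores and fusion coefficients.
def _late_fusion_demo():
    rgb_scores = [np.array([2.0, 1.0, 0.5]), np.array([0.2, 0.1, 3.0])]
    flow_scores = [np.array([1.5, 1.8, 0.1]), np.array([0.3, 0.2, 2.5])]
    fused = get_weighted_score([rgb_scores, flow_scores], [1.0, 0.5])
    probs = softmax(np.stack(fused), dim=1)   # normalize fused scores per sample
    return probs                              # shape (2, 3); each row sums to 1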
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/accuracy.py |
import json
import numpy as np
from mmcv.utils import print_log
from ...utils import get_root_logger
from .accuracy import interpolated_precision_recall, pairwise_temporal_iou
class ActivityNetDetection:
"""Class to evaluate detection results on ActivityNet.
Args:
ground_truth_filename (str | None): The filename of groundtruth.
Default: None.
prediction_filename (str | None): The filename of action detection
results. Default: None.
tiou_thresholds (np.ndarray): The thresholds of temporal iou to
evaluate. Default: ``np.linspace(0.5, 0.95, 10)``.
verbose (bool): Whether to print verbose logs. Default: False.
"""
def __init__(self,
ground_truth_filename=None,
prediction_filename=None,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
verbose=False):
if not ground_truth_filename:
raise IOError('Please input a valid ground truth file.')
if not prediction_filename:
raise IOError('Please input a valid prediction file.')
self.ground_truth_filename = ground_truth_filename
self.prediction_filename = prediction_filename
self.tiou_thresholds = tiou_thresholds
self.verbose = verbose
self.ap = None
self.logger = get_root_logger()
# Import ground truth and predictions.
self.ground_truth, self.activity_index = self._import_ground_truth(
ground_truth_filename)
self.prediction = self._import_prediction(prediction_filename)
if self.verbose:
log_msg = (
'[INIT] Loaded ground_truth from '
f'{self.ground_truth_filename}, prediction from '
f'{self.prediction_filename}.\n'
f'Number of ground truth instances: {len(self.ground_truth)}\n'
f'Number of predictions: {len(self.prediction)}\n'
f'Fixed threshold for tiou score: {self.tiou_thresholds}')
print_log(log_msg, logger=self.logger)
@staticmethod
def _import_ground_truth(ground_truth_filename):
"""Read ground truth file and return the ground truth instances and the
activity classes.
Args:
ground_truth_filename (str): Full path to the ground truth json
file.
Returns:
tuple[list, dict]: (ground_truth, activity_index).
ground_truth contains the ground truth instances, which is in a
dict format.
activity_index contains classes index.
"""
with open(ground_truth_filename, 'r') as f:
data = json.load(f)
# Checking format
activity_index, class_idx = {}, 0
ground_truth = []
for video_id, video_info in data.items():
for anno in video_info['annotations']:
if anno['label'] not in activity_index:
activity_index[anno['label']] = class_idx
class_idx += 1
# old video_anno
ground_truth_item = {}
ground_truth_item['video-id'] = video_id[2:]
ground_truth_item['t-start'] = float(anno['segment'][0])
ground_truth_item['t-end'] = float(anno['segment'][1])
ground_truth_item['label'] = activity_index[anno['label']]
ground_truth.append(ground_truth_item)
return ground_truth, activity_index
def _import_prediction(self, prediction_filename):
"""Read prediction file and return the prediction instances.
Args:
prediction_filename (str): Full path to the prediction json file.
Returns:
List: List containing the prediction instances (dictionaries).
"""
with open(prediction_filename, 'r') as f:
data = json.load(f)
# Read predictions.
prediction = []
for video_id, video_info in data['results'].items():
for result in video_info:
prediction_item = dict()
prediction_item['video-id'] = video_id
prediction_item['label'] = self.activity_index[result['label']]
prediction_item['t-start'] = float(result['segment'][0])
prediction_item['t-end'] = float(result['segment'][1])
prediction_item['score'] = result['score']
prediction.append(prediction_item)
return prediction
def wrapper_compute_average_precision(self):
"""Computes average precision for each class."""
ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
# Adaptation to query faster
ground_truth_by_label = []
prediction_by_label = []
for i in range(len(self.activity_index)):
ground_truth_by_label.append([])
prediction_by_label.append([])
for gt in self.ground_truth:
ground_truth_by_label[gt['label']].append(gt)
for pred in self.prediction:
prediction_by_label[pred['label']].append(pred)
for i in range(len(self.activity_index)):
ap_result = compute_average_precision_detection(
ground_truth_by_label[i], prediction_by_label[i],
self.tiou_thresholds)
ap[:, i] = ap_result
return ap
def evaluate(self):
"""Evaluates a prediction file.
For the detection task we measure the interpolated mean average
precision to measure the performance of a method.
"""
self.ap = self.wrapper_compute_average_precision()
self.mAP = self.ap.mean(axis=1)
self.average_mAP = self.mAP.mean()
return self.mAP, self.average_mAP
def compute_average_precision_detection(ground_truth,
prediction,
tiou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Compute average precision (detection task) between ground truth and
predictions data frames. If multiple predictions occurs for the same
predicted segment, only the one with highest score is matches as true
positive. This code is greatly inspired by Pascal VOC devkit.
Args:
ground_truth (list[dict]): List containing the ground truth instances
(dictionaries). Required keys are 'video-id', 't-start' and
't-end'.
prediction (list[dict]): List containing the prediction instances
(dictionaries). Required keys are: 'video-id', 't-start', 't-end'
and 'score'.
tiou_thresholds (np.ndarray): A 1darray indicates the temporal
intersection over union threshold, which is optional.
Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
        np.ndarray: 1D array of average precision scores, one per
            temporal IoU threshold.
"""
num_thresholds = len(tiou_thresholds)
num_gts = len(ground_truth)
num_preds = len(prediction)
ap = np.zeros(num_thresholds)
if len(prediction) == 0:
return ap
num_positive = float(num_gts)
lock_gt = np.ones((num_thresholds, num_gts)) * -1
# Sort predictions by decreasing score order.
prediction.sort(key=lambda x: -x['score'])
# Initialize true positive and false positive vectors.
tp = np.zeros((num_thresholds, num_preds))
fp = np.zeros((num_thresholds, num_preds))
# Adaptation to query faster
ground_truth_by_videoid = {}
for i, item in enumerate(ground_truth):
item['index'] = i
ground_truth_by_videoid.setdefault(item['video-id'], []).append(item)
    # Assign true positives to the matching ground truth instances.
for idx, pred in enumerate(prediction):
if pred['video-id'] in ground_truth_by_videoid:
gts = ground_truth_by_videoid[pred['video-id']]
else:
fp[:, idx] = 1
continue
tiou_arr = pairwise_temporal_iou(
np.array([pred['t-start'], pred['t-end']]),
np.array([np.array([gt['t-start'], gt['t-end']]) for gt in gts]))
tiou_arr = tiou_arr.reshape(-1)
# We would like to retrieve the predictions with highest tiou score.
tiou_sorted_idx = tiou_arr.argsort()[::-1]
for t_idx, tiou_threshold in enumerate(tiou_thresholds):
for j_idx in tiou_sorted_idx:
if tiou_arr[j_idx] < tiou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[t_idx, gts[j_idx]['index']] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[t_idx, gts[j_idx]['index']] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
    tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)
    fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)
recall_cumsum = tp_cumsum / num_positive
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(tiou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
return ap
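# --- Hedged sketch (not part of the original file): a tiny runnable example of
# ``compute_average_precision_detection`` showing the required dict keys for
# ground truth and prediction instances; all segments and scores are made up.
def _detection_ap_demo():
    ground_truth = [
        {'video-id': 'v1', 't-start': 0.0, 't-end': 2.0},
    ]
    prediction = [
        {'video-id': 'v1', 't-start': 0.1, 't-end': 1.9, 'score': 0.9},
        {'video-id': 'v1', 't-start': 5.0, 't-end': 6.0, 'score': 0.5},
    ]
    ap = compute_average_precision_detection(
        ground_truth, prediction, tiou_thresholds=np.array([0.5, 0.75]))
    return ap   # one average precision value per tIoU threshold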
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_detection.py |